Example #1
def _get_button_images(im):
    im_mono = extract_color(im, (0, 180), (0, 120), (0, 120))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = [cv2.approxPolyDP(c, 6, True) for c in contours]
    midpoint_y = im.shape[0] / 2  # shape[0] is the image height; the buttons sit in the lower half
    contours = [c for c in contours if len(c) == 4 and cv2.boundingRect(c)[1] > midpoint_y and cv2.isContourConvex(c)]
    button_box = sorted(contours, key=cv2.contourArea)[-1]
    button_box = button_box.reshape((4, 2))
    button_im = four_point_transform(im, button_box)

    button_im_mono = extract_color(button_im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(button_im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # There's 4 buttons, which will be the largest 4 contours
    contours = sorted(contours, key=cv2.contourArea)[-4:]
    contour_rects = [cv2.boundingRect(c) for c in contours]
    # Sort from left to right
    contour_rects = sorted(contour_rects, key=lambda rect: rect[0])

    buttons = []
    for x, y, w, h in contour_rects:
        button = four_point_transform(button_im, np.array(((x, y), (x + w, y), (x, y + h), (x + w, y + h))), -6)
        button = extract_color(button, 18, (50, 100), (175, 255))
        buttons.append(button)

    return buttons
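Note: extract_color and four_point_transform are project helpers this snippet assumes (four_point_transform matches the well-known imutils.perspective helper). A minimal sketch of what extract_color might look like, assuming it wraps cv2.inRange in HSV space and accepts a scalar or a (low, high) range per channel:

import cv2
import numpy as np

def extract_color(im, hue, sat, val):
    # Hypothetical helper: mask of pixels inside the given HSV bounds;
    # each of hue/sat/val may be a scalar or a (low, high) pair.
    ranges = [(v, v) if np.isscalar(v) else v for v in (hue, sat, val)]
    lower = np.array([r[0] for r in ranges], dtype=np.uint8)
    upper = np.array([r[1] for r in ranges], dtype=np.uint8)
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, lower, upper)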
Example #2
def _get_screen_image(im):
    im_mono = extract_color(im, 76, (50, 150), (50, 150))
    # show(im_mono)
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    midpoint_y = im.shape[0] * 2 / 3  # shape[0] is the image height; the screen sits in the upper part
    contours_filtered = []
    for contour in contours:
        # contour = cv2.approxPolyDP(contour, 2, True)
        x, y, w, h = cv2.boundingRect(contour)
        # if y + h < midpoint_y and cv2.isContourConvex(contour):
        if y + h < midpoint_y:
            contours_filtered.append(contour)

    contours = sorted(contours_filtered, key=cv2.contourArea)[-4:]
    boxes = [cv2.boundingRect(c) for c in contours]
    x1 = min(x for x, y, w, h in boxes)
    y1 = min(y for x, y, w, h in boxes)
    x2 = max(x + w for x, y, w, h in boxes)
    y2 = max(y + h for x, y, w, h in boxes)
    points = np.array(((x1, y1), (x1, y2), (x2, y1), (x2, y2)))
    screen = four_point_transform(im, points)
    screen_mono = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
    _, screen_mono = cv2.threshold(screen_mono, 245, 255, cv2.THRESH_BINARY)

    return screen_mono
Example #3
def contourparser(contours):
    # Need at least three contours: two eyes and a swim bladder.
    if len(contours) >= 3:
        contour1, contour2, contour3 = contours[0:3]
        cnt1x, cnt1y, w, h = cv2.boundingRect(contour1)
        cnt2x, cnt2y, w, h = cv2.boundingRect(contour2)
        cnt3x, cnt3y, w, h = cv2.boundingRect(contour3)
        dist12 = np.sqrt((cnt1x-cnt2x)**2 + (cnt1y-cnt2y)**2)
        dist13 = np.sqrt((cnt1x-cnt3x)**2 + (cnt1y-cnt3y)**2)
        dist23 = np.sqrt((cnt2x-cnt3x)**2 + (cnt2y-cnt3y)**2)
        mindist = np.argmin([dist12, dist13, dist23])
        if mindist == 0 and dist12 < 26 and dist13 < 100:
            eye1 = [contour1, cnt1x, cnt1y]
            eye2 = [contour2, cnt2x, cnt2y]
            swimb = [contour3, cnt3x, cnt3y]
        elif mindist == 1 and dist13 < 26 and dist12 < 100:
            eye1 = [contour1, cnt1x, cnt1y]
            eye2 = [contour3, cnt3x, cnt3y]
            swimb = [contour2, cnt2x, cnt2y]
        elif mindist == 2 and dist23 < 26 and dist13 < 100:
            eye1 = [contour2, cnt2x, cnt2y]
            eye2 = [contour3, cnt3x, cnt3y]
            swimb = [contour1, cnt1x, cnt1y]
        else:
            eye1 = []
            eye2 = []
            swimb = []
        return eye1, eye2, swimb
    else:
        return [], [], []
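A usage sketch on synthetic data, assuming the caller passes contours sorted largest-first; the two nearby blobs are labelled as eyes, the farther one as the swim bladder:

import cv2
import numpy as np

mask = np.zeros((100, 200), np.uint8)
cv2.circle(mask, (40, 50), 8, 255, -1)    # eye 1
cv2.circle(mask, (60, 50), 8, 255, -1)    # eye 2, ~20 px away
cv2.circle(mask, (140, 50), 10, 255, -1)  # swim bladder, farther off
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
contours = sorted(contours, key=cv2.contourArea, reverse=True)
eye1, eye2, swimb = contourparser(contours)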
Example #4
def warpTriangle(img1, img2, t1, t2) :

    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))

    # Offset points by left top corner of the respective rectangles
    t1Rect = [] 
    t2Rect = []
    t2RectInt = []

    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append((int(t2[i][0] - r2[0]), int(t2[i][1] - r2[1])))


    # Get mask by filling triangle
    mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)

    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    
    size = (r2[2], r2[3])

    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    
    img2Rect = img2Rect * mask

    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )
     
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect
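warpTriangle relies on an applyAffineTransform helper that is not shown; a sketch consistent with the learnopencv tutorial this code appears to follow:

import cv2
import numpy as np

def applyAffineTransform(src, srcTri, dstTri, size):
    # Estimate the affine transform mapping srcTri onto dstTri and apply it.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    return cv2.warpAffine(src, warpMat, (size[0], size[1]), None,
                          flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_REFLECT_101)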
Example #5
 def get_table_bounds(self):
     '''Get best possible table bounds
     '''
     table_bounds = None
     image_gray = cv2.cvtColor(self.image_object,cv2.COLOR_BGR2GRAY)
     temp_image, contours, hierarchy = cv2.findContours(image_gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
     best_match_contour_index = None
     max_contour_size = 0
     count = 0
     for contour in contours:
         contour_size = cv2.contourArea(contour)
         if contour_size > max_contour_size:
             x,y,w,h = cv2.boundingRect(contour)
             if x>1 and y>1:
                 best_match_contour_index = count
                 max_contour_size = contour_size
         count += 1
     if best_match_contour_index is not None:  # index 0 is a valid match
         x,y,w,h = cv2.boundingRect(contours[best_match_contour_index])
         x = x - BUFFER_LENGTH
         w = w + BUFFER_LENGTH
         cv2.rectangle(self.image_object,(x,y),(x+w,y+h),(0,0,0),2)
         cv2.rectangle(self.image_object,(x,y),(x+w,y+h),(255,0,0),4)
         table_bounds = {"top":y*self.vertical_ratio, "left":x*self.horizontal_ratio, "bottom":(h+y)*self.vertical_ratio, "right":(w+x)*self.horizontal_ratio}
         cv2.imwrite(self.temp_img_file, self.image_object)
     return table_bounds 
Example #6
def constructObj(contours, rIntersec, rSize):
    cnt = sorted(contours, key=getKey, reverse=True)
    nCnt = len(cnt)
    
    obj = cnt[0]
    fundRect = cv2.boundingRect(obj)
    
    ratios = np.zeros(nCnt, dtype=float)
    
    for i in range(1, nCnt):
        
        rect = cv2.boundingRect(cnt[i])
        sRect = rect[2]*rect[3]
        
        sIntersec = max(0, min(fundRect[0] + fundRect[2], rect[0] + rect[2]) - max(fundRect[0], rect[0])) \
                    * max(0, min(fundRect[1] + fundRect[3], rect[1]+rect[3]) - max(fundRect[1], rect[1]))        
        
        ratios[i] = sIntersec / float(sRect)
        
        if cv2.contourArea(cnt[i]) > rSize*cv2.contourArea(obj) and ratios[i] > rIntersec:
            #print i, obj.shape, np.shape(cnt[i])
            obj = np.append(obj, cnt[i], axis=0)
            fundRect = cv2.boundingRect(obj)
    
    return obj, ratios
Example #7
		def recupdate(contours):
			areas = [cv2.contourArea(c) for c in contours]
			
			def showprev():
				cv2.rectangle(frame, (self.x, self.y), (self.x + self.w, self.y + self.h), (a, b, ccCccc), 2)
				cv2.rectangle(hsv_img, (self.x, self.y), (self.x + self.w, self.y + self.h), (a, b, ccCccc), 2)

			if areas != []:
				threshold = 0.9
				oflast = 30 #frames
				max_index = np.argmax(areas)
				cnt = contours[max_index]
				nowx, nowy, noww, nowh = cv2.boundingRect(cnt)
				self.previouswidths.append(noww)
				self.previousheights.append(nowh)
				if (threshold < (float(noww) / float(np.median(self.previouswidths[-oflast:]))) < (1 / threshold)) and (threshold < (float(nowh) / float(np.median(self.previousheights[-oflast:]))) < (1 / threshold)):
					Height, Width, trash = frame.shape
					self.x, self.y, self.w, self.h = cv2.boundingRect(cnt)
					if (0.95 < self.w/self.h < 1/0.95) or len(contours) == 1:
						cv2.rectangle(frame, (self.x, self.y), (self.x + self.w, self.y + self.h), (a, b, ccCccc), 2)
						cv2.rectangle(hsv_img, (self.x, self.y), (self.x + self.w, self.y + self.h), (a, b, ccCccc), 2)
					else:
						contours = contours[:max_index] + contours[max_index + 1:]
						recupdate(contours)
				else:
					showprev()
			else:
				showprev()
Example #8
 def calibrate(self):
     doublecheck = 1
     firstTime = True
     while(doublecheck<=5):
         try:
             count = 0
             cnts = self.getContours()[0:2]
             x1,y1,w1,h1 = cv2.boundingRect(cnts[0])
             x2,y2,w2,h2 = cv2.boundingRect(cnts[1])
             if(firstTime or self.closeTo(x1,self.originX, 600)):
                 self.originX = x1
                 count = count +1
             if(firstTime or self.closeTo(y1,self.originY, 600)):
                 self.originY = y1
                 count = count+1
             if(firstTime or self.closeTo((x2+w2)-x1,self.width, 600)):
                 self.width = (x2+w2)-x1
                 count = count +1
             if(firstTime or self.closeTo((y2+h2)-y1,self.height,600)):
                 self.height = (y2+h2)-y1
                 count = count +1
             if(count == 4):
                 firstTime = False
                 doublecheck = doublecheck+1
             else:
                 firstTime = True
                 doublecheck = 1
         except Exception:
             print("err")
             continue
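calibrate() depends on self.getContours() and self.closeTo(), neither of which is shown. A plausible sketch of the tolerance check, assuming closeTo is a simple absolute-difference test:

 def closeTo(self, value, target, tolerance):
     # Hypothetical helper: True when value lies within tolerance of target.
     return abs(value - target) <= tolerance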
Example #9
def morphTriangle(img1, img2, img, t1, t2, t, alpha):
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    r = cv2.boundingRect(np.float32([t]))

    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    tRect = []

    for i in range(0, 3):
        tRect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))

    # Get mask by filling triangle
    mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0)

    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    img2Rect = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]

    size = (r[2], r[3])
    warpImage1 = applyAffineTransform(img1Rect, t1Rect, tRect, size)
    warpImage2 = applyAffineTransform(img2Rect, t2Rect, tRect, size)

    # Alpha blend rectangular patches
    imgRect = (1.0 - alpha) * warpImage1 + alpha * warpImage2

    # Copy triangular region of the rectangular patch to the output image
    img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + imgRect * mask
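A usage sketch with synthetic data, assuming the applyAffineTransform helper sketched under Example #4; it morphs a single triangle halfway between two float images:

import numpy as np

img1 = np.random.rand(200, 200, 3).astype(np.float32)
img2 = np.random.rand(200, 200, 3).astype(np.float32)
img = np.zeros_like(img1)
t1 = [(10, 10), (150, 30), (60, 160)]
t2 = [(20, 20), (140, 40), (70, 150)]
t = [((a + c) / 2, (b + d) / 2) for (a, b), (c, d) in zip(t1, t2)]
morphTriangle(img1, img2, img, t1, t2, t, alpha=0.5)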
Example #10
def eye_region(shape):
	pointsl = np.ndarray((6,1,2), dtype=np.float32)
	pointsr = np.ndarray((6,1,2), dtype=np.float32)
	
	for i in range(36,42):
		pointsl[i-36,0] = (shape.part(i).x, shape.part(i).y)
	for i in range(42,48):
		pointsr[i-42,0] = (shape.part(i).x, shape.part(i).y)

	rectl = cv2.boundingRect(pointsl)
	rectr = cv2.boundingRect(pointsr)

	xl = rectl[0] + rectl[2]/2.0
	yl = rectl[1] + rectl[3]/2.0
	xr = rectr[0] + rectr[2]/2.0
	yr = rectr[1] + rectr[3]/2.0

	w = (xr - xl) * 3 / 10
	h = (xr - xl) / 10

	rectl = dlib.rectangle(int(xl-w),int(yl-h),
			int(xl+w),int(yl+h))
	rectr = dlib.rectangle(int(xr-w),int(yr-h),
			int(xr+w),int(yr+h))

	return rectl,rectr
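A usage sketch, assuming the standard dlib 68-point landmark model (points 36-41 and 42-47 outline the left and right eye):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')  # assumed model path
img = cv2.imread('face.jpg')  # hypothetical input
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for face in detector(gray):
    shape = predictor(gray, face)
    rectl, rectr = eye_region(shape)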
Example #11
def find_closest(mask, hand_dir, obj_dir):
	Is = cv2.imread(mask)
	b_contour = get_contour(Is)
	rect = cv2.boundingRect(b_contour)

	Ib = Is[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2], 0]
	Ib = cv2.resize(Ib, (512, 512))

	cv2.imshow('Ib', Ib)

	min_img = None
	min_distance = float('inf')
	min_t_contour = None
	min_It = None
	for t in ls_files(obj_dir, '.png'):
		It = cv2.imread(t)
		bg_mask = get_bg(It)
		It[bg_mask] = 0
		It[~bg_mask] = 255

		t_contour = get_contour(It)
		rect = cv2.boundingRect(t_contour)
		It = It[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2], 1]
		It = cv2.resize(It, (512, 512))

		dist = calc_distance(Ib, It)
		if min_distance > dist:
			min_distance = dist
			min_img = t
			min_t_contour = t_contour
			min_It = It
	cv2.imshow('It', min_It)
	min_hand_img = os.path.join(hand_dir, os.path.basename(min_img))
	min_contact_img = os.path.join(contact_dir, os.path.basename(min_img))  # contact_dir is assumed to be defined at module level
	return min_img, min_hand_img, min_contact_img, b_contour, min_t_contour
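get_contour, get_bg, calc_distance, and ls_files are project helpers the snippet assumes. A minimal sketch of ls_files, assuming it yields sorted full paths filtered by extension:

import os

def ls_files(directory, ext):
    # Hypothetical helper: sorted full paths of files ending with ext.
    return sorted(os.path.join(directory, f)
                  for f in os.listdir(directory) if f.endswith(ext))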
Example #12
def shape_match():

    img1 = cv2.imread('l1.png', 0)
    img2 = cv2.imread('l2.png', 0)

    ret, thresh1 = cv2.threshold(img1, 13, 255, 1)
    ret, thresh2 = cv2.threshold(img2, 13, 255, 1)

    im1,contours1,hierarchy = cv2.findContours(thresh1, 2, 1)
    im2,contours2,hierarchy = cv2.findContours(thresh2, 2, 1)
    print(len(contours1), len(contours2))
    cv2.drawContours(img1,contours1, -1, (200,0,0))
    cv2.drawContours(img2,contours2, -1, (200,0,0))
    cv2.imshow("1",img1)
    cv2.imshow("2",img2)
    cv2.waitKey()
    # query_cnt is used below; this block was commented out, leaving it undefined
    query_cnt = get_correct_cnt(contours2, img2)
    if query_cnt is None:
        print('parse img failed')
        return

    height, width  = img1.shape
    area = height * width
    min_area = area / 25
    max_area = area / 5

    for cnt in contours1:
        print(cv2.boundingRect(cnt))
        letter_area = get_cnt_area(cnt)
        if not (min_area < letter_area and letter_area < max_area):
            continue

        print(cv2.matchShapes(cnt, query_cnt, 1, 0.0))
Example #13
 def detectRover(self, argFrame):
     frame    = self.frame
     hsvFrame = self.frame
     thresh   = self.frame[:,:,0]
     rGreen = (38,67,155,198,0,255)
     rPink = (165,182,155,192,0,255)
     hsvFrame  = cv2.cvtColor(self.frame.copy(), cv2.COLOR_BGR2HSV)
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rGreen[0],rGreen[2],rGreen[4]]),np.array([rGreen[1],rGreen[3],rGreen[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     greenPt = (int((x+x+w)/2),int((y+y+h)/2))
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rPink[0],rPink[2],rPink[4]]),np.array([rPink[1],rPink[3],rPink[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     pinkPt = (int((x+x+w)/2),int((y+y+h)/2))
     self.roverPos = (int((greenPt[0]+pinkPt[0])/2),int((greenPt[1]+pinkPt[1])/2))
     angle = getAngle(pinkPt[0],pinkPt[1],greenPt[0],greenPt[1])
     self.roverHeading = 360+angle[2]*-1
     return greenPt, pinkPt
Example #14
    def __find_conveyor(self, initialization_frame):
        """Set x, y, w, and h to fit the conveyor size

        :param initialization_frame: initialization frame from which the other frames will be calibrated
        :return: None
        """

        kernel_size = int(self.__image_width / 40)
        kernel = np.ones((kernel_size, kernel_size), np.uint8)

        threshold = cv2.inRange(initialization_frame, self.CONVEYOR_COLOR_MIN, self.CONVEYOR_COLOR_MAX)
        _, threshold = cv2.threshold(threshold, 200, 255, cv2.THRESH_BINARY)

        self.__show('Conveyor image', threshold)

        threshold = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=3)
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, kernel, iterations=3)

        _, contours, _ = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        biggest_contour = self.__get_biggest_contour(contours)

        if biggest_contour is None:
            self.__log('Conveyor not found')
            return 0, 0, 0, 0
        else:
            self.__log('Conveyor found at: ' + str(cv2.boundingRect(biggest_contour)))
            return cv2.boundingRect(biggest_contour)
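self.__get_biggest_contour is not shown; a minimal sketch, assuming it picks the largest contour by area and returns None for an empty list:

    def __get_biggest_contour(self, contours):
        # Hypothetical helper: largest contour by area, or None when the list is empty.
        return max(contours, key=cv2.contourArea) if contours else None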
Example #15
def operationsStage3_nontextContoursFiltering(img_pass1):

    img_pass1Copy = img_pass1.copy()
    npaContours, npaHierarchy = cv2.findContours(img_pass1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    
    for i in range(1,len(npaContours)):
        
        [intX, intY, intWidth, intHeight] = cv2.boundingRect(npaContours[i])
        area1 = intHeight * intWidth

        for j in range(1,len(npaContours)):

            [X, Y, Width, Height] = cv2.boundingRect(npaContours[j])
            cx = X + (Width/2)
            cy = Y + (Height/2)
            area2 = Width * Height

            if intX < cx < intX + intWidth and intY < cy < intY + intHeight and i != j and area1 > area2: 

                cv2.drawContours(img_pass1Copy,npaContours,j,255,-1)
            #cv2.waitKey(0)

        cv2.imshow('imgPass1Copy',img_pass1Copy)
        #cv2.waitKey(0)

    cv2.imshow('imgPass1Copy',img_pass1Copy)
    cv2.imwrite('files/1.jpg',img_pass1Copy)
    return img_pass1Copy
Example #16
    def _get_pointer_canny(self, frame, x, y, w, h, window):
        # finding out ROI
        new_x = x
        new_w = (7 * w) // 6  # integer division; this value is used as a slice index below
        new_y = max(y-int(3*h/4.0), 0)
        new_h = y + int(3*h/4.0) - new_y
        target = frame[new_y:new_y+new_h, new_x:new_x+new_w]
        target_gray = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)

        # Finding out canny edges
        canny_img = cv2.Canny(target_gray, 100, 200)
        contours, hierarchy = cv2.findContours(canny_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours_img = cv2.cvtColor(target_gray, cv2.COLOR_GRAY2BGR)

        # cv2.drawContours(contours_img, contours, -1, (0, 0, 255), 1)
        # Finding finger tip
        top_cnt = []
        rects = []
        min_y = 100000
        for cnt in contours:
            (rectx, recty, rectw, recth) = cv2.boundingRect(cnt)
            if recty < min_y:
                min_y = recty  # track the current minimum, otherwise every contour wins
                top_cnt = cnt
        rects.append(cv2.boundingRect(top_cnt))

        # Drawing rectangle
        for rect in rects:
            x, y, w, h = rect
            cv2.rectangle(contours_img, (x, y), (x+w, y+h), (255, 0, 0), 2)

        cv2.imshow(window, contours_img)

        return x, y
Example #17
def detectCardInColumn(column, cx, cy, output = None):
    cards = []
    gray = cv2.inRange(column, (200,200,200), (255,255,255))
    ret,thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    if thresh is None:
        return cards
    contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 6000 and area < 7000:	
            [x,y,w,h] = cv2.boundingRect(cnt)
            card = column[y:(y+h), x:(x+w)]
            data = detectCard(card)
            success = 1 if data[0] != '' else 0
            if success == 1:
                cards.append((data, (cx + x, cy + y, w,h ), 0, cy + y))
                if output is not None:
                    cv2.rectangle(output,(cx + x, cy + y),(cx + x + w, cy + y + h),(0,0,255),2)
        elif area > 700 and area < 1500:
            [x,y,w,h] = cv2.boundingRect(cnt)
            card = column[y:(y+h), x:(x+w)]
            data = detectCard(card)
            success = 1 if data[0] != '' else 0
            if success == 1:
                cards.append((data, (cx + x, cy + y, w,h ), 1, cy + y))
                if output is not None:
                    cv2.rectangle(output,(cx + x, cy + y),(cx + x + w, cy + y + h),(0,0,255),2)
    return sorted(cards, key=itemgetter(3))
Example #18
    def match_overlap(self, contour):
        # first check if the contour directly overlaps with a previous
        (cx, cy), radius = cv2.minEnclosingCircle(contour)

        lf = self.frames[self.last_frame]

        if cx > lf.cx + 6:
            return False

        x1, y1, w1, h1 = cv2.boundingRect(contour)
        x2, y2, w2, h2 = cv2.boundingRect(lf.contour)

        if x2 < x1:
            x1, y1, w1, h1 = cv2.boundingRect(lf.contour)
            x2, y2, w2, h2 = cv2.boundingRect(contour)

        bx, by, bw, bh = cv2.boundingRect(lf.contour)

        #logging.debug("Checking LF Box: " + str(bx) + "\t" + str(bx+bw) + "\t" + str(by) + "\t" + str(by+bh))
        #logging.debug("Checking Cnt   : " + str(cx+radius) + "\t" + str(cy))

        if not x1 <= x2 <= x1 + w1:
            return False

        return (y1 <= y2 <= y1 + h1) or (y2 <= y1 <= y2 + h2)
Example #19
def draw_bounding(img_url, n):
    """Given input image, draw bounding rectangles on top of
    first n contours"""
    img = cv2.imread(img_url)    
    original = cv2.imread(img_url)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, img = cv2.threshold(img, 127, 255, 1)
    plt.gray()

    contours, hier = cv2.findContours(np.array(img), 
                                      cv2.RETR_EXTERNAL, 
                                      cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 1: 
        print(cv2.boundingRect(contours[0]))
        cv2.drawContours(img, [contours[0]], 0, 0, 100) 

    contours, hier = cv2.findContours(np.array(img), 
                                      cv2.RETR_EXTERNAL, 
                                      cv2.CHAIN_APPROX_SIMPLE)
    #sort contours by maximum area
    contours = sorted(contours, key=lambda cnt:cv2.contourArea(cnt), reverse=True)
     
    for cnt in contours[:n]:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(original, (x,y), (x+w,y+h), (0,255,0),2)

    plt.imshow(original)
    plt.show()
Example #20
def get_conts(gray):
    """
    Grab two biggest contours from grayscale image
    :param gray: grayscale image
    :return: list of contours
    """
    conts = []
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    max_contour_2 = 0
    max_contour_1 = 0
    for i in range(len(contours)):
        if len(contours[i]) > len(contours[max_contour_1]):
            max_contour_2 = max_contour_1
            max_contour_1 = i
    if contours:
        rect_1 = cv2.boundingRect(contours[max_contour_1])
        rect_2 = cv2.boundingRect(contours[max_contour_2])
    else:
        return conts
    if nested_rect(rect_2, rect_1):
        conts.append(contours[max_contour_2])
        return conts
    else:
        conts.append(contours[max_contour_1])
        conts.append(contours[max_contour_2])
        return conts
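nested_rect is assumed by get_conts; a sketch, assuming it tests whether the first (x, y, w, h) rectangle lies entirely inside the second:

def nested_rect(inner, outer):
    # Hypothetical helper: True when `inner` lies completely inside `outer`.
    ix, iy, iw, ih = inner
    ox, oy, ow, oh = outer
    return ox <= ix and oy <= iy and ix + iw <= ox + ow and iy + ih <= oy + oh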
Example #21
def findMaxContours():

    conts = []

    for c in range(180):
    # for c in [170,50,86,111]:

        lower_blue = np.array([c-hdelta,0,0])
        upper_blue = np.array([c+hdelta,255,250])
        mask = cv2.inRange(img, lower_blue, upper_blue)

        x = kersize
        kernel = np.ones((x,x), np.uint8)
        erosion = cv2.dilate(mask,kernel,iterations = 1)
        kernel = np.ones((x//2, x//2), np.uint8)  # integer kernel size
        erosion = cv2.erode(erosion,kernel,iterations = 1)

        contours,h = cv2.findContours(erosion,1,2)
        for i,cnt in enumerate(contours):
            x,y,w,h = cv2.boundingRect(cnt)
            if w > 0.6 * img.shape[1]:
                app = True
                for cnt2 in conts:
                    # test the candidate contour's point against each kept contour;
                    # pointPolygonTest returns -1/0/+1, so compare against 0 explicitly
                    if cv2.pointPolygonTest(cnt2, tuple(float(v) for v in cnt[0][0]), False) >= 0:
                        app = False
                if app:
                    conts.append(cnt)

    contsS = sorted(conts, key=lambda x: cv2.boundingRect(x)[2] )

    for i,cnt in enumerate(contsS[-90:] ):
        cv2.drawContours(img,[cnt],0,(0,i*10 % 255, (i*223)%255  ),4)
    cv2.imshow('dst',img)

    return contsS[-100:]
Example #22
 def line_sort(a,b):
     ax,ay,_,_ = cv2.boundingRect(a)
     bx,by,_,_ = cv2.boundingRect(b)
     if abs(ay-by) > 15:
         return ay-by
     else:
         return ax-bx
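line_sort is an old-style cmp comparator (rows first, with a 15-pixel tolerance, then columns). Python 3's sorted() no longer accepts cmp functions, so a usage sketch needs functools:

from functools import cmp_to_key

# Sort contours into reading order: top-to-bottom lines, left-to-right within a line.
contours_in_reading_order = sorted(contours, key=cmp_to_key(line_sort))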
Example #23
def warp_image(img, triangulation, base_points, coord):
    """
    Perform the mesh warping phase.

    triangulation is the Delaunay triangulation of the base points;
    base_points are the coordinates of the landmark points of the reference image.

    Code inspired by http://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    all_points, coordinates = preprocess_image_before_triangulation(img)
    img_out = 255 * np.ones(img.shape, dtype=img.dtype)
    for t in triangulation:
        # triangles to map one another
        src_tri = np.array([[all_points[x][0], all_points[x][1]] for x in t]).astype(np.float32)
        dest_tri = np.array([[base_points[x][0], base_points[x][1]] for x in t]).astype(np.float32)
        # bounding boxes
        src_rect = cv2.boundingRect(np.array([src_tri]))
        dest_rect = cv2.boundingRect(np.array([dest_tri]))

        # crop images
        src_crop_tri = np.zeros((3, 2), dtype=np.float32)
        dest_crop_tri = np.zeros((3, 2))
        for k in range(0, 3):
            for dim in range(0, 2):
                src_crop_tri[k][dim] = src_tri[k][dim] - src_rect[dim]
                dest_crop_tri[k][dim] = dest_tri[k][dim] - dest_rect[dim]

        src_crop_img = img[src_rect[1]:src_rect[1] + src_rect[3], src_rect[0]:src_rect[0] + src_rect[2]]

        # affine transformation estimation
        mat = cv2.getAffineTransform(
            np.float32(src_crop_tri),
            np.float32(dest_crop_tri)
        )
        dest_crop_img = cv2.warpAffine(
            src_crop_img,
            mat,
            (dest_rect[2], dest_rect[3]),
            None,
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REFLECT_101
        )

        # Use a mask to keep only the triangle pixels
        # Get mask by filling triangle
        mask = np.zeros((dest_rect[3], dest_rect[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(dest_crop_tri), (1.0, 1.0, 1.0), 16, 0)

        # Apply mask to cropped region
        dest_crop_img = dest_crop_img * mask

        # Copy triangular region of the rectangular patch to the output image
        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] * (
                (1.0, 1.0, 1.0) - mask)

        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] + dest_crop_img

    return img_out[coord[2]:coord[3], coord[0]:coord[1]]
Example #24
def drawRects(img, ctrs):
	i = 1
	rectList = []
	for ct in ctrs[0]:
		x, y, w, h = cv2.boundingRect(ct)

		#process only vertical rectangles (ie, w<h) with w and h > 10
		if w < h and w > 10 and h > 10:
			#print i, ". ", len(ct), " -- ", cv2.boundingRect(ct), (x+w/2), cv2.minAreaRect(ct)
			rectList.append([cv2.boundingRect(ct), cv2.minAreaRect(ct)])
			clr=(random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
			#cv2.drawContours(image=img, contours=ct, contourIdx=-1, color=clr , thickness=-1)
			cv2.rectangle(img, (x,y), (x+w,y+h), clr, 5)
			cv2.fillConvexPoly(img, ct, clr)
			cv2.rectangle(img, (x+w//2-3,y), (x+w//2+3,y+h), (255,255,255), -1)
			cv2.rectangle(img, (x,y+h//2-3), (x+w,y+h//2+3), (255,255,255), -1)
			
			rotRect = cv2.minAreaRect(ct)
			box = cv2.boxPoints(rotRect)  # cv2.cv.BoxPoints belongs to the removed old cv API
			box = np.int0(box)
			print(box)
			cv2.drawContours(img, [box], 0, (0,0,255),2)
			#cv2.imshow("asdsdasdadasdasd",img)
			#key = cv2.waitKey(1000)
			i = i + 1
	cv2.rectangle(img, (318,0), (322,640), (255,255,255), -1)
	cv2.imshow("Output",img)
	print "done"
	return rectList
Example #25
def cutTaxPayer(img):
    new_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    new_img = cv2.adaptiveThreshold(new_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 9 , 9)
    contours0, hierarchy = cv2.findContours(new_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours0 = list(contours0)  # newer OpenCV returns a tuple; pop() below needs a list
    H, W = img.shape[:2]
    result = np.zeros((H, W), np.uint8)
    for i in range(len(contours0) - 1, -1, -1):
        cnt = contours0[i]
        x, y, w, h = cv2.boundingRect(cnt)
        if w < 10 or h < 10:
            contours0.pop(i)
        else :
            cv2.rectangle(result, (x, y), (x + w, y + h), (255), 1)
            
    for h in range(H):
        for w in range(W):
            if  result[h][w] == 255:
                cv2.line(result, (0, h), (W, h), (255))
                break
    contours1, hierarchy1 = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # for cnt in contours1:
    #     x, y, w, h = cv2.boundingRect(cnt)  
    #     cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)
    #     print x, y
    # cv2.imshow("xxx1", img)
    if len(contours1) < 2:
        return np.ones((H // 4, W * 3 // 4, 3), np.uint8) * 255  # integer shape
    x, y, w, h = cv2.boundingRect(contours1[-2])
    center = (x + w / 2, y + h / 2)
    img = cv2.getRectSubPix(img, (w, h), center)
    return img
Example #26
 def find(self, frame):
     if frame is None:
         self.xbar = 99.0 # xbar, ybar should be in the range [-1.0, 1.0]
         self.ybar = 99.0
         self.diam = 99.0
         return None
     # The capture was successful. Start processing
     hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
     # Choose mask based on self._is_red
     if self._is_red:
         # Red alliance
         mask_neg = cv2.inRange(hsv_image, np.array((0, 50, 50)), np.array((10, 255, 255)))
         mask_pos = cv2.inRange(hsv_image, np.array((170, 50, 50)), np.array((180, 255, 255)))
         mask = mask_pos | mask_neg
     else:
         # Blue alliance
         mask = cv2.inRange(hsv_image, np.array((105, 50, 50)), np.array((130, 255, 255)))
     
     #Eroding and Dilating mask
     opened = cv2.erode(mask, kernel, iterations = 7)
     opened = cv2.dilate(opened, kernel, iterations = 7)
 
     contours, hierarchy = cv2.findContours(opened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
     largest_size = 0
     largest_index = 0
     
     ball_found = False
     if contours: 
         for index, contour in enumerate(contours):
             if cv2.contourArea(contour) > largest_size: 
                 
                 #get co-ordinates and dimensions
                 x,y,w,h = cv2.boundingRect(contour)  # measure the contour being examined, not the previous best
                 
                 #ball squareness
                 ballratio = 1.0 * w/h
                 
                 if ballratio > self.target_ratio and ballratio < 1.0/self.target_ratio:
                     largest_size = cv2.contourArea(contour)
                     largest_index = index
                     
         if largest_size > 0:
             if w > self._MaxWidth:
                 moments = cv2.moments(contours[largest_index])
                 if moments['m00'] != 0:
                     self.xbar = 2.0*moments['m10']/moments['m00']/self._width - 1.0
                     self.ybar = 2.0*moments['m01']/moments['m00']/self._height - 1.0
                     x,y,w,h = cv2.boundingRect(contours[largest_index])
                     self.diam = (w + h)/ self._width
                     ball_found = True
                                 
     if not ball_found:
         # No ball found so set the member variables to invalid values
         self.xbar = 99.0
         self.ybar = 99.0
         self.diam = 99.0
     # Return the frame, the contours and largest image in case we
     # want to show them on the screen
     return (frame, contours, largest_index)
Example #27
def getRects(ctrs, imageOut=None):
  i = 1
  rectList = []
  #print "getRects(): {0} contours".format(len(ctrs[0]))
  for ct in ctrs[0]:
    #ct = ct.astype(np.int32)
    bbox = cv2.boundingRect(ct)
    x, y, w, h = bbox

    length = ""
    #process only vertical rectagles (ie, w<h) with w and h > 1
    if w < h and w > 30 and h > 70:
      #print i, ". ", len(ct), " -- ", cv2.boundingRect(ct), (x+w/2), cv2.minAreaRect(ct)
      
      #dist = 320-(x+w/2)
      #direction = 1
      #if dist < 0:
      #  direction = -1
      #print "Distance to center: ", dist, "pixels -- ", dist*0.0192, "inches --", dist*0.0192*1622/9.89,"revolutions"
      
      #if (x < 320) and ((x+w) > 320):
      if h > 173:
        length = "large"
      elif h > 140:
        length = "medium"
      elif h > 100:
        length = "small"
      #print i, " : ", cv2.boundingRect(ct), " -- ", length, "---", x, x+w, y, h
      
      #color detection code here... 
      color = "red"
      
      rectList.append([cv2.boundingRect(ct), cv2.minAreaRect(ct),length, color])
      
      if imageOut is not None:
        clr=(random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
        #cv2.drawContours(image=imageOut, contours=ct, contourIdx=-1, color=clr , thickness=-1)
        cv2.rectangle(imageOut, (x,y), (x+w,y+h), clr, 5)
        #cv2.fillConvexPoly(imageOut, ct, clr)
        cv2.rectangle(imageOut, (x+w//2-3,y), (x+w//2+3,y+h), (255,255,255), -1)
        cv2.rectangle(imageOut, (x,y+h//2-3), (x+w,y+h//2+3), (255,255,255), -1)
        rotRect = cv2.minAreaRect(ct)
        box = cv2.boxPoints(rotRect)  # cv2.cv.BoxPoints belongs to the removed old cv API
        box = np.int0(box)
        #print box
        #cv2.drawContours(imageOut, [box], 0, (0,0,255),2)
      
      i = i + 1
  
  if imageOut is not None:
    cv2.rectangle(imageOut, (318,0), (322,640), (255,255,255), -1)
    #cv2.imshow("Rects", imageOut)
  #print "done"
  
  ## sort rectList by the first tuple - so that they are from left to right in image.
  rectList.sort(key=lambda tup: tup[0])
  
  return rectList
Example #28
def text_regions(image):
    img = cv2.imread(image)
    img = imutils.resize(img,width = 1000)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    joining_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(10,5))

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #Detects dark regions, in this case writing
    black = cv2.morphologyEx(img_gray, cv2.MORPH_BLACKHAT, kernel)

    #Finds areas within dark regions that have vertical edges
    gradX = cv2.Sobel(black, ddepth = cv2.CV_32F, dx = 1, dy =0, ksize = -1)
    gradX= np.absolute(gradX)
    (minVal, maxVal) = (np.min(gradX), np.max(gradX))
    gradX = (255*((gradX - minVal)/(maxVal - minVal))).astype("uint8")
    gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, kernel)

    thresh = cv2.threshold(gradX, 127 , 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, joining_kernel)

    cv2.imshow("thresh", thresh)
    cv2.waitKey(0)

    contours, hierarchy = cv2.findContours(thresh,
        cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    dollars = []
    for contour in contours:
        [x,y,w,h] = cv2.boundingRect(contour)
        #get rid of very large and small contours
        if w < 60 or w > 75 or h < 13 or h > 18 :
            continue
        #discard contours that can't be the money region
        #(the amount box sits on the right side of the check)
        if x < 500:
            continue
        dollars = img_gray[y-10:y+h+10, x-10:x+w+10]
        cv2.imshow("dollars", dollars)
        cv2.waitKey(0)

    #detecting contours in the dollars field
    ret, im_th = cv2.threshold(dollars, 90, 255, cv2.THRESH_BINARY_INV)
    ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    numbers = []
    for ctr in ctrs:
        [x,y,w,h] = cv2.boundingRect(ctr)
        print("%d %d" %(w,h))
        if w < 7 and h < 7: #ignore very small regions
            continue
        temp = dollars[y-3:y+h+3, x-2:x+w+2]
        temp = cv2.resize(temp,(28,28), interpolation = cv2.INTER_AREA)
        cv2.imshow("dollars", temp)
        cv2.waitKey(0)
        numbers.append(temp)

    return numbers
Example #29
def drawStuff(image, summaries):
    #load image
    orig = cv2.imread(image,cv2.IMREAD_UNCHANGED)
    img = cv2.imread(image,cv2.IMREAD_UNCHANGED)
    
    #autocanny
    edges = auto_canny(img) #returns edged image
    
    ret,thresh = cv2.threshold(edges,127,255,0)
    contours,hierarchy = cv2.findContours(thresh, 1, 2)
    print(len(contours))
    
    for i in range(0, len(contours)-1):
        cnt = contours[i]
        x,y,w,h = cv2.boundingRect(cnt)
        r = cv2.boundingRect(cnt)

        a = buildSobelMap(orig[r[1]:r[1]+r[3], r[0]:r[0]+r[2]])
        b = buildNgMat(a)
        feature = getNGFeatureVector(b)
        
        output = io.StringIO()  # Python 3: io.StringIO replaces the old StringIO module
        for i in range(0,len(feature)):
            output.write(str(feature[i]))
            output.write(",")
        output.write("'?'")
        content = output.getvalue()
        output.close()
        
        split = content.split(",")
        split2 =([s.replace('\'', '') for s in split])
        
        for i in range(0,(len(split2)-1)):
            d=0
            a = split2[i].replace('[','')
            split2[i] = a
            b = split2[i].replace(']','')
            split2[i] = b
            c = split2[i].split(" ")
            
            for j in range(0,len(c)):
                d+=fn(c[j])
                split2[i]=d/3
                
        
        
        pred = predict(summaries,split2)
        if (pred==1.0):
            cv2.rectangle(orig,(x,y),(x+w,y+h),(225,0,0))
        elif (pred==0.0):
            cv2.rectangle(orig,(x,y),(x+w,y+h),(0,0,225))

        #cv2.rectangle(orig,(x,y),(x+w,y+h),(0,0,255))
    
    #display
    cv2.imshow(image,orig)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #30
def extract_digits_and_symbols(image, charCnts, minW=5, minH=15):
	# grab the internal Python iterator for the list of character
	# contours, then  initialize the character ROI and location
	# lists, respectively
	charIter = charCnts.__iter__()
	rois = []
	locs = []
 
	# keep looping over the character contours until we reach the end
	# of the list
	while True:
		try:
			# grab the next character contour from the list, compute
			# its bounding box, and initialize the ROI
			c = next(charIter)
			(cX, cY, cW, cH) = cv2.boundingRect(c)
			roi = None
 
			# check to see if the width and height are sufficiently
			# large, indicating that we have found a digit
			if cW >= minW and cH >= minH:
				# extract the ROI
				roi = image[cY:cY + cH, cX:cX + cW]
				rois.append(roi)
				locs.append((cX, cY, cX + cW, cY + cH))
        
			# otherwise, we are examining one of the special symbols
			else:
				# MICR symbols include three separate parts, so we
				# need to grab the next two parts from our iterator,
				# followed by initializing the bounding box
				# coordinates for the symbol
				parts = [c, next(charIter), next(charIter)]
				(sXA, sYA, sXB, sYB) = (np.inf, np.inf, -np.inf,
					-np.inf)
 
				# loop over the parts
				for p in parts:
					# compute the bounding box for the part, then
					# update our bookkeeping variables
					(pX, pY, pW, pH) = cv2.boundingRect(p)
					sXA = min(sXA, pX)
					sYA = min(sYA, pY)
					sXB = max(sXB, pX + pW)
					sYB = max(sYB, pY + pH)
 
				# extract the ROI
				roi = image[sYA:sYB, sXA:sXB]
				rois.append(roi)
				locs.append((sXA, sYA, sXB, sYB))
		# we have reached the end of the iterator; gracefully break
		# from the loop
		except StopIteration:
			break
 
	# return a tuple of the ROIs and locations
	return (rois, locs)
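A usage sketch following the MICR reference-font workflow this function appears to come from; the file name and the OpenCV 4.x findContours signature are assumptions:

import cv2

ref = cv2.imread('micr_reference.png', 0)  # hypothetical reference-font image
ref = cv2.threshold(ref, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
charCnts, _ = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
charCnts = sorted(charCnts, key=lambda c: cv2.boundingRect(c)[0])  # left to right
rois, locs = extract_digits_and_symbols(ref, charCnts)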
Example #31
def analyse_image(image_path):
    # print ("1")
    # image_path="IMG-20180814-WA0002.jpg";
    if not os.path.exists(image_path):
        print("Image path does not exist")
        return "{\"Error\":\"path_does_not_exist\"}"
    image = cv2.imread(image_path)
    if image is None:
        return "{\"Error\":\"the file is not a image\"}"

    image_candidates, sidx, cidx = get_objects(image, 13, 13, 20, 25, 20)
    if len(image_candidates) == 0:
        return "{\"Error\":\"no hierarchy\"}"

    if len(image_candidates) != 2:
        for i in range(1, 4):
            if i == 1:
                image_candidates, sidx, cidx = get_objects(
                    image, 13, 13, 20, 25, 10)
                if len(image_candidates) == 2:
                    break
            elif i == 2:
                image_candidates, sidx, cidx = get_objects(
                    image, 13, 13, 20, 25, 20)
                if len(image_candidates) == 2:
                    break

            elif i == 3:
                image_candidates, sidx, cidx = get_objects(
                    image, 13, 13, 20, 25, 65)
                if len(image_candidates) == 2:
                    break

    if len(image_candidates) != 2:
        return "{\"Error\":\"objects not found\"}"

    Strip = image_candidates[sidx]
    color_checker = image_candidates[cidx]
    #show_Image(image_candidates[1],'image_candidates[1]')
    if Strip is None:
        return "{\"Error\":\"Strip not found\"}"
    #getting the position of 'Siemens' text in the image
    Siemen_x, Siemens_y = get_strip_position(Strip)
    if Siemen_x == 0 and Siemens_y == 0:
        return "{\"Error\":\"no hierarchy in Strip\"}"

    Y, X, Z = np.shape(Strip)
    Sort = get_sort_direction(X, Y, Siemen_x, Siemens_y)
    Strip_candidates = Strip_analysis(Strip, 23, 23, 20, 35, 5, 12)
    if len(Strip_candidates) != 11:
        for i in range(1, 14):
            print(i)
            if i == 1:
                Strip_candidates = Strip_analysis(Strip, 23, 23, 20, 35, 5, 12)
                if len(Strip_candidates) == 11:
                    break

            elif i == 2:
                Strip_candidates = Strip_analysis(Strip, 11, 11, 6, 12, 12, 15)
                if len(Strip_candidates) == 11:
                    break
            if i == 3:
                Strip_candidates = Strip_analysis(Strip, 21, 21, 5, 4, 7, 9)
                if len(Strip_candidates) == 11:
                    break
            elif i == 4:
                Strip_candidates = Strip_analysis(Strip, 15, 15, 0, 13, 12, 15)
                if len(Strip_candidates) == 11:
                    break

            elif i == 5:
                #dont change 3159
                Strip_candidates = Strip_analysis(Strip, 27, 27, 7, 29, 9, 11)
                if len(Strip_candidates) == 11:
                    break
            if i == 6:
                Strip_candidates = Strip_analysis(Strip, 25, 25, 13, 25, 5, 8)
                if len(Strip_candidates) == 11:
                    break
            if i == 7:
                Strip_candidates = Strip_analysis(Strip, 23, 23, 15, 25, 5, 12)
                if len(Strip_candidates) == 11:
                    break

            if i == 8:
                Strip_candidates = Strip_analysis(Strip, 61, 61, 2, 25, 5, 10)
                if len(Strip_candidates) == 11:
                    break
            if i == 9:
                # dont change 3159
                Strip_candidates = Strip_analysis(Strip, 61, 61, 2, 35, 9, 12)
                if len(Strip_candidates) == 11:
                    break
            if i == 10:
                Strip_candidates = Strip_analysis(Strip, 43, 43, 3, 25, 5, 15)
                if len(Strip_candidates) == 11:
                    break
            if i == 11:
                Strip_candidates = Strip_analysis(Strip, 31, 31, 2, 25, 5, 7)
                if len(Strip_candidates) == 11:
                    break
            if i == 12:
                Strip_candidates = Strip_analysis(Strip, 31, 31, 2, 18, 5, 7)
                if len(Strip_candidates) == 11:
                    break
                #works for latest
            if i == 13:
                Strip_candidates = Strip_analysis(Strip, 31, 31, 5, 25, 10, 12)
                if len(Strip_candidates) == 11:
                    break

    if len(Strip_candidates) != 11:
        return "{\"Error\":\"Strip_segmentation failed\"}"

    #sorrt the contours based on the decided criteria
    Strip_cnts, bounding_boxes = sort_contours(Strip_candidates, Sort)

    idx = 0
    pad_dict = {}
    padlist = [
        "LEU", "NIT", "URO", "PRO", "PH", "BLO", "SG", "KET", "BIL", "GLU", "k"
    ]
    # print("length",len(Strip_cnts))
    for c2 in Strip_cnts:

        Strip_x, Strip_y, Strip_w, Strip_h = cv2.boundingRect(c2)
        pad = Strip[Strip_y:Strip_y + Strip_h, Strip_x:Strip_x + Strip_w]
        M = cv2.moments(c2)

        cx = int((Strip_x + Strip_x + Strip_w) / 2)
        cy = int((Strip_y + Strip_y + Strip_h) / 2)

        idx += 1
        if idx < 12:
            bgr = Strip[cy, cx]
            [mean_red, mean_green, mean_blue] = get_BGR(Strip, cx, cy)
            pad_dict[padlist[idx - 1]] = [mean_red, mean_green, mean_blue]

            #show_Image(pad, "pad" + str(idx))

    #print(pad_dict)

    colorchecker = color_checker
    gray = cv2.cvtColor(colorchecker, cv2.COLOR_BGR2GRAY)

    cnts, candidates = get_the_contours(colorchecker, gray, 7, 7, 13, 13, 0,
                                        20, 8, 10)

    if len(candidates) != 30:
        for i in range(1, 9):
            print(i)
            if i == 1:
                cnts, candidates = get_the_contours(colorchecker, gray, 5, 5,
                                                    11, 11, 0, 20, 8, 10)
                if len(candidates) == 30:
                    # print("breaking here")
                    break
            elif i == 2:
                cnts, candidates = get_the_contours(colorchecker, gray, 9, 9,
                                                    15, 15, 4, 20, 8, 10)
                # dont change.. for 3159
                if len(candidates) == 30:
                    break
            elif i == 3:
                cnts, candidates = get_the_contours(colorchecker, gray, 7, 7,
                                                    21, 21, 9, 20, 5, 8)
                if len(candidates) == 30:
                    break
            elif i == 4:
                cnts, candidates = get_the_contours(colorchecker, gray, 7, 7,
                                                    13, 13, 4, 20, 8, 10)
                if len(candidates) == 30:
                    break
            #
            elif i == 5:
                cnts, candidates = get_the_contours(colorchecker, gray, 7, 7,
                                                    21, 21, 5, 16, 5, 8)
                if len(candidates) == 30:
                    #dont change.. for 3159
                    break
            if i == 6:
                cnts, candidates = get_the_contours(colorchecker, gray, 6, 6,
                                                    17, 17, 5, 25, 5, 7)
                if len(candidates) == 30:
                    break
            # elif i == 7:
            #     cnts, candidates = get_the_contours(colorchecker, gray, 9,9, 15, 15, 10,10, 5,8)
            #     if len(candidates) == 30:
            #         break

    if len(candidates) != 30:
        return "{\"Error\":\"color_checker segmentation failed\"}"

    Maxcant = max(cnts, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(Maxcant)

    colorlist = []
    if (w > h):

        result = sort_by_row_col(deepcopy(candidates), 5, 6)
        if len(result) == 30:  #else should be handled from here
            X25_Ratio = get_Red_Green_Ratio(colorchecker, result[24].cX,
                                            result[24].cY)
            X6_Ratio = get_Red_Green_Ratio(colorchecker, result[5].cX,
                                           result[5].cY)
            if X25_Ratio > 2:
                colorlist = [
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                    18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
                ]
                # print("x25_yes")
            elif X6_Ratio > 2:
                colorlist = [
                    30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
                    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
                ]
                # print("x6_yes")

    elif (h > w):
        result = sort_by_row_col(deepcopy(candidates), 6, 5)
        if len(result) == 30:
            X1_Ratio = get_Red_Green_Ratio(colorchecker, result[0].cX,
                                           result[0].cY)
            X30_Ratio = get_Red_Green_Ratio(colorchecker, result[29].cX,
                                            result[29].cY)

            if X1_Ratio > 2:
                colorlist = [
                    25, 19, 13, 7, 1, 26, 20, 14, 8, 2, 27, 21, 15, 9, 3, 28,
                    22, 16, 10, 4, 29, 23, 17, 11, 5, 30, 24, 18, 12, 6
                ]
                # print("x1_yes")
            elif X30_Ratio > 2:
                colorlist = [
                    6, 12, 18, 24, 30, 5, 11, 17, 23, 29, 4, 10, 16, 22, 28, 3,
                    9, 15, 21, 27, 2, 8, 14, 20, 26, 1, 7, 13, 19, 25
                ]
                # print("x29_yes")

    idx = 0
    color_dict = {}

    if len(colorlist) != 30:
        return "{\"Error\":\"color_checker segmentation failed\"}"

    if len(result) == 30:
        for con in result:
            if len(colorlist) == 30:
                color_dict[colorlist[idx]] = get_BGR(colorchecker, con.cX,
                                                     con.cY)
            x, y, w, h = cv2.boundingRect(con.contour)
            idx += 1

    print(color_dict)

    pad = pd.DataFrame.from_dict(
        pad_dict,
        orient='index',
        columns=['Mean_red', 'Mean_Green', 'Mean_Blue'])
    print(pad)
    pad.iloc[:, 0] = pad.iloc[:, 0].div(int(pad.loc['k', 'Mean_red']))
    pad.iloc[:, 1] = pad.iloc[:, 1].div(int(pad.loc['k', 'Mean_Green']))
    pad.iloc[:, 2] = pad.iloc[:, 2].div(int(pad.loc['k', 'Mean_Blue']))

    color_checker_CC = pd.DataFrame.from_dict(
        color_dict,
        orient='index',
        columns=['Mean_red', 'Mean_Green', 'Mean_Blue'])
    color_checker_CC.iloc[:, 0] = color_checker_CC.iloc[:, 0].div(
        int(color_checker_CC.iloc[7, 0]))
    color_checker_CC.iloc[:, 1] = color_checker_CC.iloc[:, 1].div(
        int(color_checker_CC.iloc[7, 1]))
    color_checker_CC.iloc[:, 2] = color_checker_CC.iloc[:, 2].div(
        int(color_checker_CC.iloc[7, 2]))

    color_checker_CS = pd.read_csv(
        'ColorChecker30.csv',
        header=None,
        skiprows=2,
        names=['CC_no', 'Mean_red', 'Mean_Green', 'Mean_Blue'],
        index_col='CC_no')
    color_checker_CS.iloc[:, 0] = color_checker_CS.iloc[:, 0].div(
        int(color_checker_CS.iloc[7, 0]))
    color_checker_CS.iloc[:, 1] = color_checker_CS.iloc[:, 1].div(
        int(color_checker_CS.iloc[7, 1]))
    color_checker_CS.iloc[:, 2] = color_checker_CS.iloc[:, 2].div(
        int(color_checker_CS.iloc[7, 2]))

    diff = color_checker_CC.sub(color_checker_CS)

    corrected_dict = {}
    look_up_dict = {
        'LEU': {
            'Neg': 0.9079,
            'Trace': 0.82,
            'Small': 0.6627,
            'Moderate': 0.4995,
            'Large': 0.348
        },
        'NIT': {
            'Negative': 0.9793,
            'Positive': 0.8947
        },
        'URO': {
            0.2: 0.8123,
            1: 0.7053,
            2: 0.626,
            4: 0.5384,
            '> 8.0': 0.4817
        },
        'PRO': {
            'Neg': 0.8871,
            'Trace': 0.7117,
            30: 0.6096,
            100: 0.5176,
            '>=300': 0.4228
        },
        'BLO': {
            'Neg': 0.6531,
            'Trace-Lysed': 0.5031,
            'Moderate': 0.2289
        },
        'SG': {
            '<=1.00': 0.2173,
            '1.01': 0.2781,
            '1.015': 0.4088,
            '1.02': 0.4522,
            '>=1.030': 0.6096
        },
        'KET': {
            'Neg': 0.7519,
            'Trace': 0.6063,
            15: 0.5122,
            40: 0.3934,
            80: 0.2534,
            '>=160': 0.1685
        },
        'BIL': {
            'Neg': 0.9404,
            'Small': 0.8072,
            'Moderate': 0.7263,
            'Large': 0.6186
        },
        'GLU': {
            'Neg': 0.7659,
            100: 0.7346,
            250: 0.6903,
            500: 0.3729,
            '>=1000': 0.3118
        },
        'PH': {
            '5.0': 0.8893,
            5.5: 0.8792,
            6.0: 0.8336,
            6.5: 0.768,
            7.0: 0.561,
            7.5: 0.4238,
            8.5: 0.3273
        }
    }

    get__values(corrected_dict, "GLU", 'Mean_Green', 29, 26, 1, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "BIL", 'Mean_Green', 28, 1, 19, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "KET", 'Mean_Green', 2, 24, 1, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "SG", 'Mean_Green', 26, 29, 28, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "BLO", 'Mean_Green', 28, 19, 29, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "PH", 'Mean_red', 12, 28, 26, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "PRO", 'Mean_red', 29, 26, 6, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "URO", 'Mean_Green', 2, 25, 24, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "NIT", 'Mean_Green', 2, 28, 12, diff,
                color_checker_CC, pad, look_up_dict)
    get__values(corrected_dict, "LEU", 'Mean_red', 1, 30, 4, diff,
                color_checker_CC, pad, look_up_dict)

    json_string = json.dumps(corrected_dict)

    return json_string
Example #32
    ust_deger = np.array([20, 255, 255])  #20 255 255

    renk_filtresi_sonuc = cv2.inRange(kesilmis_kare_hsv, alt_deger, ust_deger)
    renk_filtresi_sonuc = cv2.morphologyEx(renk_filtresi_sonuc,
                                           cv2.MORPH_CLOSE, kernel)

    sonuc = kesilmis_kare.copy()

    cnts, hierarchy = cv2.findContours(renk_filtresi_sonuc, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_NONE)
    max_genislik = 0
    max_uzunluk = 0
    max_index = -1
    for t in range(len(cnts)):
        cnt = cnts[t]
        x, y, w, h = cv2.boundingRect(cnt)
        if (w > max_genislik and h > max_uzunluk):
            max_uzunluk = h
            max_genislik = w
            max_index = t

    if (len(cnts) > 0):
        x, y, w, h = cv2.boundingRect(cnts[max_index])
        cv2.rectangle(sonuc, (x, y), (x + w, y + h), [0, 255, 0], 2)
        el_resim = renk_filtresi_sonuc[y:y + h, x:x + w]
        cv2.imshow("el resim", el_resim)

    cv2.imshow("kare", kare)
    cv2.imshow("kesilmiş", kesilmis_kare)
    cv2.imshow("renk filtresi ", renk_filtresi_sonuc)
    cv2.imshow("sonuc", sonuc)
Example #33
def letter_seg(lines_img, x_lines, i):
    copy_img = lines_img[i].copy()
    x_linescopy = x_lines[i].copy()
    
    letter_img = []
    letter_k = []
    
    chalu_img, contours, hierarchy = cv2.findContours(copy_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)   
    for cnt in contours:
        if cv2.contourArea(cnt) > 5:
            x,y,w,h = cv2.boundingRect(cnt)
            # letter_img.append(lines_img[i][y:y+h, x:x+w])
            letter_k.append((x,y,w,h))

    letter_width_sum = 0
    count = 0
    for cnt in contours:
        if cv2.contourArea(cnt) > 20:
            x, y, w, h = cv2.boundingRect(cnt)
            letter_width_sum += h
            count += 1

    #mean_height = letter_width_sum/count

    letter = sorted(letter_k, key=lambda student: student[0])

    # merge vertically split pieces of the same letter; written as a while-loop
    # since `letter` shrinks as pieces are popped mid-iteration
    e = 0
    while e < len(letter) - 1:
        if abs(letter[e][0] - letter[e+1][0]) <= 2:
            x,y,w,h = letter[e]
            x2,y2,w2,h2 = letter[e+1]
            if h >= h2:
                letter[e] = (x,y2,w,h+h2)
                letter.pop(e+1)
            else:
                letter[e+1] = (x2,y,w2,h+h2)
                letter.pop(e)
        e += 1

    for e in range(len(letter)):
        letter_img_tmp = lines_img[i][letter[e][1]-0:letter[e][1]+letter[e][3]+0,letter[e][0]-0:letter[e][0]+letter[e][2]+0]
        letter_img_tmp = cv2.resize(letter_img_tmp, dsize=(28, 28), interpolation=cv2.INTER_AREA)
        width = letter_img_tmp.shape[1]
        height = letter_img_tmp.shape[0]
        count_y = np.zeros(shape=(width))
        for x in range(width):
            for y in range(height):
                if letter_img_tmp[y][x] == 255:
                    count_y[x] = count_y[x] +1
        print(count_y)
        max_list = []
        for z in range(len(count_y)):
            if z>=5 and z<= len(count_y)-6:
                if max(count_y[z-5:z+6]) == count_y[z] and count_y[z] >= 2:
                    max_list.append(z)
            elif z<5:
                if max(count_y[0:z+6]) == count_y[z] and count_y[z] >= 2:
                    max_list.append(z)
            elif z > len(count_y)-6:
                if max(count_y[z-5:]) == count_y[z] and count_y[z] >= 2:
                    max_list.append(z)
        print(max_list)
        rem_list = []
        final_max_list = []
        for z in range(len(max_list)):
            if z > 0:
                if max_list[z]-max_list[z-1] <= 3:
                    rem_list.append(z-1)
        for z in range(len(max_list)):
            if z not in rem_list:
                final_max_list.append(max_list[z])
        print(final_max_list)
        if len(final_max_list) <= 1:
            print(False)
        else:
            max_len = len(final_max_list) - 1
            for j in range(max_len):
                segment = count_y[final_max_list[j]:final_max_list[j+1]]
                min_list = sorted(segment)[:3]
                avg = sum(min_list)/len(min_list)
                print(avg)



    x_linescopy.pop(0)
    word = 1
    letter_index = 0
    for e in range(len(letter)):
        #print(str(letter[e][0]) + ',' + str(letter[e][1]) + ',' + str(letter[e][2]) + ',' + str(letter[e][3]) + ',' + str(e))
        if(letter[e][0]<x_linescopy[0]):
            letter_index += 1
            letter_img_tmp = lines_img[i][letter[e][1]-0:letter[e][1]+letter[e][3]+5,letter[e][0]-2:letter[e][0]+letter[e][2]+2]
            letter_img = cv2.resize(letter_img_tmp, dsize =(28, 28), interpolation = cv2.INTER_AREA)
            cv2.imwrite('./segmented_img/img1/'+str(i+1)+'_'+str(word)+'_'+str(letter_index)+'.jpg', 255-letter_img)
        else:
            x_linescopy.pop(0)
            word += 1
            letter_index = 1
            letter_img_tmp = lines_img[i][letter[e][1]-0:letter[e][1]+letter[e][3]+5,letter[e][0]-2:letter[e][0]+letter[e][2]+2]
            letter_img = cv2.resize(letter_img_tmp, dsize =(28, 28), interpolation = cv2.INTER_AREA)
            cv2.imwrite('./segmented_img/img1/'+str(i+1)+'_'+str(word)+'_'+str(letter_index)+'.jpg', 255-letter_img)
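# Usage sketch (assumptions, not from the source): letter_seg expects lines_img
# to hold binarized line images (white glyphs on black) and x_lines to hold
# per-line x coordinates whose first entry (the left edge) is popped, followed
# by word boundaries and a sentinel past the right edge; the directory
# ./segmented_img/img1/ must already exist for the imwrite calls.
import cv2
import numpy as np

line = np.zeros((60, 400), dtype=np.uint8)
cv2.putText(line, "AB CD", (10, 45), cv2.FONT_HERSHEY_SIMPLEX, 1.5, 255, 3)

lines_img = [line]
x_lines = [[0, 120, 9999]]   # left edge, one word break at x=120, sentinel
letter_seg(lines_img, x_lines, 0)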
Beispiel #34
0
def upload_file():
    img_raw = parse_image(request.get_data())
    nparr = np.frombuffer(img_raw, np.uint8)  # np.fromstring is deprecated
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    blurred = cv2.GaussianBlur(gray, (5,5), 0)
    edged = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 4)
    # findContours returned 3 values on OpenCV 3.x and 2 on 2.x/4.x;
    # index [-2] picks the contour list on any version
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    cnts = sorted([(c, cv2.boundingRect(c)[0]) for c in  cnts], key=lambda x: x[1])

    math_detect = []

    for (c, _) in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        if w >=5 and h>5:
            roi = edged[y:y+int(1.2*h), x:x+w]
            thresh = roi.copy()

            thresh = deskew(thresh, 28)
            thresh = center_extent(thresh, (28, 28))
            thresh = np.reshape(thresh, (28, 28, 1))
            thresh = thresh / 255
            predictions = model.predict(np.expand_dims(thresh, axis=0))
            digit = np.argmax(predictions[0])
            cv2.rectangle(image, (x,y), (x+w, y+h), (0,255,0), 2)
            cv2.putText(image, label_names[digit], (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,0), 2)

            if label_names[digit] == "1":
                countt = 0
                mem = []
                for i in range(len(thresh[9])):
                    if thresh[9][i] > 0: 
                        countt +=1
                        mem.append(i)
                if countt >= 3:
                    math_detect.append("1")
                else:
                    math_detect.append("/")

            else: 
                math_detect.append(label_names[digit])

    def convert_math(math_detect):
        for i in range(0, len(math_detect)):

            if math_detect[i] == '10':
                math_detect[i] = '*'
            elif math_detect[i] == '11':
                math_detect[i] = '-'
            elif math_detect[i] == '12':
                math_detect[i] = '+'
           
        return math_detect


    def calculate_string(math_detect):
        # joins the recognised tokens into one expression string;
        # the string is returned as-is, not evaluated
        math_detect = convert_math(math_detect)
        result = ''.join(str(item) for item in math_detect)
        return result

    result = calculate_string(math_detect)

    return result
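# The handler above relies on names defined elsewhere in the original app
# (parse_image, deskew, center_extent, model, label_names); request.get_data()
# suggests Flask. A hedged sketch of how it might be wired up, with a made-up
# route path:
from flask import Flask, request

app = Flask(__name__)
app.add_url_rule("/upload", view_func=upload_file, methods=["POST"])

if __name__ == "__main__":
    app.run()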
Beispiel #35
0
def convert_object(file_path, screen_size=None, new_file_suffix="scanned"):
    """ Identifies 4 corners and does four point transformation """
    debug = log.level == logging.DEBUG
    image = cv2.imread(str(file_path))

    # convert the image to grayscale, blur it, and find edges
    # in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(
        gray, 11, 17, 17
    )  # TODO: 11 works for offline images; may need tuning down to 5 for online use

    gray = cv2.medianBlur(gray, 5)
    edged = cv2.Canny(gray, 30, 400)

    if debug:
        previewImage("Edged Image", edged)

    # find contours in the edged image, keep only the largest
    # ones, and initialize our screen contour

    contours, hierarchy = cv2.findContours(
        edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
    )

    log.debug("Contours found: %s", len(contours))


    # approximate the contour
    ContourArea = namedtuple('ContourArea', ['curve', 'area'])
    contourAreas = [ContourArea(curve=x, area=cv2.contourArea(x))
                    for x in contours]
    contourAreas = sorted(contourAreas, key=attrgetter('area'))

    if debug:
        previewContours(image, [x.curve for x in contourAreas])

    screens = []  # 4-point polygons representing possible screens (rectangles)
    for contour in contourAreas:
        peri = cv2.arcLength(contour.curve, True)
        polygon_less_vertices = cv2.approxPolyDP(contour.curve,
                                                 epsilon=0.02 * peri,  # approximation accuracy
                                                 closed=True)

        num_vertices = len(polygon_less_vertices)
        if num_vertices == 4:
            (x, y, width, height) = cv2.boundingRect(contour.curve)
            log.debug(f'x={x} y={y} width={width} height={height}')
            screens.append(Screen(fourpoints=polygon_less_vertices, width=width))

    if debug:
        log.debug(f"Screens found {len(screens)}: {screens}")
        previewContours(image, [x.fourpoints for x in screens])

    # find largest screen
    largest_screen = max(screens, key=attrgetter('width'))

    if debug:
        previewContours(image, [largest_screen.fourpoints])

    # now that we have our screen contour, we need to determine
    # the top-left, top-right, bottom-right, and bottom-left
    # points so that we can later warp the image -- we'll start
    # by reshaping our contour to be our finals and initializing
    # our output rectangle in top-left, top-right, bottom-right,
    # and bottom-left order
    pts = largest_screen.fourpoints.reshape(4, 2)
    log.debug("Found bill rectagle at %s", pts)
    rect = order_points(pts)
    log.debug(rect)

    warped = transform_to_four_points(image, pts)

    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    # Replacement for `skimage.exposure.rescale_intensity`
    # Contrast Limited Adaptive Histogram Equalization
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8, 8))
    warp = clahe.apply(warp)

    # show the original and warped images
    if debug:
        previewImage("Original", image)
        previewImage("warp", warp)

    warp_file = str(file_path.parent / f"{file_path.stem}-{new_file_suffix}.jpg")
    cv2.imwrite(warp_file, warp)
    log.debug(f"Result: {warp_file}")

    if screen_size:
        return cv2.cvtColor(
            cv2.resize(warp, screen_size), cv2.COLOR_GRAY2RGB
        )
    else:
        return cv2.cvtColor(warp, cv2.COLOR_GRAY2RGB)
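# Usage sketch (assumptions, not from the source): file_path is used as a
# pathlib.Path (.parent, .stem), and Screen, order_points,
# transform_to_four_points and the preview helpers come from elsewhere in the
# module. The call writes <stem>-scanned.jpg next to the input file.
from pathlib import Path

preview = convert_object(Path("bill.jpg"), screen_size=(480, 640))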
Beispiel #36
0
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2)  # 1, 1 = ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV

#################      Now finding Contours         ###################

# OpenCV 3.x returns (image, contours, hierarchy); taking the last two values
# also works on 2.x and 4.x
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]

samples = np.empty((0, 100))
responses = []
keys = [i for i in range(48, 58)]

for cnt in contours:
    if cv2.contourArea(cnt) > 50:
        [x, y, w, h] = cv2.boundingRect(cnt)

        if h > 28:
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 2)
            roi = thresh[y:y + h, x:x + w]
            roismall = cv2.resize(roi, (10, 10))
            cv2.imshow('norm', im)
            key = cv2.waitKey(0)

            if key == 27:  # (escape to quit)
                sys.exit()
            elif key in keys:
                responses.append(int(chr(key)))
                sample = roismall.reshape((1, 100))
                samples = np.append(samples, sample, 0)
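# The snippet stops mid-collection. In the classic OpenCV kNN training recipe
# this pattern ends by flattening the labels and saving both arrays; a hedged
# sketch of that assumed continuation (file names are the tutorial defaults):
import numpy as np

responses = np.array(responses, np.float32)
responses = responses.reshape((responses.size, 1))
np.savetxt('generalsamples.data', samples)
np.savetxt('generalresponses.data', responses)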
Beispiel #37
0
def t_crop(crop_img, img):
    grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)

    # applying gaussian blur
    value = (35, 35)
    blurred = cv2.GaussianBlur(grey, value, 0)

    # thresholding: Otsu's binarization method
    _, thresh1 = cv2.threshold(blurred, 127, 255,
                               cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # show thresholded image
    cv2.imshow('Thresholded', thresh1)

    # check the OpenCV version to avoid an unpacking error: findContours
    # returned 3 values in OpenCV 3.x but 2 values in 2.x and 4.x
    major = cv2.__version__.split('.')[0]

    if major == '3':
        image, contours, hierarchy = cv2.findContours(thresh1.copy(),
                                                      cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_NONE)
    else:
        contours, hierarchy = cv2.findContours(thresh1.copy(),
                                               cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)

    # find contour with max area
    cnt = max(contours, key=cv2.contourArea)

    # create bounding rectangle around the contour (can skip below two lines)
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)

    # finding convex hull
    hull = cv2.convexHull(cnt)

    # drawing contours
    drawing = np.zeros(crop_img.shape, np.uint8)
    cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)

    # finding convex hull
    hull = cv2.convexHull(cnt, returnPoints=False)

    # finding convexity defects
    defects = cv2.convexityDefects(cnt, hull)
    count_defects = 0
    cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

    # applying Cosine Rule to find angle for all defects (between fingers)
    # with angle > 90 degrees and ignore defects
    for i in range(0 if defects is None else defects.shape[0]):  # convexityDefects may return None
        s, e, f, d = defects[i, 0]

        start = tuple(cnt[s][0])
        end = tuple(cnt[e][0])
        far = tuple(cnt[f][0])
        if d > 9000:
            count_defects += 1
        # find length of all sides of triangle
        '''a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
        b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
        c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)

        # apply cosine rule here
        angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57

        # ignore angles > 90 and highlight rest with red dots
        if angle <= 90:
            count_defects += 1
            cv2.circle(crop_img, far, 1, [0,0,255], -1)
        #dist = cv2.pointPolygonTest(cnt,far,True)

        # draw a line from start to end i.e. the convex points (finger tips)
        # (can skip this part)
        cv2.line(crop_img,start, end, [0,255,0], 2)
        cv2.circle(crop_img,far,5,[0,0,255],-1)'''

    # define actions required
    count_defects += 1
    if count_defects == 2 or count_defects == 1:
        #keyboard.press_and_release('enter')
        pyautogui.press('enter')

    else:
        cv2.putText(img,"0", (50, 50),\
            cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    ha = 0

    #time.sleep(1)
    # show appropriate images in windows
    #cv2.imshow('Gesture', img)
    all_img = np.hstack((drawing, crop_img))
    #cv2.imshow('Contours', all_img)
    print("thumbdown")
Beispiel #38
0
    blue = cv2.dilate(blue, kernal)
    res1 = cv2.bitwise_and(img, img, mask=blue)

    yellow = cv2.dilate(yellow, kernal)
    res2 = cv2.bitwise_and(img, img, mask=yellow)

    #Tracking the Red Color
    (_, contours, hierarchy) = cv2.findContours(red, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)

    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 300):

            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(img, "RED color", (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (0, 0, 255))

    #Tracking the Blue Color
    (_, contours, hierarchy) = cv2.findContours(blue, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 300):
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, "Blue color", (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (255, 0, 0))
    kernel = np.ones((5, 5), np.uint8)
    frame5 = cv2.dilate(frame4, kernel, iterations=4)
    # identify contours on the thresholded image
    contours, nada = cv2.findContours(frame5.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    frame6=frame0.copy()

    targets=[]
    for c in contours:
        if cv2.contourArea(c)<500:
            continue
        #contour data
        M=cv2.moments(c)
        cx=int(M['m10']/M['m00'])
        cy=int(M['m01']/M['m00'])
        x,y,w,h=cv2.boundingRect(c)
        rx=x+int(w/2)
        ry=y+int(h/2)
        ca=cv2.contourArea(c)
        #plot contour
        cv2.drawContours(frame6,[c],0,(0,0,255),2)
        cv2.rectangle(frame6,(x,y),(x+w,y+h),(0,255,0),2)
        cv2.circle(frame6,(cx,cy),2,(0,0,255),2)
        cv2.circle(frame6,(rx,ry),2,(0,255,0),2)
        #save contours
        targets.append((rx,ry,ca))
        
    area=sum([x[2] for x in targets])
    mx=0
    my=0
    if targets:
Beispiel #40
0
# blank out (here: set to mid-gray, 125) a border of about one eighth of the
# image's height and width
cut_num = [int(closing.shape[0] / 8), int(closing.shape[1] / 8)]
print(cut_num)
closing[:cut_num[0], :] = 125
closing[-cut_num[0]:, :] = 125
closing[:, :cut_num[1]] = 125
closing[:, -cut_num[1]:] = 125


# closing = cv.morphologyEx(y2, cv.MORPH_CLOSE, kernel)
contours, _ = cv.findContours(closing[:, :, 0], cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
center=[]
for i in range(len(contours)):
    cnt = contours[i]
    area = cv.contourArea(cnt)
    x, y, w, h = cv.boundingRect(cnt)


    # by 김주희 _ contour area _ 2020-07-02
    rect_area = w * h
    compare_area.append(rect_area)


    # try removing contours whose area is 1 - temporary
    # if rect_area == 1:
        # closing[x, y, 0]

    # by 김주희 _ width-to-height ratio _ 2020-07-02
    aspect_ratio = float(w)/h

Beispiel #41
0
def detect(s):
    cap = cv2.VideoCapture(s)
    frame = cap.read()[1]
    fshape = frame.shape
    fheight = fshape[0]
    fwidth = fshape[1]
    fgbg = cv2.createBackgroundSubtractorMOG2()
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 25, (fwidth, fheight))
    half_width = fwidth / 2
    count1 = 0
    count2 = 0
    start_time = time.time()
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        recent_time = time.time()
        # Total count of vehicles for 2 min
        if ((recent_time - start_time) > 120):
            #TODO : send count and timestamp to webpage for plotting
            count1 = 0
            count2 = 0
            start_time = recent_time

        # print(frame.shape)

        mask = fgbg.apply(frame)
        kernel = np.ones((10, 10), np.uint8)
        cv2.line(frame, (0, 250), (frame.shape[1], 250), (0, 255, 0), 2)
        # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        # index [-2] picks the contour list on both OpenCV 3.x and 4.x
        cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        green = (0, 255, 0)
        red = (0, 0, 255)
        text = "Number of vehicles"
        text1 = "Left side :" + str(count1)
        text2 = "Right side :" + str(count2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, text, (20, 30), font, 0.8, (255, 255, 0), 2)
        cv2.putText(frame, text1, (20, 60), font, 0.6, (255, 0, 255), 2)
        cv2.putText(frame, text2, (380, 60), font, 0.6, (255, 0, 255), 2)
        out.write(frame)
        for cnt in cnts:
            area = cv2.contourArea(cnt)
            if area < 1000:
                continue
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), green, 3)
            # cv2.putText(frame,str(area), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
            cnt_x = int(x + w / 2)
            cnt_y = int(y + h / 2)
            cv2.circle(frame, (cnt_x, cnt_y), 7, (255, 255, 255), -1)
            #left side
            if (cnt_y < 254 and cnt_y > 246 and cnt_x < half_width):
                cv2.line(frame, (0, 250), (frame.shape[1], 250), red, 2)
                count1 += 1
                # winsound.Beep(2000,500)
            #right side
            if (cnt_y < 254 and cnt_y > 246 and cnt_x > half_width):
                cv2.line(frame, (0, 250), (frame.shape[1], 250), red, 2)
                count2 += 1
                # winsound.Beep(2000,500)

        # print(count)

        cv2.imshow('frame', frame)
        # cv2.imshow('opening',opening)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break

    out.release()
    cap.release()
    cv2.destroyAllWindows()
    return count1, count2
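# Usage sketch: s may be a camera index or a video path; the call blocks until
# the stream ends or 'q' is pressed, then returns the per-side totals.
left_total, right_total = detect("traffic.mp4")   # hypothetical input file
print(left_total, right_total)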
Beispiel #42
0
def character(counter, marker, distance):
    print('Starting recognition thread')
    guesses = [0] * 35  # one vote counter per character: '1'-'9' and 'A'-'Z'

    for i in range(1, counter + 1):
        try:
            allContoursWithData = []  # declare empty lists
            validContoursWithData = []  # we will fill these shortly

            # set heights and width to be able to read the image when comparing to flatten images
            h = 30
            w = 30

            img = cv2.imread("colour%d.png" % i)

            height, width, numchannels = img.shape

            roi = img[int((height / 2) -
                          (height / 2) * 0.85):int((height / 2) +
                                                   (height / 2) * 0.85),
                      int((width / 2) -
                          (width / 2) * 0.85):int((width / 2) +
                                                  (width / 2) * 0.85)]

            resize = cv2.resize(roi, (100, 100))

            # Convert the image to grayscale and turn to outline of the letter
            gray = cv2.cvtColor(resize, cv2.COLOR_BGR2GRAY)

            newheight, newwidth = gray.shape

            # imgMaxContrastGrayscale = maximizeContrast(gray)

            ###########

            gauss = cv2.GaussianBlur(gray, (5, 5), 0)
            # thresh = cv2.adaptiveThreshold(gauss, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 0)
            kernel = np.ones((4, 4), np.uint8)
            # mask = cv2.inRange(gauss, 170, 255)
            edged = cv2.Canny(
                gauss, 10,
                30)  # the lower the value the more detailed it would be
            dilate = cv2.dilate(edged, kernel, iterations=1)
            kernel = np.ones((3, 3), np.uint8)
            open = cv2.morphologyEx(dilate,
                                    cv2.MORPH_OPEN,
                                    kernel,
                                    iterations=1)
            close = cv2.morphologyEx(open,
                                     cv2.MORPH_CLOSE,
                                     kernel,
                                     iterations=3)
            dilation = cv2.dilate(close, kernel, iterations=4)
            kernel = np.ones((4, 4), np.uint8)
            erode = cv2.erode(dilation, kernel, iterations=4)
            # open = cv2.morphologyEx(erode, cv2.MORPH_OPEN, kernel, iterations=1)
            # Removes the noises on the grayscale image
            denoised = cv2.fastNlMeansDenoising(erode, None, 10, 7, 21)

            mask = cv2.inRange(gray, 100, 255)  # works in lab, 100 at home,
            # cv2.waitKey(0)

            _, otsu = cv2.threshold(gauss, 0, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)

            # imgBlurred = cv2.GaussianBlur(gray, (5, 5), 0)                    # blur

            if Step_letter:
                cv2.imshow("mask", mask)
                cv2.imshow("gg", denoised)
                cv2.imshow("img", img)
                cv2.imshow("gray", gray)
                cv2.imshow("ed", edged)
                cv2.imshow("dil", dilate)
                cv2.imshow("otsu", otsu)
                cv2.waitKey(0)

            # Fill in the letter to detect the letter easily
            # kernel = np.ones((4, 4), np.uint8)
            # closing = cv2.morphologyEx(denoised, cv2.MORPH_CLOSE, kernel)
            # dilation = cv2.dilate(closing, kernel, iterations=1)

            knn = cv2.ml.KNearest_create()  # initialise the kNN
            # joins the train data with the train_labels
            knn.train(npaFlattenedImages, cv2.ml.ROW_SAMPLE,
                      npaClassifications)

            # filter image from grayscale to black and white
            imgThresh = cv2.adaptiveThreshold(
                gauss,  # input image
                255,  # make pixels that pass the threshold full white
                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,  # gaussian rather than mean seems to give better results
                cv2.THRESH_BINARY,  # foreground stays white on a black background (not inverted)
                11,  # size of the pixel neighborhood used to calculate the threshold value
                0)  # constant subtracted from the weighted mean

            newkernal = np.ones((3, 3), np.uint8)
            opening = cv2.morphologyEx(imgThresh,
                                       cv2.MORPH_OPEN,
                                       newkernal,
                                       iterations=1)
            eroding = cv2.erode(opening, kernel, iterations=1)
            dilating = cv2.dilate(eroding, kernel, iterations=1)

            # copy the thresholded image, since findContours modifies its input
            imgThreshCopy = otsu.copy()

            cv2.imwrite(
                'C:/Users/kevin/Desktop/2018-2019/method A/otsu/{0}_{1}contour.png'
                .format(marker, i), imgThreshCopy)

            (npaContours, _) = cv2.findContours(
                imgThreshCopy,  # use a copy, since findContours modifies this image
                cv2.RETR_LIST,  # retrieve all contours without hierarchy
                cv2.CHAIN_APPROX_SIMPLE
            )  # compress horizontal, vertical, and diagonal segments, keeping only their end points

            if Step_letter:
                cv2.imshow("npaContours", imgThreshCopy)
                cv2.imshow("planb", imgThresh)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

            for npaContour in npaContours:  # for each contour
                contourWithData = ContourWithData(
                )  # instantiate a contour with data object
                contourWithData.npaContour = npaContour  # assign contour to contour with data
                contourWithData.boundingRect = cv2.boundingRect(
                    contourWithData.npaContour)  # get the bounding rect
                contourWithData.calculateRectTopLeftPointAndWidthAndHeight(
                )  # get bounding rect info
                contourWithData.fltArea = cv2.contourArea(
                    contourWithData.npaContour)  # calculate the contour area
                allContoursWithData.append(
                    contourWithData
                )  # add contour with data object to list of all contours with data
            # end for

            for contourWithData in allContoursWithData:  # for all contours
                if contourWithData.checkIfContourIsValid(
                        newheight, newwidth):  # check if valid
                    validContoursWithData.append(
                        contourWithData)  # if so, append to valid contour list
                # end if
            # end for

            validContoursWithData.sort(key=operator.attrgetter(
                "intRectX"))  # sort contours from left to right
            validContoursWithData = removeInnerOverlappingChars(
                validContoursWithData)  # removes overlapping letters

            for contourWithData in validContoursWithData:  # for each contour
                new = cv2.cvtColor(
                    cv2.rectangle(
                        roi,  # draw rectangle on original testing image
                        (contourWithData.intRectX,
                         contourWithData.intRectY),  # upper left corner
                        (contourWithData.intRectX +
                         contourWithData.intRectWidth,
                         contourWithData.intRectY +
                         contourWithData.intRectHeight),
                        # lower right corner
                        (0, 255, 0),  # green
                        2),  # thickness
                    cv2.COLOR_BGR2GRAY)

                imgROI = otsu[contourWithData.intRectY +
                              1:contourWithData.intRectY +
                              contourWithData.intRectHeight -
                              1,  # crop char out of threshold image
                              contourWithData.intRectX +
                              1:contourWithData.intRectX +
                              contourWithData.intRectWidth - 1]

                imgROIResized = cv2.resize(
                    imgROI, (w, h)
                )  # resize image, this will be more consistent for recognition and storage

                cv2.imwrite(
                    'C:/Users/kevin/Desktop/2018-2019/method A/otsu/{0}_{1}chosen.png'
                    .format(marker, i), imgROIResized)

                # for i in range(0, 360, 90):
                #   angle = i
                #   rotate = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
                #   imgROIResized = cv2.warpAffine(imgROIResized, rotate, (w, h))

                npaROIResized = imgROIResized.reshape(
                    (1, w * h))  # flatten image into 1d numpy array

                npaROIResized = np.float32(
                    npaROIResized
                )  # convert from 1d numpy array of ints to 1d numpy array of floats

                if Step_letter:
                    cv2.imshow("resize", imgROIResized)
                    cv2.imshow(
                        "imgTestingNumbers", img
                    )  # show input image with green boxes drawn around found digits
                    cv2.waitKey(0)
                # end if

                # look up the nearest neighbour among the flattened training images (k = 1 below)
                retval, npaResults, neigh_resp, dists = knn.findNearest(
                    npaROIResized, k=1)

                # current guess
                gg = int(npaResults[0][0])
                if Step_letter:
                    print(gg)
                # Transform the guess from ASCII into the range 0-35
                if 49 <= gg <= 57:
                    guesses[gg - 49] += 1
                elif 65 <= gg <= 90:
                    guesses[gg - 56] += 1
        except Exception:
            # recognition failed for this capture; move on to the next image
            continue

    # find modal character guess
    # Initialise mode and prev variables for first loop through
    if Step_letter:
        print(guesses)
    mode = 0
    prev = guesses[0]
    for j in range(35):
        new = guesses[j]
        if new > prev:
            prev = guesses[j]
            mode = j
    # Transform back into ASCII
    if 0 <= mode <= 8:
        mode = mode + 49
    elif 9 <= mode <= 34:
        mode = mode + 56

    return chr(mode)
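# The guesses list above packs '1'-'9' into indices 0-8 (ASCII 49-57 minus 49)
# and 'A'-'Z' into indices 9-34 (ASCII 65-90 minus 56); the tail of the
# function inverts that mapping. A small self-contained round-trip check:
def to_index(ch):
    code = ord(ch)
    return code - 49 if 49 <= code <= 57 else code - 56

def to_char(idx):
    return chr(idx + 49) if idx <= 8 else chr(idx + 56)

assert all(to_char(to_index(c)) == c for c in "123456789ABCXYZ")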
Beispiel #43
0
def detection():
    print('Starting detection')

    # Initialising variable
    counter = 0
    marker = 1
    positions = []
    headings = []
    centres = []
    height_of_target = []
    square = 2

    # if Static_Test:
    # cap = cv2.VideoCapture("TestData2.mp4")  # video use

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)   # property ids 3/4 are the same width/height settings
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 800)
    cap.set(cv2.CAP_PROP_FPS, 60)

    time.sleep(2)  # allows the camera to start-up
    print('Camera on')
    # Run detection when camera is turn on
    while (cap.isOpened()):  # for video use
        # while True:
        # the camera will keep running even after the if statement so it can detect multiple ground marker
        if counter == 0 or start - end < 5:
            if Static_Test:
                distance = input("Distance it was taken")
            #  start - end < 5
            if not Static_Test:
                distance = 1
            ret, frame = cap.read()

            # Gathering data from Pixhawk
            if GPS:
                position = vehicle.location.global_relative_frame
                heading = vehicle.heading
            # end if

            # starting the timer for the length of time it hasn't found a target
            start = time.time()

            # applying image processing
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # converts to gray
            blurred = cv2.GaussianBlur(
                gray, (5, 5),
                0)  # blur the gray image for better edge detection
            edged = cv2.Canny(
                blurred, 14,
                10)  # the lower the value the more detailed it would be

            # find contours in the thresholded image and initialize the
            (contours,
             _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # grabs contours

            # outer square
            for c in contours:
                peri = cv2.arcLength(
                    c, True
                )  # grabs the contours of each points to complete a shape
                # get the approx. points of the actual edges of the corners
                approx = cv2.approxPolyDP(c, 0.01 * peri, True)
                if 4 <= len(approx) <= 6:
                    (x, y, w, h) = cv2.boundingRect(
                        approx
                    )  # gets the (x,y) of the top left of the square and the (w,h)
                    aspectRatio = w / float(
                        h)  # gets the aspect ratio of the width to height
                    area = cv2.contourArea(
                        c)  # grabs the area of the completed square
                    hullArea = cv2.contourArea(cv2.convexHull(c))
                    solidity = area / float(hullArea)
                    keepDims = w > 25 and h > 25
                    keepSolidity = solidity > 0.9  # to check if it's near to be an area of a square
                    keepAspectRatio = 0.6 <= aspectRatio <= 1.4
                    if keepDims and keepSolidity and keepAspectRatio:  # checks if the values are true
                        # captures the region of interest with a 5 pixel lesser in all 2D directions
                        roi = frame[y:y + h, x:x + w]

                        height, width, numchannels = frame.shape

                        centre_region = (x + w / 2, y + h / 2)
                        if GPS:
                            centre_target = (y + h / 2, x + w / 2)

                        # grabs the angle for rotation to make the square level
                        angle = cv2.minAreaRect(approx)[
                            -1]  # -1 is the angle the rectangle is at

                        # normalise the minAreaRect angle, which OpenCV
                        # reports in [-90, 0)
                        if angle < -45:
                            angle = 90 + angle

                        rotated = cv2.getRotationMatrix2D(
                            tuple(centre_region), angle, 1.0)

                        imgRotated = cv2.warpAffine(
                            frame, rotated,
                            (width, height))  # width and height was changed

                        imgCropped = cv2.getRectSubPix(imgRotated, (w, h),
                                                       tuple(centre_region))

                        HSVCropp = cv2.cvtColor(imgCropped, cv2.COLOR_BGR2HSV)

                        if square == 2:
                            color = imgCropped[int((h / 2) -
                                                   (h / 4)):int((h / 2) +
                                                                (h / 4)),
                                               int((w / 2) -
                                                   (w / 4)):int((w / 2) +
                                                                (w / 4))]
                        else:
                            color = imgCropped

                        if Step_detection:
                            cv2.imshow("crop", imgCropped)
                            cv2.imshow("okay", color)
                            print(HSVCropp[int((h / 2) - (h * (6 / 10))),
                                           int((w / 2) - (w * (6 / 10)))])

                        # # Convert the image to grayscale and turn to outline of  the letter
                        # g_rotated = cv2.cvtColor(imgCropped, cv2.COLOR_BGR2GRAY)
                        # b_rotated = cv2.GaussianBlur(g_rotated, (5, 5), 0)
                        # e_rotated = cv2.Canny(b_rotated, 70, 20)
                        #
                        # # uses the outline to detect the corners for the cropping of the image
                        # (contours, _) = cv2.findContours(e_rotated.copy(), cv2.RETR_LIST,
                        #                                  cv2.CHAIN_APPROX_SIMPLE)
                        #
                        # # inner square detection
                        # for cny in contours:
                        #   perin = cv2.arcLength(cny, True)
                        #   approxny = cv2.approxPolyDP(cny, 0.01 * perin, True)
                        #   if 4 <= len(approxny) <= 6:
                        #     (xx, yy), (ww, hh), angle = cv2.minAreaRect(approxny)
                        #     aspectRatio = ww / float(hh)
                        #     keepAspectRatio = 0.7 <= aspectRatio <= 1.3
                        #     angle = cv2.minAreaRect(approxny)[-1]
                        #     keep_angle = angle == 0, 90, 180, 270, 360
                        #     if keepAspectRatio and keep_angle:
                        #       (xxx, yyy, www, hhh) = cv2.boundingRect(approxny)
                        #       color = imgCropped[yyy:yyy + hhh, xxx:xxx + www]

                        # appends the data of the image to the list
                        if GPS:
                            positions.append(
                                [position.lat, position.lon, position.alt])
                            headings.append(heading)
                            centres.append(centre_target)
                            height_of_target.append(h)

                        # time that the target has been last seen
                        end = time.time()
                        time.sleep(0.5)

                        # keep count of number of saved images
                        counter = counter + 1
                        cv2.imwrite("colour%d.png" % counter, color)
                        cv2.imwrite(
                            'C:/Users/kevin/Desktop/2018-2019/method A/results/{0}_{1}.png'
                            .format(marker, counter), color)
                        print("Detected and saved a target")

                        if Static_Test:
                            # testing purposes
                            if not os.path.exists(distance):
                                os.makedirs(distance)
                            cv2.imwrite(
                                'C:/Users/kevin/Desktop/2018-2019/method A/{0}/results{1}_{2}.png'
                                .format(distance, marker, counter), color)
                            cv2.imwrite(
                                'C:/Users/kevin/Desktop/2018-2019/method A/{0}/captured{1}_{2}.png'
                                .format(distance, marker, counter), roi)
                            cv2.imwrite(
                                'C:/Users/kevin/Desktop/2018-2019/method A/{0}/orginal{1}_{2}.png'
                                .format(distance, marker, counter), frame)
                        else:
                            distance = 0

                        if Step_detection:
                            cv2.imshow("roi", roi)
                            cv2.imshow("cropped", imgCropped)
                            cv2.waitKey(0)
                        # end if
                        if counter == 7:
                            counter, marker = solution(counter, marker,
                                                       distance)
        else:
            counter, marker = solution(counter, marker, distance)

        if Step_camera:
            cv2.imshow('frame', frame)
            cv2.imshow('edge', edged)
            k = cv2.waitKey(5) & 0xFF
            if k == 27:
                break
        # end if

    cap.release()
    cv2.destroyAllWindows()
Beispiel #44
0
def recognizeDigits(filename, currentBarcodeNum):
	
	global indexTask3
	global recognizedNumber
	
	#Load my predictive model
	samples, responses = loadPredictiveModelSamples()
	
	#Train my data using KNearest
	model = initKNN(samples, responses)
	
	#For each digit in file
	for img in filename:
		
		indexTask3 = indexTask3 + 1 
		
		#Read digit image
		im = cv2.imread(img,1)
		
		#Pre-process image
		thresh = imageProccessing(im)
		
		#Find contours for each digtis
		contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
		
		#For each contour
		for cnt in contours:

			#If the area is greater than 50
			if cv2.contourArea(cnt) > 50:

				[x, y, w, h] = cv2.boundingRect(cnt)

				#Set some constraints to avoid detecting non-digits
				if h > 30 and w > 10 and abs(w - h) > 10:

					#Resize the digit into 10x10 and make it into float points for KNN
					roi = thresh[y:y+h, x:x+w]
					roismall = cv2.resize(roi, (10, 10))
					roismall = roismall.reshape((1, 100))
					roismall = np.float32(roismall)

					#Find the nearest similar digit from the samples using k = 1
					#(find_nearest is the OpenCV 2.4 kNN API; 3.x+ renamed it findNearest)
					retval, results, neigh_resp, dists = model.find_nearest(roismall, k=1)

					#String recognised
					string = str(int(results[0][0]))

					#Assign recognizedNumber to the recognised string
					recognizedNumber = string
		
		#Save recognizedNumber to text file
		saveTxtFile(currentBarcodeNum)
		
		#Append each recognizedNumber to an array for showing the result on terminal
		outputArray.append(recognizedNumber)
		
		#Reset recognizedNumber
		recognizedNumber = "0"
	
	combineNumberIntoFile(outputArray, currentBarcodeNum)	
def image(img, typecolor):
    trouve = False
    imgcopy = cv2.GaussianBlur(img, (3, 3), 0)
    abcd = []
    numbers = []
    imgcopy2 = img.copy()
    if (typecolor==1):
        imgcopy2=cv2.cvtColor(imgcopy2,cv2.COLOR_BGR2GRAY)
    elif(typecolor==2):
        imgcopy2 = cv2.cvtColor(imgcopy2,cv2.COLOR_BGR2YCrCb)
        imgcopy2 = imgcopy2[:,:,0]
    
    imgThresh = cv2.Canny(img, 200, 255)
    imgThreshCopy = imgThresh.copy()

    imgThreshCopy3=imgThreshCopy.copy()
    npaContours, npaHierarchy = cv2.findContours(imgThreshCopy,  
                                                cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)  
    for number in npaContours: 
        [intX, intY, intWidth, intHeight] = cv2.boundingRect(number)
        fltArea = cv2.contourArea(number)
        ratio=(intWidth)/(intHeight)
    
        if (ratio > MIN_RATIO) & (ratio < MAX_RATIO):
            if (fltArea > MIN_CONTOUR_AREA) & (fltArea < MAX_CONTOUR_AREA):

            
                numbers.append([intX, intY, intWidth, intHeight])  
    
    numbers = sorted(numbers, key=lambda selule:selule[0] , reverse = False)
    
    # keep only boxes where at most 20% of the pixels are mid-gray (40..225);
    # a filtered list is built, since deleting the loop variable would not
    # remove anything from numbers
    kept = []
    for nmr in numbers:
        x0, y0, w0, h0 = nmr
        conteurState = 0
        for conteurI in range(x0, min(x0 + w0 + 1, imgcopy2.shape[1])):
            for conteurJ in range(y0, min(y0 + h0 + 1, imgcopy2.shape[0])):
                if 40 < imgcopy2[conteurJ, conteurI] < 225:
                    conteurState += 1
        if (conteurState * 100) / (w0 * h0) <= 20:
            kept.append(nmr)
    numbers = kept
    
    abcd = sorted(numbers, key=lambda selule:selule[1] , reverse = False)
    cont1 =0
    cont2 =-1
    prec=0
    state=False
    for nmr in abcd:
        
        if state==False:
            if cont2==-1:
                prec=nmr[1]
                cont1+=1
                cont2=0
            else:
                if (abs (nmr[1] - prec ) > 5):
                    cont2+=cont1
                    cont1=1
                else:
                    cont1+=1
                if cont1 ==10 :
                    state=True 
                prec=nmr[1]
        else:
            # deleting by index while abcd is still being iterated; later
            # candidates shift down and can be skipped
            del abcd[cont2]

    # drop the candidates that precede the detected row of digits
    for iii in range(0, cont2):
        del abcd[0]
    
    if state==True:
        trouve=True
        abcd = sorted(abcd, key=lambda selule:selule[0] , reverse = False)
        
        for aaaa in abcd:
    
            image_implimentation=imgThresh.copy()
            image_implimentation=cv2.cvtColor(image_implimentation,cv2.COLOR_GRAY2BGR)

            cv2.rectangle(image_implimentation
                        ,(aaaa [0] -2, aaaa [1] -2)    
                        ,(aaaa [2]+aaaa [0] +2, aaaa [3]+aaaa [1]+2)
                        , (0,0,255)
                        , 2)

    
    elif state == False:
        # reset the candidate lists before re-scanning with RETR_TREE
        numbers = []
        abcd = []
        bnbn=imgThreshCopy.copy()
        npaContours2, npaHierarchy2 = cv2.findContours(imgThreshCopy,  
                                         cv2.RETR_TREE,         
                                         cv2.CHAIN_APPROX_SIMPLE)

        for nmrr in npaContours2:
            [intX, intY, intWidth, intHeight] = cv2.boundingRect(nmrr)
            fltArea = cv2.contourArea(nmrr) 
            ratio=(intWidth)/(intHeight)
        
            if (ratio > MIN_RATIO_PLAQUE) & (ratio < MAX_RATIO_PLAQUE) :
                if (fltArea > MIN_CONTOUR_AREA_PLAQUE) & (fltArea < MAX_CONTOUR_AREA_PLAQUE):
           
                    intXX=intX
                    intYY=intY
                    intWidthX=intX+intWidth
                    intHeightY=intY+intHeight
        
                    imgThreshCopy2 = imgThresh.copy()
        
                    npaContours5, npaHierarchy = cv2.findContours(imgThreshCopy2,
                                                             cv2.RETR_TREE,
                                                             cv2.CHAIN_APPROX_SIMPLE)
        
                    for number in npaContours5:
                        [intX, intY, intWidth, intHeight] = cv2.boundingRect(number)
                        if (intX>=intXX and intX+intWidth<=intWidthX)and(intY>=intYY and intY+intHeight<=intHeightY):
                            fltArea = cv2.contourArea(number)
                            ratio=(intWidth)/(intHeight)
                            if (fltArea > MIN_CONTOUR_AREA) & (fltArea < MAX_CONTOUR_AREA):
                                if (ratio > MIN_RATIO) & (ratio < MAX_RATIO) :    
                                    numbers.append([intX, intY, intWidth, intHeight])   
                    numbers = sorted(numbers, key=lambda selule:selule[0] , reverse = False)
                    # same mid-gray density filter as the first pass, run on
                    # the grayscale imgcopy2 with bounded counters (the nested
                    # while loops here never advanced and never terminated)
                    kept = []
                    for nmr in numbers:
                        x0, y0, w0, h0 = nmr
                        conteurState = 0
                        for conteurI in range(x0, min(x0 + w0, imgcopy2.shape[1])):
                            for conteurJ in range(y0, min(y0 + h0, imgcopy2.shape[0])):
                                if 25 < imgcopy2[conteurJ, conteurI] < 230:
                                    conteurState += 1
                        if (conteurState * 100) / (w0 * h0) <= 20:
                            kept.append(nmr)
                    numbers = kept
            
                    abcd = sorted(numbers, key=lambda selule:selule[1] , reverse = False)
                    cont1 =0
                    cont2 =0
                    prec=0
                    state=False
                    for nmr5 in abcd:

                        if state==False:
                            if cont1==0:
                                prec=nmr5[1]
                                
                                cont1+=1
                            else:
                                if (abs (nmr5[1] - prec ) > 5):
                                    cont2+=cont1
                                    cont1=0
                                else:
                                    cont1+=1
                                if cont1 ==10 :
                                    state=True 
                                prec=nmr5[1]
                        else:
                            break  # a full row of ten has been found; later candidates are ignored

                    # de-duplicate the candidates while preserving order
                    end = []
                    for i in abcd:
                        if i not in end:
                            end.append(i)
                    abcd = sorted(end, key=lambda selule: selule[0])
                    
                    
                    for kkk in abcd:
                        [intX, intY, intWidth, intHeight]=kkk
                        cv2.destroyAllWindows()
                    
                    trouve=state

    
    return [trouve,abcd]
def largest_contours_rect(saliency):
    # "* 1" coerces a boolean saliency map into an integer array findContours accepts
    contours, hierarchy = cv2.findContours(saliency * 1, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea)
    return cv2.boundingRect(contours[-1])
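# Usage sketch: largest_contours_rect expects a single-channel binary mask;
# the synthetic blob below yields its bounding rectangle. The two-value
# findContours unpacking inside assumes OpenCV 2.x or 4.x.
import cv2
import numpy as np

mask = np.zeros((100, 100), np.uint8)
mask[20:60, 30:80] = 1                    # synthetic salient region
print(largest_contours_rect(mask))        # -> (30, 20, 50, 40)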
Beispiel #47
0
def main():
    cam = cv2.VideoCapture("video.mp4")

    cv2.namedWindow(WINDOW_NAME)

    img_counter = 0

    while True:
        ret, frame = cam.read()
        if not ret:
            print("failed to grab frame")
            break
        global img
        cv2.setMouseCallback(WINDOW_NAME, capture_click)

        oldX,oldY,oldW,oldH = -1,-1,-1,-1
        global lowers,uppers

        # Blurring
        blur = cv2.blur(frame,(1,1))
        blur0=cv2.medianBlur(blur,5)
        blur1= cv2.GaussianBlur(blur0,(1,1),0)
        blur2= cv2.bilateralFilter(blur1,9,200,200)

        # Sharping
        sharp=cv2.addWeighted(frame,3,blur2,-2,0)

        # Erosion
        kernel = np.ones((1,1),np.uint8)
        sharp = cv2.erode(sharp,kernel,iterations = 1)

        img = sharp
        if(len(lowers) > 0):
          curLow = lowers[0]
          curUpp = uppers[0]


          kernel = np.ones((1,1),np.uint8)
          sharp = cv2.erode(sharp,kernel,iterations = 1)
          cv2.imshow("er",sharp)

          # Create the mask
          mask = cv2.inRange(sharp,curLow,curUpp)

          cv2.imshow("mask",mask)
          # Find the contours
          contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

          if len(contours) > 0:
            # sort the contours by area, largest first; the head of the sorted
            # list is already the largest, so no separate max() is needed
            cont_sort = sorted(contours, key=cv2.contourArea, reverse=True)
            (xg, yg, wg, hg) = cv2.boundingRect(cont_sort[0])
            cv2.rectangle(frame, (xg, yg), (xg + wg, yg + hg), (69, 69, 255), 2)
        cv2.imshow(WINDOW_NAME, frame)

        k = cv2.waitKey(1)
        if k%256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif k%256 == 32:
            # SPACE pressed
            img_name = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(img_name, frame)
            print("{} written!".format(img_name))
            img_counter += 1

    cam.release()

    cv2.destroyAllWindows()
def draw_contours(image, contours, color):
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
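# Usage sketch: draw_contours boxes every contour in one colour. The [-2]
# index picks the contour list on both OpenCV 3.x and 4.x.
import cv2
import numpy as np

img = np.zeros((120, 120, 3), np.uint8)
cv2.circle(img, (60, 60), 30, (255, 255, 255), -1)
mask = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
draw_contours(img, contours, (0, 255, 0))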
Beispiel #49
0
def runDetector(angle, image):
	
	#First pre-process the image
	image = processImage(angle, image)
	
	#Make it into gray scale
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	
	#Find the greatest gradient from the gray scale image
	closed = featureDetection(gray)
	 
	#Find the contours in the thresholded image, then sort the contours, keeping only the largest one area
	#This area should be the barcode area
	(cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	c = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
	
	#Get a rectangle for the detected area
	rect = cv2.minAreaRect(c)
	box = np.int0(cv2.cv.BoxPoints(rect))  #cv2.cv.BoxPoints is the OpenCV 2.4 API; 3.x+ renamed it cv2.boxPoints

	#Points of the rectangle
	x,y,w,h = cv2.boundingRect(c)
	
	#Extracted area called ori
	ori = extractBarcodeArea(x,y,w,h,image)
	
	#Save the the points x and y from the rectangle for later use
	savedX = x
	savedY = y
	
	#Process the extracted area make it call roi (Region of Interest)
	roi = cv2.cvtColor(ori, cv2.COLOR_BGR2GRAY)

	#refine roi with closing and eroding to get the barcode area more precisely
	closing = processROI(roi)
	
	#Again find the contours for roi
	(cnts, _) = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
	c = sorted(cnts, key = cv2.contourArea, reverse = True)[0]

	#Get a rectangle for the detected area in roi
	rect = cv2.minAreaRect(c)
	box2 = np.int0(cv2.cv.BoxPoints(rect))

	#Draw contours on the detected area
	cv2.drawContours(roi, [box2], -1, (0, 255, 0), 2)
	
	#Copy roi for further processing
	copyOfROI = roi.copy()
	
	#Points of rectangle from roi
	x,y,w,h = cv2.boundingRect(c)

	#Crop roi to only the numbers using a percentage
	croppedArea,  cropped_h = cropImagePercentage(0.18, roi, x, y, h , w)
	
	#Before cropping the barcode numbers, make sure the image is large enough to be cropped
	if ( x+savedX-10 ) <= 0:
		final = image[y+cropped_h+savedY:y+h+savedY, x+savedX:x+w+savedX]
	else:	
		final = image[y+cropped_h+savedY:y+h+savedY+5, x+savedX-10:x+w+savedX]
	
	#Saves the barcode number to directory
	saveFinalImage(final)
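# cv2.cv.BoxPoints (used above) exists only in OpenCV 2.4; from 3.0 onward the
# equivalent call is cv2.boxPoints. A version-agnostic helper, offered as a
# sketch:
import cv2
import numpy as np

def box_points(rect):
    pts = cv2.boxPoints(rect) if hasattr(cv2, 'boxPoints') else cv2.cv.BoxPoints(rect)
    return np.array(pts).astype(int)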
def main():
	cv2.namedWindow('Gamma Correction',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Correction',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Auto White Balance',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Clahe',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Clahe LAB',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Gaussian',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Anisotropic Diffusion',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Median Blur',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Median Blur 2',cv2.WINDOW_NORMAL)
	cv2.namedWindow('YCrCb Median Blur',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Output',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Thresh Output',cv2.WINDOW_NORMAL)
	cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
	
	cv2.resizeWindow('Video',500,500)
	cv2.resizeWindow('Clahe',500,500)
	cv2.resizeWindow('Clahe LAB',500,500)
	cv2.resizeWindow('Gaussian',500,500)
	cv2.resizeWindow('Anisotropic Diffusion',500,500)
	cv2.resizeWindow('Median Blur',500,500)
	cv2.resizeWindow('Median Blur 2',500,500)
	cv2.resizeWindow('YCrCb Median Blur',500,500)
	cv2.resizeWindow('Output',500,500)
	cv2.resizeWindow('Thresh Output',500,500)
	
	imgpath = "/home/charmi/Desktop/Trident_Work/OpenCV/Output/SavedVideos2/Output15.avi"
	'''
	a ='/home/charmi/Desktop/Trident_Work/OpenCV/Output/SavedVideos/OutputThresh'
	b =1
	c = '.avi'
	filename = a+str(b)+c
	#d ='/home/charmi/Desktop/Trident_Work/OpenCV/Output/SavedVideos/Output'
	#filename2=d+str(b)+c
	while(os.path.exists(filename)):
		b+=1
		filename = a+str(b)+c
		#filename2=d+str(b)+c
	codec = cv2.VideoWriter_fourcc('W', 'M', 'V', '2')
	framerate = 10
	resolution = (640, 480)
	VideoFileOutput = cv2.VideoWriter(filename, codec, framerate, resolution)
	#VideoFileOutput2 = cv2.VideoWriter(filename2, codec, framerate, resolution)
	'''
	
	cv2.createTrackbar('+ve Gamma','Gamma Correction',1,20,emptyFunction)
	cv2.createTrackbar('-ve Gamma','Gamma Correction',0,20,emptyFunction)
	cv2.createTrackbar('Hue','Correction',1,200,emptyFunction)
	cv2.createTrackbar('Saturation','Correction',1,200,emptyFunction)
	cv2.createTrackbar('Value','Correction',1,200,emptyFunction)
	cv2.createTrackbar('Low H','Thresh Output',0,180,emptyFunction)
	cv2.createTrackbar('Low S','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('Low V','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('High H','Thresh Output',0,360,emptyFunction)
	cv2.createTrackbar('High S','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('High V','Thresh Output',0,255,emptyFunction)
	cv2.createTrackbar('Kernel Size','YCrCb Median Blur',3,15,emptyFunction)
	cv2.createTrackbar('Laplacian ksize','Output',3,15,emptyFunction)
	cv2.createTrackbar('Clip Limit (Blue)','Clahe',0,100,emptyFunction)
	cv2.createTrackbar('Tile Grid Size (Blue)','Clahe',1,20,emptyFunction)
	cv2.createTrackbar('Clip Limit (Green)','Clahe',0,100,emptyFunction)
	cv2.createTrackbar('Tile Grid Size (Green)','Clahe',1,20,emptyFunction)
	cv2.createTrackbar('Clip Limit (Red)','Clahe',0,100,emptyFunction)
	cv2.createTrackbar('Tile Grid Size (Red)','Clahe',1,20,emptyFunction)
	cv2.createTrackbar('Pair','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('Kernel Size','Gaussian',3,15,emptyFunction)
	cv2.createTrackbar('Standard Deviation','Gaussian',0,50,emptyFunction)
	cv2.createTrackbar('+ve Alpha','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('+ve Beta','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('-ve Alpha','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('-ve Beta','Gaussian',0,20,emptyFunction)
	cv2.createTrackbar('Alpha','Anisotropic Diffusion',0,10,emptyFunction)
	cv2.createTrackbar('Sensitivity','Anisotropic Diffusion',0,500,emptyFunction)
	cv2.createTrackbar('Iterations','Anisotropic Diffusion',1,500,emptyFunction)
	cv2.createTrackbar('Kernel','Median Blur',3,15,emptyFunction)
	cv2.createTrackbar('Kernel','Median Blur 2',3,25,emptyFunction)
	cv2.createTrackbar('FGauss','Gaussian',0,1,emptyFunction)
	cv2.createTrackbar('FGamma','Gamma Correction',0,1,emptyFunction)
	cv2.createTrackbar('FAWBal','Auto White Balance',0,1,emptyFunction)
	cv2.createTrackbar('FAni','Anisotropic Diffusion',0,1,emptyFunction)
	cv2.createTrackbar('FB','Median Blur',0,1,emptyFunction)
	cv2.createTrackbar('FG','Median Blur',0,1,emptyFunction)
	cv2.createTrackbar('FR','Median Blur',0,1,emptyFunction)
	cv2.createTrackbar('FMed','Median Blur 2',0,1,emptyFunction)
	cv2.createTrackbar('FL','Clahe LAB',0,1,emptyFunction)
	cv2.createTrackbar('FA','Clahe LAB',0,1,emptyFunction)
	cv2.createTrackbar('FB','Clahe LAB',0,1,emptyFunction)
	cv2.createTrackbar('FB','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('FG','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('FR','Clahe',0,1,emptyFunction)
	cv2.createTrackbar('FL','Output',0,1,emptyFunction)
	cv2.createTrackbar('FM','YCrCb Median Blur',0,1,emptyFunction)
	
	cap = cv2.VideoCapture(imgpath)
	ret, image=cap.read()
	(h, w, c) = image.shape
	cv2.circle(image, (w//2, h//2), 7, (255, 255, 255), -1) 
	width2 = float(w)/2
	
	cv2.setTrackbarPos('+ve Gamma','Gamma Correction',16)
	cv2.setTrackbarPos('-ve Gamma','Gamma Correction',0)
	cv2.setTrackbarPos('Hue','Correction',100)
	cv2.setTrackbarPos('Saturation','Correction',100)
	cv2.setTrackbarPos('Value','Correction',100)
	cv2.setTrackbarPos('Low H','Thresh Output',8)
	cv2.setTrackbarPos('Low S','Thresh Output',149)
	cv2.setTrackbarPos('Low V','Thresh Output',170)
	cv2.setTrackbarPos('High H','Thresh Output',34)
	cv2.setTrackbarPos('High S','Thresh Output',255)
	cv2.setTrackbarPos('High V','Thresh Output',255)
	
	cv2.setTrackbarPos('Kernel Size','YCrCb Median Blur',3)
	cv2.setTrackbarPos('Laplacian ksize','Output',3)
	
	cv2.setTrackbarPos('Clip Limit (Blue)','Clahe',0)
	cv2.setTrackbarPos('Tile Grid Size (Blue)','Clahe',4)
	cv2.setTrackbarPos('Clip Limit (Green)','Clahe',0)
	cv2.setTrackbarPos('Tile Grid Size (Green)','Clahe',4)
	cv2.setTrackbarPos('Clip Limit (Red)','Clahe',0)
	cv2.setTrackbarPos('Tile Grid Size (Red)','Clahe',4)
	cv2.setTrackbarPos('Pair','Clahe',1)
	
	cv2.setTrackbarPos('Kernel Size','Gaussian',3)
	cv2.setTrackbarPos('Standard Deviation','Gaussian',5)
	cv2.setTrackbarPos('+ve Alpha','Gaussian',11)
	cv2.setTrackbarPos('+ve Beta','Gaussian',2)
	cv2.setTrackbarPos('-ve Alpha','Gaussian',2)
	cv2.setTrackbarPos('-ve Beta','Gaussian',0)
	cv2.setTrackbarPos('Alpha','Anisotropic Diffusion',1)
	cv2.setTrackbarPos('Sensitivity','Anisotropic Diffusion',20)
	cv2.setTrackbarPos('Iterations','Anisotropic Diffusion',2)
	cv2.setTrackbarPos('Kernel','Median Blur',5)
	cv2.setTrackbarPos('Kernel','Median Blur 2',3)
	cv2.setTrackbarPos('FGauss','Gaussian',0)
	cv2.setTrackbarPos('FGamma','Gamma Correction',1)
	cv2.setTrackbarPos('FAWBal','Auto White Balance',0)
	cv2.setTrackbarPos('FAni','Anisotropic Diffusion',1)
	cv2.setTrackbarPos('FB','Median Blur',1)
	cv2.setTrackbarPos('FG','Median Blur',1)
	cv2.setTrackbarPos('FR','Median Blur',1)
	cv2.setTrackbarPos('FMed','Median Blur 2',0)
	cv2.setTrackbarPos('FL','Clahe LAB',0)
	cv2.setTrackbarPos('FA','Clahe LAB',0)
	cv2.setTrackbarPos('FB','Clahe LAB',0)
	
	cv2.setTrackbarPos('FB','Clahe',1)
	cv2.setTrackbarPos('FG','Clahe',1)
	cv2.setTrackbarPos('FR','Clahe',1)
	
	cv2.setTrackbarPos('FL','Output',0)
	cv2.setTrackbarPos('FM','YCrCb Median Blur',0)
	
	
	ret = True
	flag = 1       # 1 = grab a new frame each iteration, 0 = paused on the current frame
	xdiff = 0      # last reported horizontal offset of the detected object from frame centre
	txdiff = 0
	cX = 0
	cY = 0
	maxArea = 0
	while True:
		maxArea=0
		if flag==1:
			ret,img = cap.read()
		
		fgs = cv2.getTrackbarPos('FGauss','Gaussian')
		fgm = cv2.getTrackbarPos('FGamma','Gamma Correction')
		fab = cv2.getTrackbarPos('FAWBal', 'Auto White Balance')
		fad = cv2.getTrackbarPos('FAni','Anisotropic Diffusion')
		fmb = cv2.getTrackbarPos('FB','Median Blur')
		fmg = cv2.getTrackbarPos('FG','Median Blur')
		fmr = cv2.getTrackbarPos('FR','Median Blur')
		fmed = cv2.getTrackbarPos('FMed','Median Blur 2')
		fcll = cv2.getTrackbarPos('FL','Clahe LAB')
		fcla = cv2.getTrackbarPos('FA','Clahe LAB')
		fclb = cv2.getTrackbarPos('FB','Clahe LAB')
		
		fcb = cv2.getTrackbarPos('FB','Clahe')
		fcg = cv2.getTrackbarPos('FG','Clahe')
		fcr = cv2.getTrackbarPos('FR','Clahe')
		
		fyl = cv2.getTrackbarPos('FL','Output')
		fym = cv2.getTrackbarPos('FM','YCrCb Median Blur')
		
		hl = cv2.getTrackbarPos('Low H','Thresh Output')
		hh = cv2.getTrackbarPos('High H','Thresh Output')
		sl = cv2.getTrackbarPos('Low S','Thresh Output')
		sh = cv2.getTrackbarPos('High S','Thresh Output')
		vl = cv2.getTrackbarPos('Low V','Thresh Output')
		vh = cv2.getTrackbarPos('High V','Thresh Output')
		clipLim1=(cv2.getTrackbarPos('Clip Limit (Blue)','Clahe'))
		clipLim1=float(clipLim1)/1000
		tgs1=cv2.getTrackbarPos('Tile Grid Size (Blue)','Clahe')
		clipLim2=(cv2.getTrackbarPos('Clip Limit (Green)','Clahe'))
		clipLim2=float(clipLim2)/1000
		tgs2=cv2.getTrackbarPos('Tile Grid Size (Green)','Clahe')
		clipLim3=(cv2.getTrackbarPos('Clip Limit (Red)','Clahe'))
		clipLim3=float(clipLim3)/1000
		tgs3=cv2.getTrackbarPos('Tile Grid Size (Red)','Clahe')
		
		pgamma=cv2.getTrackbarPos('+ve Gamma','Gamma Correction')
		ngamma=cv2.getTrackbarPos('-ve Gamma','Gamma Correction')
		hc=cv2.getTrackbarPos('Hue','Correction')
		sc=cv2.getTrackbarPos('Saturation','Correction')
		vc=cv2.getTrackbarPos('Value','Correction')
		pgamma=float(pgamma)/10
		ngamma=float(ngamma)/10
		hc=float(hc)/100
		sc=float(sc)/100
		vc=float(vc)/100
		ks=cv2.getTrackbarPos('Kernel Size','Gaussian')
		sd=(cv2.getTrackbarPos('Standard Deviation','Gaussian'))
		sd=float(sd)/10
		alpha=(cv2.getTrackbarPos('+ve Alpha','Gaussian'))
		beta=(cv2.getTrackbarPos('+ve Beta','Gaussian'))
		nalpha=(cv2.getTrackbarPos('-ve Alpha','Gaussian'))
		nbeta=(cv2.getTrackbarPos('-ve Beta','Gaussian'))
		alpha=float(alpha)/10
		beta=float(beta)/10
		nalpha=float(nalpha)/10
		nbeta=float(nbeta)/10
		alph=(cv2.getTrackbarPos('Alpha','Anisotropic Diffusion'))
		alph=float(alph)/10
		sens=cv2.getTrackbarPos('Sensitivity','Anisotropic Diffusion')
		itern=cv2.getTrackbarPos('Iterations','Anisotropic Diffusion')
		mker=cv2.getTrackbarPos('Kernel','Median Blur')
		medker=cv2.getTrackbarPos('Kernel','Median Blur 2')
		ymker=cv2.getTrackbarPos('Kernel Size','YCrCb Median Blur')
		ylksize=cv2.getTrackbarPos('Laplacian ksize','Output')
		
		
		# Kernel sizes for OpenCV's blur/Laplacian filters must be odd, so bump even values
		if ks%2==0:
			ks+=1
		if mker%2==0:
			mker+=1
		if medker%2==0:
			medker+=1
		if ymker%2==0:
			ymker+=1
		if ylksize%2==0:
			ylksize+=1
		
		fg=cv2.getTrackbarPos('Pair','Clahe')
		if ret:
			# Poll the keyboard once per frame; separate cv2.waitKey calls would each
			# consume a different keypress and make the controls unreliable
			key = cv2.waitKey(2) & 0xFF
			if key == 27:  # Esc: quit
				break
			if key == 97:  # 'a': resume playback
				flag = 1
			if key == 32:  # Space: pause on the current frame
				flag = 0
			low = np.array([hl,sl,vl])
			high = np.array([hh,sh,vh])
			
			#Gamma Correction
			t1=time.time()
			gc=adjust_gamma(img,pgamma-ngamma)
			t2=time.time()-t1
			#print(t2)
			cv2.imshow('Gamma Correction',gc)
			if(fgm==0):
				gc=img
			
			
			#Gaussian
			gaussian = cv2.GaussianBlur(gc ,(ks,ks), sd)
			gauss = cv2.addWeighted(img, alpha-nalpha, gaussian, beta-nbeta, 0)
			cv2.imshow('Gaussian',gauss)
			if(fgs==0):
				gauss=gc
			
			
			#Correcting HSV Values
			st1=cv2.cvtColor(gauss,cv2.COLOR_BGR2HSV)
			st1[:, :, 0]=st1[:, :, 0]*hc
			st1[:, :, 1]=st1[:, :, 1]*sc
			st1[:, :, 2]=st1[:, :, 2]*vc
			st3=cv2.cvtColor(st1,cv2.COLOR_HSV2BGR)
			cv2.imshow('Correction',st3)
			
			
			#Auto White Balance
			result1 = cv2.cvtColor(st3, cv2.COLOR_BGR2LAB)
			avg_a = np.average(result1[:, :, 1])
			avg_b = np.average(result1[:, :, 2])
			result1[:, :, 1] = result1[:, :, 1] - ((avg_a - 128) * (result1[:, :, 0] / 255.0) * 1.5)
			result1[:, :, 2] = result1[:, :, 2] - ((avg_b - 128) * (result1[:, :, 0] / 255.0) * 1.5)
			result1 = cv2.cvtColor(result1, cv2.COLOR_LAB2BGR)
			cv2.imshow('Auto White Balance',result1)
			if (fab==0):
				result1=st3
			
			#Anisotropic Diffusion
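			# cv2.ximgproc ships with the opencv-contrib-python package, not plain opencv-python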
			adf = cv2.ximgproc.anisotropicDiffusion(result1, alph, sens, itern)
			cv2.imshow('Anisotropic Diffusion',adf)
			if(fad==0):
				adf=result1
				
			#Median Blur
			b, g, r = cv2.split(adf)
			b1 = cv2.medianBlur(b,mker)
			if(fmb==0):
				b1=b
			g1 = cv2.medianBlur(g,mker)
			if(fmg==0):
				g1=g
			r1 = cv2.medianBlur(r,mker)
			if(fmr==0):
				r1=r
			medfil = cv2.merge((b1, g1, r1))
			cv2.imshow('Median Blur',medfil)
			
			#Clahe LAB
			clahe1 = cv2.createCLAHE(clipLimit=clipLim1,tileGridSize=(tgs1,tgs1))
			clahe2 = cv2.createCLAHE(clipLimit=clipLim2,tileGridSize=(tgs2,tgs2))
			clahe3 = cv2.createCLAHE(clipLimit=clipLim3,tileGridSize=(tgs3,tgs3))
			lab=cv2.cvtColor(medfil,cv2.COLOR_BGR2LAB)
			l, a, b = cv2.split(lab)
			l1 = clahe1.apply(l)
			if(fcll==0):
				l1=l
			a1 = clahe2.apply(a)
			if(fcla==0):
				a1=a
			b1 = clahe3.apply(b)
			if(fclb==0):
				b1=b
			cmer=cv2.merge((l1,a1,b1))
			cllab=cv2.cvtColor(cmer,cv2.COLOR_LAB2BGR)
			cv2.imshow('Clahe LAB',cllab)
			
			#Clahe BGR
			if(fg==1):
				cv2.setTrackbarPos('Clip Limit (Green)','Clahe',int(clipLim1*1000))
				cv2.setTrackbarPos('Tile Grid Size (Green)','Clahe',tgs1)
				cv2.setTrackbarPos('Clip Limit (Red)','Clahe',int(clipLim1*1000))
				cv2.setTrackbarPos('Tile Grid Size (Red)','Clahe',tgs1)
			clahe1 = cv2.createCLAHE(clipLimit=clipLim1,tileGridSize=(tgs1,tgs1))
			clahe2 = cv2.createCLAHE(clipLimit=clipLim2,tileGridSize=(tgs2,tgs2))
			clahe3 = cv2.createCLAHE(clipLimit=clipLim3,tileGridSize=(tgs3,tgs3))
			b, g, r = cv2.split(cllab)
			b1 = clahe1.apply(b)
			if(fcb==0):
				b1=b
			g1 = clahe2.apply(g)
			if(fcg==0):
				g1=g
			r1 = clahe3.apply(r)
			if(fcr==0):
				r1=r
			cl=cv2.merge((b1,g1,r1))
			cv2.imshow('Clahe',cl)
			
			#Median Blur
			medblur = cv2.medianBlur(cl,medker)
			cv2.imshow('Median Blur 2',medblur)
			if(fmed==0):
				medblur=cl
				
			#YCrCb  Laplacian
			ycrcb=cv2.cvtColor(medblur, cv2.COLOR_BGR2YCR_CB)
			y,cr,cb=cv2.split(ycrcb)
			dst = cv2.Laplacian( y, cv2.CV_16S, ksize=ylksize)
			absDst = cv2.convertScaleAbs( dst )
			out=cv2.merge((absDst,cr,cb))
			output=cv2.cvtColor(out, cv2.COLOR_YCR_CB2BGR)
			cv2.imshow('Output',output)
			if(fyl==0):
				output=medblur
				
			#YCrCb Median 
			med=cv2.medianBlur(absDst,ymker)
			tempOut=cv2.merge((med,cr,cb))
			tempOutput=cv2.cvtColor(tempOut, cv2.COLOR_YCR_CB2BGR)
			cv2.imshow('YCrCb Median Blur',tempOutput)
			if(fym==0):
				tempOutput=output
			
			
			
			#Masking
			hsv1= cv2.cvtColor(output, cv2.COLOR_BGR2HSV)
			obj1 = cv2.inRange(hsv1, low, high)
			res1 = cv2.bitwise_and(output, output, mask=obj1)
			
			#DETECTION
			gray=cv2.cvtColor(res1, cv2.COLOR_BGR2GRAY)
			ret1,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
			(contours,hierarchy) = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
			for pic, contour in enumerate(contours):
				area1 = cv2.contourArea(contour)
				if(area1>700):
					x,y,w,h = cv2.boundingRect(contour)
					if(w*1.5<h and h>120):
						cv2.rectangle(res1,(x,y),(x+w,y+h),(0,165,255),2)
						M = cv2.moments(contour)
						cX = int(M["m10"] / M["m00"])
						cY = int(M["m01"] / M["m00"])
						cv2.circle(res1, (cX, cY), 7, (255, 255, 255), -1)
						if(area1>maxArea):
							maxArea=area1
							txdiff=width2-cX
			if(txdiff!=xdiff):
				xdiff=txdiff
				print('Pixel Difference:', xdiff)	
			#Display
			cv2.imshow('Thresh Output',res1)
			cv2.imshow('Video',img)	
			#VideoFileOutput.write(res1)
			#VideoFileOutput2.write(img)
			
		else:
			break
	cv2.destroyAllWindows()
	#VideoFileOutput.release()
	#VideoFileOutput2.release()
	cap.release()
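
The loop above leans on two helpers defined outside this excerpt: the emptyFunction trackbar callback and adjust_gamma. A minimal sketch of plausible implementations, assuming the conventional no-op callback and LUT-based gamma correction:

import cv2
import numpy as np

def emptyFunction(value):
    # No-op trackbar callback; the main loop polls positions with getTrackbarPos instead
    pass

def adjust_gamma(image, gamma):
    # Clamp to a small positive value, since the trackbar arithmetic above can yield 0
    gamma = max(gamma, 0.1)
    inv = 1.0 / gamma
    # Build a 256-entry lookup table mapping each intensity through the gamma curve
    table = np.array([((i / 255.0) ** inv) * 255 for i in range(256)]).astype("uint8")
    return cv2.LUT(image, table)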
Beispiel #51
0

import cv2
from matplotlib import pyplot as plt

img = cv2.imread("400.jpg")  # load the image
img_gray = cv2.cvtColor(img,
                        cv2.COLOR_BGR2GRAY)  # the photo is in color, so convert it to grayscale (cv2.imread loads BGR)
# Adaptive thresholding binarizes the image so that only connected strokes
# remain as pixels, giving a numpy array to work with.
img_checker = cv2.adaptiveThreshold(img_gray, 255,
                                    cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY_INV, 191, 15)
plt.imshow(img_checker)
plt.show()
# If the image contains digits, this function finds their boundaries.
contours, hierarchy = cv2.findContours(img_checker, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
# Turn each detected boundary into a rectangle: store the top-left x and y
# coordinates plus the width and height.
rects = [cv2.boundingRect(contour) for contour in contours]

# Check each rectangle in turn
for rect in rects:
    # Tiny specks get detected as well; skipping anything with an area under
    # 1000 filters them out.
    if rect[2] * rect[3] < 1000:
        continue
    # A digit was recognized in the image; draw a green border around it.
    cv2.rectangle(img, (rect[0], rect[1]),
                  (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
    # Narrow digits such as 1 are recognized poorly during preprocessing,
    # so widen the crop for them.
    if rect[2] < 50:
        margin = 100
    else:
        margin = 30
    # Crop from x - margin to x + width + margin (and likewise in y), i.e. a
    # region slightly larger than the rectangle itself.
    roi = img_checker[rect[1] - margin:rect[1] + rect[3] + margin,
                      rect[0] - margin:rect[0] + rect[2] + margin]
Beispiel #52
0
def detectNumber(im, newResponses, barcodeNum):
	
	global newSamples
	global index
	
	x_array = []
	x_dict = {}
	overlap = False
	
	gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
	thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)

	#Find contours using external so it draws a box on each digit 
	contours,hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
	
	samples =  np.empty((0,100))
	responses = []
	keys = [i for i in range(48,58)]
	
	#For each contour in this image whose area is greater than 50
	for cnt in contours:
		
		if cv2.contourArea(cnt)>50:
			
			[x,y,w,h] = cv2.boundingRect(cnt)
			
			## Set some constraints to avoid detecting non-digit areas
			if  h>30 and w>10 and abs(w-h) > 10:
				
				#Check for overlapping rectangles, e.g. zero may be detected more than once
				for stored_x in x_array:
					
					#If a detected x is within 10 pixels of an already stored x, the two
					#detections are too close, which means the rectangles overlap
					#If they overlap, break out of this loop and try the next contour
					if abs(stored_x - x) <= 10:
						
						overlap = True
						break
						
				#If no overlap occurred
				if not overlap:
					
					#Crop digit into a roi
					roi = cropDigit(thresh, x, y, w, h)
					ori = cropDigit(im, x, y, w, h)
					
	            
					#Denoise image if blurred
					ori = cv2.fastNlMeansDenoisingColored(ori,None,10,10,7,21)
					
					#Save temp image 
					saveTempImage(barcodeNum, ori)
					
					#Save temp image's x value as a key to a dictionary along with its name
					x_dict[x] = "a" + str(index) + ".png"
					
					#Store x value into an array that is used for checking overlapping
					x_array.append(x)
					
				else:
					overlap = False
	
	#Sort image in order using their x values
	
	sortImage(barcodeNum, x_dict)
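
detectNumber calls several helpers (cropDigit, saveTempImage, sortImage) that live elsewhere in the project. As a rough sketch of the cropping step only, assuming it simply extracts the digit's padded bounding box:

def cropDigit(image, x, y, w, h, pad=2):
    # Hypothetical: slice out the bounding box with a small pad, clamped to the image border
    return image[max(y - pad, 0):y + h + pad, max(x - pad, 0):x + w + pad]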
Beispiel #53
0
# Assumes `image` is a grayscale license-plate crop loaded earlier in the script
from skimage import measure
import numpy as np
import imutils
import cv2

thresh = cv2.threshold(image,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
labels = measure.label(thresh, connectivity=2, background=0)  # connectivity=2 is the modern spelling of neighbors=8
charCandidates = np.zeros(thresh.shape,dtype="uint8")

for label in np.unique(labels):
    if label == 0:
        continue

    labelMask = np.zeros(thresh.shape, dtype="uint8")
    labelMask[labels == label] = 255
    cnts = cv2.findContours(labelMask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # unpacks correctly on OpenCV 2, 3 and 4

    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        (boxX, boxY, boxW, boxH) = cv2.boundingRect(c)

        aspectRatio = boxW / float(boxH)
        solidity = cv2.contourArea(c) / float(boxW * boxH)
        heightRatio = boxH / float(image.shape[0])

        keepAspectRatio = aspectRatio < 1.0
        keepSolidity = solidity > 0.15
        keepHeight = heightRatio > 0.4 and heightRatio < 0.95

        if keepAspectRatio and keepSolidity and keepHeight:
            hull = cv2.convexHull(c)
            cv2.drawContours(charCandidates, [hull], -1, 255, -1)
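
The snippets on this page unpack cv2.findContours into two or three values depending on the OpenCV version they target: 2.x and 4.x return (contours, hierarchy), while 3.x returns (image, contours, hierarchy). A dependency-free equivalent of imutils.grab_contours makes any of them version-proof:

def grab_contours(ret):
    # cv2.findContours returns 2 values on OpenCV 2.4/4.x and 3 values on OpenCV 3.x
    if len(ret) == 2:
        return ret[0]
    if len(ret) == 3:
        return ret[1]
    raise ValueError("unexpected cv2.findContours return format")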


def process(options: TAOptions) -> List[TAResult]:
    results = []
    output_prefix = join(options.output_directory, options.input_stem)
    print(f"Extracting traits from '{options.input_name}'")

    # read grayscale image
    gray_image = imageio.imread(options.input_file, as_gray=True)
    if len(gray_image) == 0:
        raise ValueError(f"Image is empty: {options.input_name}")

    # read color image
    color_image = imageio.imread(options.input_file, as_gray=False)
    if len(color_image) == 0:
        raise ValueError(f"Image is empty: {options.input_name}")

    # kernel = np.ones((7, 7), np.uint8)
    # dilated_image = cv2.dilate(blurred_image, kernel, iterations=1)
    # eroded_image = cv2.erode(dilated_image, kernel, iterations=1)
    # imageio.imwrite(f"{output_prefix}.dilated.png", dilated_image)
    # imageio.imwrite(f"{output_prefix}.eroded.png", eroded_image)

    # binary threshold
    masked_image = thresholding.binary_threshold(gray_image.astype(np.uint8))
    imageio.imwrite(f"{output_prefix}.mask.png", skimage.img_as_uint(masked_image))

    # closing (dilation/erosion)
    kernel = np.ones((7, 7), np.uint8)
    dilated_image = cv2.dilate(masked_image, kernel, iterations=1)
    eroded_image = cv2.erode(dilated_image, kernel, iterations=1)
    imageio.imwrite(f"{output_prefix}.dilated.png", dilated_image)
    imageio.imwrite(f"{output_prefix}.eroded.png", eroded_image)

    # circle detection
    # print(f"Finding circles")
    # detected_circles = cv2.HoughCircles(cv2.blur(eroded_image.copy(), (5, 5)),
    #                                     cv2.HOUGH_GRADIENT, 1, 40, param1=40,
    #                                     param2=39, minRadius=20, maxRadius=500)

    # circle_detection_copy = color_image.copy()
    # if detected_circles is not None:
    #     detected_circles = np.uint16(np.around(detected_circles))
    #     for pt in detected_circles[0, :]:
    #         a, b, r = pt[0], pt[1], pt[2]
    #         cv2.circle(circle_detection_copy, (a, b), r, (0, 255, 0), 2)
    #         cv2.circle(circle_detection_copy, (a, b), 1, (0, 0, 255), 3)

    #     cv2.imwrite(f"{output_prefix}.circles.png", circle_detection_copy)

    # contour detection
    # TODO exclude shapes which are square or rectangular within a certain error range
    # TODO compute and return area/curvature/solidity for each contour
    print(f"Finding contours")
    closed_image = cv2.morphologyEx(dilated_image.copy(), cv2.MORPH_CLOSE, kernel)
    contours, hierarchy = cv2.findContours(closed_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours_image = color_image.copy()
    min_area = 10000
    max_area = 200000
    filtered_contours = []
    i = 0
    for contour in contours:
        i += 1
        cnt = cv2.approxPolyDP(contour, 0.035 * cv2.arcLength(contour, True), True)
        bounding_rect = cv2.boundingRect(cnt)
        (x, y, w, h) = bounding_rect
        min_rect = cv2.minAreaRect(cnt)
        area = cv2.contourArea(contour)
        rect_area = w * h
        # keep contours whose area differs from the bounding-box area by more than
        # 30% of the box area, i.e. skip square/rectangle-like shapes (see the TODO above)
        if max_area > area > min_area and abs(area - rect_area) / rect_area > 0.3:
            filtered_contours.append(contour)

            # draw and label contours
            cv2.drawContours(contours_image, [contour], 0, (0, 255, 0), 3)
            cv2.putText(contours_image, str(i), (x + 30, y + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # draw min bounding box
            # box = np.int0(cv2.boxPoints(min_rect))
            # cv2.drawContours(contours_image, [box], 0, (0, 0, 255), 2)

            # draw min bounding box
            # box = np.int0(cv2.boxPoints(bounding_rect))
            # cv2.drawContours(contours_image, [bounding_rect], 0, (0, 0, 255), 2)

            result = TAResult(
                id=str(i),
                area=area,
                solidity=min(round(area / rect_area, 4), 1),
                max_height=h,
                max_width=w)
            results.append(result)

    print(f"Kept {len(filtered_counters)} of {len(contours)} total contours")
    cv2.imwrite(f"{output_prefix}.contours.png", contours_image)

    # edge detection
    print(f"Finding edges")
    edges_image = cv2.Canny(color_image, 100, 200)
    cv2.imwrite(f"{output_prefix}.edges.png", edges_image)

    return results
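
TAOptions and TAResult are defined elsewhere; a minimal, hypothetical TAOptions with the four fields process() actually reads, plus a call site:

from dataclasses import dataclass
from os.path import basename, splitext

@dataclass
class TAOptions:
    input_file: str
    output_directory: str

    @property
    def input_name(self) -> str:
        return basename(self.input_file)

    @property
    def input_stem(self) -> str:
        return splitext(self.input_name)[0]

# Hypothetical usage; TAResult would be a similar small record type
results = process(TAOptions(input_file="root_scan.png", output_directory="out"))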
Beispiel #55
0
    # resize the image by 0.5
    imgThres = cv2.resize(imgThres, (0, 0), None, 0.5, 0.5)
    frame = cv2.resize(frame, (0, 0), None, 0.5, 0.5)
    img = cv2.resize(img, (0, 0), None, 0.5, 0.5)

    # find contours
    image, contours, hierarchy = cv2.findContours(imgThres, cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    image2, contours2, hierarchy2 = cv2.findContours(img, cv2.RETR_TREE,
                                                     cv2.CHAIN_APPROX_SIMPLE)

    # find contour and match rectangles
    # contours from HSV image
    rectangles = []
    for i in range(0, len(contours)):
        x, y, w, h = cv2.boundingRect(contours[i])
        if w >= 5 and h >= 5:
            rectangles.append((x, y, w, h))
            #    cv2.rectangle(frame, (x, y), (x + w, y + h), (160, 32, 240), 2)
    rectangles = rectangleFilter(rectangles)

    for i in rectangles:
        x, y, w, h = i[0], i[1], i[2], i[3]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (160, 32, 240), 2)

    # contours from frame difference
    rectangles2 = []
    for i in range(0, len(contours2)):
        x, y, w, h = cv2.boundingRect(contours2[i])
        if w >= 5 and h >= 5:
            rectangles2.append((x, y, w, h))
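
rectangleFilter is not shown in this excerpt; a plausible sketch, assuming it just discards rectangles that are fully contained inside larger ones:

def rectangleFilter(rectangles):
    # Hypothetical: keep only rectangles not contained in another rectangle
    def inside(a, b):
        ax, ay, aw, ah = a
        bx, by, bw, bh = b
        return ax >= bx and ay >= by and ax + aw <= bx + bw and ay + ah <= by + bh

    return [a for a in rectangles
            if not any(a is not b and inside(a, b) for b in rectangles)]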
    def checktrigger(self):
        crop_top = 0
        crop_bottom = 1
        crop_left = 0
        crop_right = 1
        l = int(self.res[0] * crop_left)
        r = int(self.res[0] * crop_right)
        t = int(self.res[1] * crop_top)
        b = int(self.res[1] * crop_bottom)
        vid_frames = []
        avg_centroids = []
        total_frames = 0
        total_time = 0
        logging.info("WATCHDOG_USEC=" + self.watchdog_usec)
        reading = True
        while reading:
            reading, f = self.cam.read()
            self.notifier.notify("WATCHDOG=1")
            start = dt.now()
            if not self.cont:
                break
            frame = f[t:b, l:r]
            imgrey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            imgrey = cv2.GaussianBlur(imgrey, (21, 21), 0)
            if self.background is None:
                self.background = imgrey.copy().astype("float")
                self.save_bg()
                logging.info("initialised background")
                continue

            cv2.accumulateWeighted(imgrey, self.background, 0.5)
            frame_delta = cv2.absdiff(imgrey, cv2.convertScaleAbs(self.background))
            frame_threshold = cv2.threshold(frame_delta, 20, 255, cv2.THRESH_BINARY)[1]
            frame_dilated = cv2.dilate(frame_threshold, None, iterations=10)
            im2, contours, hier = cv2.findContours(frame_dilated, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            motion = [c for c in contours if cv2.contourArea(c) >= self.min_area]
            frame_centroids = []

            for c in motion:
                x, y, w, h = cv2.boundingRect(c)
                # cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                m = cv2.moments(c)
                cx = int(m['m10'] / m['m00'])
                frame_centroids.append(cx)

            if len(motion) == 0:
                if self.moving_frames != 0:
                    self.silent_frames += 1
                    if self.silent_frames == self.max_silent_frames:
                        if self.moving_frames >= self.min_moving_frames:
                            logging.info("movement ended")
                            self.make_vid(vid_frames, avg_centroids)
                        else:
                            logging.info("false alarm, sorry")
                        self.save_bg()
                        self.moving_frames = 0
                        vid_frames.clear()
                        avg_centroids.clear()
                    else:
                        vid_frames.append(f)
                        avg_centroids.append(avg_centroids[-1])
            else:
                self.moving_frames += 1
                self.silent_frames = 0
                vid_frames.append(f)
                avg_centroids.append(np.mean(frame_centroids))
                if self.moving_frames == 1:
                    logging.info("what was that??")
                logging.info("Chance of rain in 3 hours beginning " + \
                             str(int(self.weatherPerson.slot / 60)).ljust(4, '0') + \
                             " is " + self.weatherPerson.lastRainForecast + "%")
                if self.moving_frames == self.min_moving_frames:
                    logging.info("movement detected")
                if self.moving_frames == self.max_moving_frames:
                    logging.warning("this has been going on too long, stopping")
                    self.make_vid(vid_frames, avg_centroids)
                    self.save_bg()
                    self.moving_frames = 0
                    vid_frames.clear()
                    avg_centroids.clear()

            total_time += (dt.now() - start).microseconds / 1000000
            total_frames += 1
            self.fps = total_frames / total_time
            frame_centroids.clear()

        self.cam.release()
        logging.info("camera closed, thread terminated")
    for j in range(0, len(A)):
        Ant = np.zeros((1, len(A[j])))
        for i in range(0, len(A[j])):
            Ant[0][i] = float(A[j][i])
        Ant = list(map(tuple, Ant))  # list() is needed in Python 3, where map returns an iterator
        Dic.update({j + 1: Ant[0]})

##########################################Representation and classification of the coded targets################################################

#### finding the ROI of each shot-code

_, contours, _ = cv2.findContours(img_5, cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)

for (counter, cnt) in enumerate(contours):
    (x, y, w, h) = cv2.boundingRect(cnt)
    ROI = img_prime[4 * y - 5:4 * (y + h) + 5, 4 * x - 5:4 * (x + w) + 5]

    #### obtain all the subedges of the ROI, if there is just one subregion, ignore it

    EDG = cv2.Canny(ROI, 0, 255)
    _, subcontours, _ = cv2.findContours(EDG.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
    if len(subcontours) == 1:
        continue  # a lone sub-region cannot be a coded target, so skip this ROI

#### fitting an ellipse to each subedge and finding the one with minimum fitting error

    I = np.zeros(ROI.shape)
    E = []
    CENTER = []
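
The excerpt stops right after initializing I, E and CENTER. A sketch of the step the comment describes, fitting an ellipse to each sub-edge and keeping the one with the smallest fitting error, continuing at the same indentation inside the loop; the error here is measured crudely as the pixel mismatch between the drawn contour and the drawn ellipse (an assumption, not necessarily the original metric):

    best_ellipse, best_err = None, float("inf")
    for sc in subcontours:
        if len(sc) < 5:  # cv2.fitEllipse needs at least five points
            continue
        ellipse = cv2.fitEllipse(sc)
        mask_contour = np.zeros(ROI.shape[:2], np.uint8)
        mask_ellipse = np.zeros(ROI.shape[:2], np.uint8)
        cv2.drawContours(mask_contour, [sc], -1, 255, 1)
        cv2.ellipse(mask_ellipse, ellipse, 255, 1)
        err = np.count_nonzero(cv2.bitwise_xor(mask_contour, mask_ellipse))
        E.append(err)
        CENTER.append(ellipse[0])
        if err < best_err:
            best_ellipse, best_err = ellipse, err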
Beispiel #58
0
# plt.title('Original Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(edges,cmap = 'gray')
# plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
#
# plt.show()

image = cv2.imread("manga2.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # grayscale
# thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2) # threshold
thresh = cv2.Canny(gray, 100, 200)
# A median blur should remove speckle noise, since text regions are relatively
# dense; note that ksize=1 is a no-op, so use 3 or more for an actual effect
thresh = cv2.medianBlur(thresh, 1)
cv2.imshow('image', thresh)
cv2.waitKey(0)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(thresh, kernel, iterations=12)  # dilate
s, contours, hierarchy = cv2.findContours(
    dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # get contours

# for each contour found, draw a rectangle around it on original image
for contour in contours:
    [x, y, w, h] = cv2.boundingRect(contour)

    if h > 200 and w > 200:
        continue

    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    print("done")

# write original image with added contours to disk
cv2.imwrite("contoured.jpg", image)
    while True:
        ret, frame = cap.read()
        if ret:
            fgmask = bgsMOG.apply(frame, None, 0.01)
            # To find the contours of the objects
            _, contours, hierarchy = cv2.findContours(fgmask,
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_SIMPLE)
            # cv2.drawContours(frame,contours,-1,(0,255,0),cv2.cv.CV_FILLED,32)
            try:
                hierarchy = hierarchy[0]
            except TypeError:
                # hierarchy is None when no contours were found
                hierarchy = []
            a = []
            for contour, hier in zip(contours, hierarchy):
                (x, y, w, h) = cv2.boundingRect(contour)

                if w > 30 and h > 30:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                    (x, y, w, h) = cv2.boundingRect(contour)

                    x1 = w / 2
                    y1 = h / 2
                    cx = x + x1
                    cy = y + y1
                    a.append([cx, cy])
                    # print(len(a))

            cv2.imshow('BGS', fgmask)
            cv2.imshow('Ori+Bounding Box', frame)
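
cap and bgsMOG come from outside this excerpt. A plausible setup, noting that the classic MOG subtractor lives in the contrib bgsegm module on OpenCV 3+ (the file name is hypothetical):

import cv2

cap = cv2.VideoCapture("traffic.avi")
try:
    bgsMOG = cv2.bgsegm.createBackgroundSubtractorMOG()
except AttributeError:
    # fall back to MOG2, which ships with the main OpenCV package
    bgsMOG = cv2.createBackgroundSubtractorMOG2()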
Beispiel #60
0
def get_contour_precedence(contour, cols, tolerance_factor=10):
    origin = cv2.boundingRect(contour)
    return ((origin[1] // tolerance_factor) * tolerance_factor) * cols + origin[0]
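
Typical usage sorts contours into reading order (row by row, left to right within each row); here `binary` stands for any thresholded image:

contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4 return signature
contours = sorted(contours, key=lambda c: get_contour_precedence(c, binary.shape[1]))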