예제 #1
0
def find_lines(frame):
    # Resize to 640x480
    frame_small = cv.CreateMat(480, 640, cv.CV_8UC3)
    cv.Resize(frame, frame_small)

    # Threshold by distance: blank out all top pixels
    cv.Rectangle(frame_small, (0, 0), (640, 80), (0, 0, 0, 0), cv.CV_FILLED)

    frame_size = cv.GetSize(frame_small)
    frame_gray = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(frame_small, frame_gray, cv.CV_BGR2GRAY)

    cv.Canny(frame_gray, edges, 400, 400)
    cv.Dilate(edges, edges,
              cv.CreateStructuringElementEx(3, 3, 0, 0, cv.CV_SHAPE_RECT))
    cv.Erode(edges, edges,
             cv.CreateStructuringElementEx(1, 1, 0, 0, cv.CV_SHAPE_RECT))

    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(edges, line_storage, cv.CV_HOUGH_PROBABILISTIC, 1,
                           cv.CV_PI / 180.0, 300, 100, 40)
    print len(lines), 'lines found'
    for i in range(len(lines)):
        line = lines[i]
        cv.Line(frame_small, line[0], line[1],
                hv2rgb(360.0 * i / len(lines), 1.0), 3, 8)

    cv.ShowImage('frame', frame_small)
    cv.ShowImage('edges', edges)
예제 #2
0
def get_hand(img):
    """Segment a hand-coloured region of *img* by HSV range thresholding.

    Smooths the input, converts it to HSV and keeps pixels whose
    hue/saturation fall inside the global [h_lower, h_upper] /
    [s_lower, s_upper] window.  The contour / convex-hull analysis is
    currently disabled (commented out), so only the blank "result"
    window is shown.
    """
    global h_lower, h_upper, s_lower, s_upper
    storage = cv.CreateMemStorage(0)
    # Placeholders for the disabled contour analysis below.
    contours, hull, max_contours, max_hull = (0, 0, 0, 0)
    max_rect = (1, 1, 100, 100)
    dst = cv.CreateImage((img.width, img.height), 8, 3)
    hsv = cv.CreateImage((img.width, img.height), 8, 3)
    frame = cv.CreateImage((img.width, img.height), 8, 1)
    con = cv.CreateImage((img.width, img.height), 8, 1)
    cv.Zero(con)
    # Four Gaussian passes in total to suppress noise before thresholding.
    cv.Smooth(img, dst, cv.CV_GAUSSIAN, 5, 5)
    for i in range(3): cv.Smooth(dst, dst, cv.CV_GAUSSIAN, 5, 5)
    # NOTE(review): converts with CV_RGB2HSV although camera frames are
    # usually BGR — confirm the channel order of *img*.
    cv.CvtColor(dst, hsv, cv.CV_RGB2HSV)
    cv.InRangeS(hsv, (h_lower, s_lower, 0), (h_upper, s_upper, 256), frame) 
    kernel = cv.CreateStructuringElementEx(3, 3, 0, 0, cv.CV_SHAPE_RECT)  # unused while morphology is disabled
    #cv.MorphologyEx(frame, frame, None, kernel, cv.CV_MOP_CLOSE , 7)
#    cv.MorphologyEx(frame, frame, None, kernel, cv.CV_MOP_OPEN , 3)
    #contours = im.find_contours(frame)
    #hull = im.find_convex_hull(contours)
    print contours

    #max_hull_area, max_contour_area = (0, 0)
    #print "xxxxxxxx"
    #contour = contours.h_next()
    #print "........"
    #while (contour != 0):
    #    hull = cv.ConvexHull2(contour, storage, cv.CV_CLOCKWISE, 1);
    #    maxv = cv.ContourArea(hull)
    #    contour = contour.h_next()
    #cv.DrawContours(con, contours, red, blue, 1, 3, 8)
    

    # "con" is never drawn on while the analysis is disabled, so this
    # shows an all-black image.
    cv.ShowImage("result", con)
예제 #3
0
    def __init__(self,img):
        """Build a hue/saturation skin mask for *img* and display the stages.

        Relies on module-level globals: ``image_scale`` (downscale
        factor), ``H`` and ``S`` (reference skin hue/saturation),
        ``hasColor`` and ``getSkinColor``.
        """
        # Downscale first so the per-pixel loop below stays cheap.
        small_img = cv.CreateImage((cv.Round(img.width / image_scale),cv.Round(img.height / image_scale)), 8, 3)
        cv.Resize(img, small_img, cv.CV_INTER_LINEAR)

        # NOTE(review): re-samples the skin colour only when both H and S
        # are already non-zero — confirm this is not inverted.
        if H!=0 and S !=0:
            getSkinColor(small_img, hasColor)

        imgHSV = cv.CreateImage(cv.GetSize(small_img), 8, 3)
        cv.CvtColor(small_img, imgHSV, cv.CV_BGR2HSV);

        # One single-channel matrix per HSV plane.
        hueImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        satImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        valImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        cv.Split(imgHSV, hueImg, satImg, valImg, None)

        cv.ShowImage("hueImg", hueImg)

        # Manual threshold: keep pixels whose hue is within +-hueDiff of H
        # (and above 1, skipping near-undefined hues) and whose saturation
        # is within +-satDiff of S.  Matrices are indexed [row, col].
        hueTrshld = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)
        hueDiff = 30
        satDiff = 80
        for x in range(0, hueTrshld.height):
            for y in range(0, hueTrshld.width):
                hueTrshld[x,y] = 0
                if hueImg[x,y]>(H-hueDiff) and hueImg[x,y]>(1) and hueImg[x,y]<(H+hueDiff):
                    if satImg[x,y]>(S-satDiff) and satImg[x,y]<(S+satDiff):
                        hueTrshld[x,y] = 255

        hueTrshldErode = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)        
        hueTrshldDilate = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)        


        # Only kernel6 and kernel10 are used below; kernel8/kernel4 are unused.
        kernel10 = cv.CreateStructuringElementEx(10,10,0,0, cv.CV_SHAPE_RECT)
        kernel8 = cv.CreateStructuringElementEx(8,8,0,0, cv.CV_SHAPE_RECT)
        kernel6 = cv.CreateStructuringElementEx(6,6,0,0, cv.CV_SHAPE_RECT)
        kernel4 = cv.CreateStructuringElementEx(4,4,0,0, cv.CV_SHAPE_RECT)

        # Erode to drop speckle, then dilate with a larger kernel to
        # close the remaining blobs.
        cv.Erode(hueTrshld, hueTrshldErode, kernel6, 1)
        cv.Dilate(hueTrshldErode, hueTrshldDilate, kernel10, 1)

        
        cv.ShowImage("hueTrshldOr", hueTrshld) #original
        cv.ShowImage("hueTrshldDi", hueTrshldDilate) #dilated
        cv.ShowImage("hueTrshldEr", hueTrshldErode)  #eroded
예제 #4
0
    def find_blobs(self, frame, debug_image):
        '''Find blobs in an image.

        Hopefully this gets blobs that correspond with
        buoys, but any intelligent checking is done outside of this function.

        Combines an adaptive threshold on the HSV saturation channel with
        one on the BGR red channel, cleans both masks morphologically,
        ANDs them, and labels the connected components.

        Returns:
            (blobs, labeled_image) as produced by libvision.blob.find_blobs.
        '''

        # Get Channels
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        saturation = libvision.misc.get_channel(hsv, 1)
        red = libvision.misc.get_channel(frame, 2)

        # Adaptive Threshold.  The blocksize expression (b - b % 2 + 1)
        # forces the odd value that AdaptiveThreshold requires.
        cv.AdaptiveThreshold(
            saturation,
            saturation,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.saturation_adaptive_thresh_blocksize -
            self.saturation_adaptive_thresh_blocksize % 2 + 1,
            self.saturation_adaptive_thresh,
        )
        # Negative constant: keep pixels noticeably brighter than the
        # local mean in the red channel.
        cv.AdaptiveThreshold(
            red,
            red,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY,
            self.red_adaptive_thresh_blocksize -
            self.red_adaptive_thresh_blocksize % 2 + 1,
            -1 * self.red_adaptive_thresh,
        )

        # Morphological opening (erode then dilate) on both masks to
        # remove small false positives.
        kernel = cv.CreateStructuringElementEx(9, 9, 4, 4, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(saturation, saturation, kernel, 1)
        cv.Dilate(saturation, saturation, kernel, 1)
        cv.Erode(red, red, kernel, 1)
        cv.Dilate(red, red, kernel, 1)

        # A buoy candidate must pass both the saturation and red tests.
        buoys_filter = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.And(saturation, red, buoys_filter)

        if debug_image:
            svr.debug("Saturation", saturation)
            svr.debug("Red", red)
            svr.debug("AdaptiveThreshold", buoys_filter)

        # Get blobs
        labeled_image = cv.CreateImage(cv.GetSize(buoys_filter), 8, 1)
        blobs = libvision.blob.find_blobs(buoys_filter, labeled_image,
                                          MIN_BLOB_SIZE, 10)

        return blobs, labeled_image
예제 #5
0
    def detect_ball(self, hsv_img, erd_mat, erd, dil_mat, dil):
        """Locate the red ball in *hsv_img*.

        Args:
            hsv_img: HSV input frame.
            erd_mat, dil_mat: (width, height) of the erode/dilate kernels.
            erd, dil: iteration counts for erode/dilate.

        Returns:
            (threshold_image, center) where center is an integer vector,
            or None when no contour was found.
        """
        size = cv.GetSize(hsv_img)
        # colours on pitch2 (note conversion is from BGR2HSV not RGB2HSV!)
        trsh_im = self.red_color_.in_range_s(hsv_img)

        # Dilate first, then erode (a closing) to fill small holes in the
        # mask.  The literal 0 shape argument is CV_SHAPE_RECT.
        cv.Dilate(
            trsh_im, trsh_im,
            cv.CreateStructuringElementEx(dil_mat[0], dil_mat[1], 0, 0, 0),
            dil)
        cv.Erode(
            trsh_im, trsh_im,
            cv.CreateStructuringElementEx(erd_mat[0], erd_mat[1], 0, 0, 0),
            erd)

        # FindContours mutates its input, so work on a copy and return
        # the untouched mask.
        tmp_im = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.Copy(trsh_im, tmp_im)
        largest = find_largest_contour(
            cv.FindContours(tmp_im, cv.CreateMemStorage(0),
                            cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE))
        if not largest: return trsh_im, None
        return trsh_im, Math.int_vec(get_contour_center(cv.Moments(largest)))
예제 #6
0
def max_contrast(image):
    """Maximise the contrast of an image using top and bottom hat filters."""
    dims = cv.GetSize(image)
    top_hat = cv.CreateImage(dims, cv.IPL_DEPTH_8U, 1)
    black_hat = cv.CreateImage(dims, cv.IPL_DEPTH_8U, 1)
    brightened = cv.CreateImage(dims, cv.IPL_DEPTH_8U, 1)
    result = cv.CreateImage(dims, cv.IPL_DEPTH_8U, 1)
    # 3x3 elliptical structuring element anchored at its centre (1, 1).
    kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
    cv.MorphologyEx(image, top_hat, None, kernel, cv.CV_MOP_TOPHAT, 1)
    cv.MorphologyEx(image, black_hat, None, kernel, cv.CV_MOP_BLACKHAT, 1)
    # Adding the top hat emphasises bright detail; subtracting the black
    # hat then suppresses dark detail.
    cv.Add(image, top_hat, brightened)
    cv.Sub(brightened, black_hat, result)
    return result
예제 #7
0
def get_angel(src):
    """Estimate the skew angle of the bright region in *src*.

    Erodes the image, scans the top row and one side column for the
    first transition between background and foreground (using a fixed
    brightness cutoff of 240), and derives an angle from the two
    transition offsets.

    Returns:
        (alpha, cx, cy): angle in degrees (sign depends on which corner
        is bright) and the image centre coordinates.

    Depends on module-level ``element_shape`` and ``PI``; uses PIL's
    ``Image.fromstring`` to read pixels.
    """
    pos = 6
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    ni = cv.CreateImage(cv.GetSize(src), src.depth, src.nChannels)
    cv.Erode(src, ni, element, 1)

    image_gray = cv.CreateImage(cv.GetSize(ni), 8, 1)
    cv.CvtColor(ni, image_gray, cv.CV_RGB2GRAY)
    pi = Image.fromstring("L", cv.GetSize(ni), image_gray.tostring())
    # first = brightness class of the top-left pixel (0 dark / 255 bright).
    first = 0 if pi.getpixel((0, 0)) < 240 else 255
    xstart = xend = ystart = yend = 0
    # Scan the top row for the first pixel that flips class.
    for x in xrange(1, pi.size[0]):
        v = 0 if pi.getpixel((x, 0)) < 240 else 255
        if first == 0 and v != 0:
            xstart = x
            xend = pi.size[0]
            break
        if first == 255 and v != 255:
            xstart = 0
            xend = x
            break

    # Scan the left (bright corner) or right (dark corner) column for the
    # first non-bright pixel.
    if first == 255:
        for y in xrange(pi.size[1]):
            v = 0 if pi.getpixel((0, y)) < 240 else 255
            if v != 255:
                yend = y
                break
    else:
        for y in xrange(pi.size[1]):
            v = 0 if pi.getpixel((pi.size[0] - 1, y)) < 240 else 255
            if v != 255:
                yend = y
                break

    # NOTE(review): ystart is never reassigned, so a == yend; confirm
    # that is intended and not a missing "ystart = ..." branch.
    a = yend - ystart
    # Guard against division by zero when no horizontal transition found.
    b = xend - xstart or 1
    alpha = atan(a * 1.0 / b) / (PI * 1.0 / 180)
    if first == 255:
        alpha = -alpha

    return (alpha, pi.size[0] * 1.0 / 2, pi.size[1] * 1.0 / 2)
예제 #8
0
def camera():
	"""Main capture-and-track loop for a robot-football vision system.

	Grabs frames from the default camera, undistorts them with
	hard-coded intrinsics, crops a pitch ROI, converts to HSV and
	thresholds the hue plane to segment the ball and the blue/yellow
	robot markers.  Robots are located via the two deepest convexity
	defects of their T-shaped markers; everything is drawn and shown in
	the "camera" window until a key is pressed.

	NOTE(review): several calls (GetSize, GetImage, CvtColor, ShowImage,
	Sub, Set2D, NamedWindow, ArcLength, CV_RGB2HSV) are unprefixed and
	rely on a star import of the cv module — confirm the file's imports.
	"""
	print "# Starting initialization..."
	#camera capture
	#cap = cv.CaptureFromCAM(0)
	intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
	cv.Zero(intrinsics)
	#camera data (hard-coded calibration for this specific camera)
	intrinsics[0, 0] = 1100.850708957251072
	intrinsics[1, 1] = 778.955239997982062 
	intrinsics[2, 2] = 1.0
	intrinsics[0, 2] = 348.898495232253822
	intrinsics[1, 2] = 320.213734835526282
	dist_coeffs = cv.CreateMat(1, 4, cv.CV_64FC1)
	cv.Zero(dist_coeffs)
	dist_coeffs[0, 0] = -0.326795877008420
	dist_coeffs[0, 1] = 0.139445565548056
	dist_coeffs[0, 2] = 0.001245710462327
	dist_coeffs[0, 3] = -0.001396618726445
	#pFrame = cv.QueryFrame(cap)
	print "# intrinsics loaded!"

	#prepare memory
	capture = cv.CaptureFromCAM(0)
	src = cv.QueryFrame(capture)
	size = GetSize(src)
	dst0 = cv.CreateImage(size, src.depth, src.nChannels)
	# bg = cv.LoadImage("00000005.jpg")
	image_ROI = (0,70,640,340)
	# size is rebound to the cropped ROI dimensions for all work buffers.
	size = (640,340)

	red = cv.CreateImage(size, 8, 1)
	green = cv.CreateImage(size, 8, 1)
	blue = cv.CreateImage(size, 8, 1)

	hue = cv.CreateImage(size, 8, 1)
	sat = cv.CreateImage(size, 8, 1)
	val = cv.CreateImage(size, 8, 1)
	ball = cv.CreateImage(size, 8, 1)
	yellow = cv.CreateImage(size, 8, 1)

	ballx = 0
	bally = 0

	ballmiss = 0
	yellowmiss = 0
	bluemiss = 0

	dst2 = cv.CreateImage(size, 8, 3)
	hsv = cv.CreateImage(size,8,3)
	print "# base images created..."
#####------------------adjustment data---------------------###############
#shadow
	high = 40
	low = 300

#threshold
	thresBallInit = 116
	thresYellowInit = 94
	thresBlueInit = 18
	ballRangeInit = 8.0
	yellowRangeInit = 5.0
	blueRangeInit = 8.0
	ballRange = ballRangeInit
	yellowRange = yellowRangeInit
	blueRange = blueRangeInit
	ballMinRange = 1.5
	yellowMinRange = 2.5
	blueMinRange = 8.0
	thresBall = thresBallInit
	thresYellow = thresYellowInit
	thresBlue = thresBlueInit

#dilate
	ex = cv.CreateStructuringElementEx(3,3,1,1,cv.CV_SHAPE_RECT)
	ex2 = cv.CreateStructuringElementEx(2,2,1,1,cv.CV_SHAPE_RECT)
	ex5 = cv.CreateStructuringElementEx(5,5,1,1,cv.CV_SHAPE_RECT)

#ball
	ballcount = 15.0
	ballAreaInit = 105.0
	ballAreaRangeInit = 60.0
	ballArea = ballAreaInit
	ballAreaRange = ballAreaRangeInit
	ballMinAreaRange = 40.0
	ballcompact = 3.0

#blue
	bluecount = 30.0
	blueAreaInit = 300.0
	blueAreaRangeInit = 150.0
	blueArea = blueAreaInit
	blueAreaRange = blueAreaRangeInit
	blueMiniAreaRange = 50.0
	bluemaxdepth = 8.0
	blueminidepth = 2.5

#yellow
	yellowcount = 30.0
	yellowAreaInit = 450.0
	yellowAreaRangeInit = 200.0
	yellowArea = yellowAreaInit
	yellowAreaRange = yellowAreaRangeInit
	yellowMinAreaRange = 50.0
	yellowmaxdepth = 10.0
	yellowminidepth = 3.2




#####----------------------------------------




	#create window
	NamedWindow("camera",cv.CV_WINDOW_AUTOSIZE)
	#NamedWindow("ball",cv.CV_WINDOW_AUTOSIZE)
	#NamedWindow("yellow",cv.CV_WINDOW_AUTOSIZE)
	#NamedWindow("blue",cv.CV_WINDOW_AUTOSIZE)

	#NamedWindow("hue",cv.CV_WINDOW_AUTOSIZE)
	#NamedWindow("sat",cv.CV_WINDOW_AUTOSIZE)
	#NamedWindow("val",cv.CV_WINDOW_AUTOSIZE)
	timecount = 0

	onesec =  time.clock()
	storage = cv.CreateMemStorage()
	print "# starting capture..."
	print ''
	capture = cv.CaptureFromCAM(0)
	aa =  time.clock()
	while(True):
		timecount = timecount + 1
		src = cv.QueryFrame(capture)

		#barrel undistortion
		cv.Undistort2(src, dst0, intrinsics, dist_coeffs)
		#ROI = Region of Interests, crop the image 
		cv.SetImageROI(dst0,image_ROI)
		dst = GetImage(dst0)
		CvtColor(dst,hsv,CV_RGB2HSV)
		cv.Split(hsv,hue,sat,val,None)

#		ShowImage("hue",hue)
#		ShowImage("val",val)
#		ShowImage("sat",sat)

		# BALL
		# NOTE(review): the second Threshold reads from `hue`, discarding
		# the TOZERO_INV result above — the yellow path below reads from
		# its own intermediate instead; confirm which is intended.
		cv.Threshold(hue,ball,thresBallInit+ballRange, 255,cv.CV_THRESH_TOZERO_INV)
		cv.Threshold(hue,ball,thresBallInit-ballRange, 255,cv.CV_THRESH_BINARY)
		cv.Erode(ball,ball,ex2,1)
		cv.Dilate(ball,ball,ex2,1)
		ShowImage("ball",ball)
		# YELLOW
		cv.Threshold(hue,yellow,thresYellowInit+yellowRange,255,cv.CV_THRESH_TOZERO_INV)
		cv.Threshold(yellow,yellow,thresYellowInit-yellowRange,255,cv.CV_THRESH_BINARY)
		cv.Erode(yellow,yellow,ex2,1)
		cv.Dilate(yellow,yellow,ex2,1)
		ShowImage("yellow",yellow)

		# BLUE
#		CvtColor(dst,hsv,CV_BGR2HSV)
#		cv.Split(hsv,hue,sat,val,None)

		cv.Threshold(hue,blue,thresBlue+blueRange,255,cv.CV_THRESH_BINARY_INV)
#		cv.Threshold(blue,blue,4,255,cv.CV_THRESH_BINARY)
		cv.Erode(blue,blue,ex2,1)
		cv.Dilate(blue,blue,ex2,1)
		ShowImage("blue",blue)

		# Dark / unsaturated pixels become masks used to remove the walls.
		cv.Threshold(val,val,80,255,cv.CV_THRESH_BINARY_INV)
		cv.Threshold(sat,sat,80,255,cv.CV_THRESH_BINARY_INV)
		ShowImage("sat2",sat)
		ShowImage("val2",val)
		# Removes the walls
		Sub(blue,val,blue)
		Sub(blue,sat,blue)
		Sub(yellow,val,yellow)
		Sub(yellow,sat,yellow)
		Sub(ball,val,ball)
		Sub(ball,sat,ball)

		# Seed pixel at (4,4) — presumably keeps each mask non-empty so
		# FindContours always has input; confirm.
		Set2D(ball,4,4,255)
		Set2D(blue,4,4,255)
		Set2D(yellow,4,4,255)
		ShowImage("yellow3",yellow)
		ShowImage("ball3",ball)
		ShowImage("blue3",blue)


		#find ball

		seq = cv.FindContours(ball,storage,cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
		if seq != None:
			count = 0
			while (seq != None and count <= ballcount):
				count =count + 1
				# +0.01 avoids division by zero in the compactness ratio.
				area = cv.ContourArea(seq)+0.01
				compact =  ArcLength(seq)*ArcLength(seq)/(4*area*math.pi)
				if (area < 4 or area > (ballArea+ballAreaRange) or area < (ballArea-ballAreaRange)): #or compact >= ballcompact ):
					seq = seq.h_next()
					continue
				else:
					# Ball centre = mean of the contour points.
					ballx = 0
					bally = 0
					for p in seq:
						ballx = ballx + p[0]
						bally = bally + p[1]
					ballx = int(float(ballx)/len(seq))
					bally = int(float(bally)/len(seq))

###############--------------Auto adjustment
					print "ball area %f" %area
					print "ball hue: %f" %hue[bally,ballx]
#					thresBall = 0.2*hue[bally,ballx]+0.2*thresBall + 0.5*thresBallInit
#					ballArea = area
#					if(ballRange > ballMinRange):
#						ballRange = ballRange -0.1
#					if(ballAreaRange > ballMinAreaRange):
#						ballAreaRange = ballAreaRange -1.0
					cv.Circle(dst,(ballx,bally),4,cv.CV_RGB(255,255,255),2,8,0)
					cv.Circle(dst,(ballx,bally),10,cv.CV_RGB(255,0,0),9,8,0)
					break
			if(count > ballcount or seq == None):
#				thresBall = thresBallInit
#				ballRange = 0.5*ballRange + 0.5*ballRangeInit
#				ballArea = ballArea + 10.0
#				ballAreaRange = ballAreaRange + 0.1
				print ballAreaRange
				ballx = 0
				bally = 0
				ballmiss = ballmiss + 1
				print "\r# error: ball not found  "



		#find blue
		seq = cv.FindContours(blue,storage,cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
		if seq != None:
			count = 0
			while (seq != None and count <= bluecount):
				count =count + 1
				area = cv.ContourArea(seq)
				if(area < blueArea-blueAreaRange or area > blueArea+blueAreaRange):
					seq = seq.h_next()
					continue
				else:
					hull = None
					convex = None
					hull =cv.ConvexHull2(seq,storage)
					convex = cv.ConvexityDefects(seq,hull,storage)
					if (len(convex) > 1):
						convex = sorted(convex , key = lambda(k1,k2,k3,k4):k4)#sort by depth of the convex defect
						# The T marker must produce two defects within the
						# expected depth window; otherwise skip the contour.
						if (convex[len(convex)-1][3] < blueminidepth or convex[len(convex)-2][3] < blueminidepth or convex[len(convex)-1][3] > bluemaxdepth or convex[len(convex)-2][3] > bluemaxdepth ):
							seq = seq.h_next()
							continue
						else:
							#find the T
							blue_start1 = convex[len(convex)-1][0]
							blue_end1 = convex[len(convex)-1][1]
							blue_depth1 = convex[len(convex)-1][2]

							#draw the side line of T


							blue_start2 = convex[len(convex)-2][0]
							blue_end2 = convex[len(convex)-2][1]
							blue_depth2 = convex[len(convex)-2][2]


							blue_from = ((blue_depth1[0]+blue_depth2[0])/2,(blue_depth1[1]+blue_depth2[1])/2)#calculate the center of robot

							#calculate the end of direction vector, the two end point of the smaller distans
							if math.hypot(blue_start1[0]-blue_end2[0],blue_start1[1]-blue_end2[1])>math.hypot(blue_end1[0]-blue_start2[0],blue_end1[1]-blue_start2[1]):
								blue_to = ((blue_end1[0]+blue_start2[0])/2,(blue_end1[1]+blue_start2[1])/2)
							else:
								blue_to = ((blue_start1[0]+blue_end2[0])/2,(blue_start1[1]+blue_end2[1])/2)
							cv.Line(dst,blue_from,blue_to,cv.CV_RGB(255,0,255),2,8,0)
							cv.Circle(dst,blue_from,1,cv.CV_RGB(255,0,0),2,8,0)
							cv.Circle(dst,blue_to,1,cv.CV_RGB(0,0,0),2,8,0)
							cv.Circle(dst,blue_from,10,cv.CV_RGB(255,0,0),9,8,0)

#######---------------------------Auto Adjusting
							print "blue area %f" %area
#							print "blue hue: %f" %hue[blue_from[1],blue_from[0]]
#							thresBlue = hue[blue_from[1],blue_from[0]] #+ 0.4*thresBlue + 0.5*thresBlueInit
#							blueArea = area
#							if(blueAreaRange > blueMiniAreaRange):
#								blueAreaRange = blueAreaRange -1.0
#							if(blueRange > blueMinRange):
#								blueRange = blueRange - 0.1
							break
					else:
						seq = seq.h_next()
						continue
			if(count > bluecount or seq == None):
#				thresBlue = thresBlueInit
#				blueAreaRange = blueAreaRange + 10.0
#				blueArea = blueArea + 10.0
#				blueRange = 0.5*blueRange + 0.5*blueRangeInit
				bluemiss = bluemiss + 1
				blue_from = (0,0);
				blue_to = (0,0);
				print "\r# error: blue not found  "

		#find yellow (same structure as the blue branch above)
		seq = cv.FindContours(yellow,storage,cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
		if seq != None:			
			count = 0
			while (seq != None and count <= yellowcount):
				count =count + 1
				area = cv.ContourArea(seq)
				if(area < yellowArea-yellowAreaRange or area > yellowArea + yellowAreaRange):
					seq = seq.h_next()
					continue
				else:
					hull = None
					convex = None
					hull =cv.ConvexHull2(seq,storage)
					convex = cv.ConvexityDefects(seq,hull,storage)
					if (len(convex) > 1):
						convex = sorted(convex , key = lambda(k1,k2,k3,k4):k4)#sort by depth of the convex defect
						if (convex[len(convex)-1][3] < yellowminidepth or convex[len(convex)-2][3] < yellowminidepth or convex[len(convex)-1][3] > yellowmaxdepth or convex[len(convex)-2][3] > yellowmaxdepth ):
							seq = seq.h_next()
							continue
						else:
							#find the T
							yellow_start1 = convex[len(convex)-1][0]
							yellow_end1 = convex[len(convex)-1][1]
							yellow_depth1 = convex[len(convex)-1][2]
					
							#draw the side line of T

							yellow_start2 = convex[len(convex)-2][0]
							yellow_end2 = convex[len(convex)-2][1]
							yellow_depth2 = convex[len(convex)-2][2]
					
							yellow_from = ((yellow_depth1[0]+yellow_depth2[0])/2,(yellow_depth1[1]+yellow_depth2[1])/2)#calculate the center of robot
				
							#calculate the end of direction vector, the two end point of the smaller distans
							if math.hypot(yellow_start1[0]-yellow_end2[0],yellow_start1[1]-yellow_end2[1])>math.hypot(yellow_end1[0]-yellow_start2[0],yellow_end1[1]-yellow_start2[1]):
								yellow_to = ((yellow_end1[0]+yellow_start2[0])/2,(yellow_end1[1]+yellow_start2[1])/2)
							else:
								yellow_to = ((yellow_start1[0]+yellow_end2[0])/2,(yellow_start1[1]+yellow_end2[1])/2)


###########------------------------------Auto Adjusting
#							print cv.ContourArea(seq)
							print "yellow area %f" %area
							print "yellow hue: %f" %hue[yellow_from[1],yellow_from[0]]
#							thresYellow = hue[yellow_from[1],yellow_from[0]] #+ 0.4*thresYellow + 0.5*thresYellowInit
#							yellowArea = area
#							if(yellowRange > yellowMinRange):
#								yellowRange = yellowRange -0.1
#							if(yellowAreaRange > yellowMinAreaRange):
#								yellowAreaRange = yellowAreaRange - 1.0
#							yellow_miss = ((yellow_from[0]+yellow_to[0])/2,(yellow_from[1]+yellow_to[1])/2)

							cv.Line(dst,yellow_from,yellow_to,cv.CV_RGB(255,0,255),2,8,0)
							cv.Circle(dst,yellow_from,1,cv.CV_RGB(255,0,0),2,8,0)
							cv.Circle(dst,yellow_to,1,cv.CV_RGB(0,0,0),2,8,0)
							cv.Circle(dst,yellow_from,10,cv.CV_RGB(255,0,0),9,8,0)
							break
					else:
						seq = seq.h_next()
						continue
			if(count > yellowcount or seq == None):
				#thresYellow = thresYellowInit
#				yellowRange = 0.5*yellowRange + 0.5*yellowRangeInit
				#yellowArea = yellowArea
#				yellowAreaRange = yellowAreaRange + 10.0
				print "area:%d" %yellowArea
				yellowmiss = yellowmiss + 1
				yellow_from = (0,0);
				yellow_to = (0,0);
				print "\r# error: yellow not found"
		ballpos = (ballx,bally)
		#output(ballpos,blue_from,blue_to,yellow_from,yellow_to)
		ShowImage("camera",dst)
		# Restore the full-frame ROI before the next Undistort2 call.
		cv.SetImageROI(dst0,(0,0,640,480))
#		ShowImage("camera",dst0)

		# Any key press: print frame rate / miss statistics and exit.
		if( cv.WaitKey(2) >= 0 ):
			bb =  time.clock()
			print "frame rate: %f" %(timecount/(bb-aa))
			print "ball miss rate: %f" %(ballmiss)
			print "blue miss rate: %f" %(bluemiss)
			print "yellow miss rate: %f" %(yellowmiss)
			break;
예제 #9
0
        self.aktif = True

    def getDimensions(self):
        """Return this object's bounding box as an (x, y, width, height) tuple."""
        return self.x, self.y, self.width, self.height

    def centerOrigin(self):
        """Return (x, y) shifted back by half the width/height (the top-left corner)."""
        half_w = self.width / 2
        half_h = self.height / 2
        return (self.x - half_w, self.y - half_h)

    def update(self):
        """Advance the position by one step of the velocity vector ``hiz``."""
        vx = self.hiz[0]
        vy = self.hiz[1]
        self.x += vx
        self.y += vy


# Module-level capture/display setup (identifiers are Turkish:
# kamera = camera, cerceve_boyutu = frame size, yazdir_goruntuyu =
# "write the video" flag, fourcc_degeri = FOURCC value, fps_degeri = fps).
python_opencv_modulu.NamedWindow("Python - Bahçeşehir University Game",
                                 python_opencv_modulu.CV_WINDOW_AUTOSIZE)
# 9x9 elliptical structuring element anchored at its centre (4, 4).
elips_sekil_kalibi = python_opencv_modulu.CreateStructuringElementEx(
    9, 9, 4, 4, python_opencv_modulu.CV_SHAPE_ELLIPSE)
# -1 selects the first available camera.
kamera = python_opencv_modulu.CaptureFromCAM(-1)
# Frame size (width, height) read back from the capture device.
cerceve_boyutu = (int(
    python_opencv_modulu.GetCaptureProperty(
        kamera, python_opencv_modulu.CV_CAP_PROP_FRAME_WIDTH)),
                  int(
                      python_opencv_modulu.GetCaptureProperty(
                          kamera,
                          python_opencv_modulu.CV_CAP_PROP_FRAME_HEIGHT)))
yazdir_goruntuyu = False
fourcc_degeri = python_opencv_modulu.FOURCC('M', 'J', 'P', 'G')
fps_degeri = 30

# Create the MJPG writer only when recording is enabled.
if yazdir_goruntuyu:
    video_yazdir = python_opencv_modulu.CreateVideoWriter(
        "film.avi", fourcc_degeri, fps_degeri, cerceve_boyutu)
예제 #10
0
def find_loop(input_data, IterationClosing=6):
    """Detect a sample support (loop) in an image.

    Args:
        input_data: image filename (str) or numpy array.
        IterationClosing: iteration count for the closing procedure
            (NOTE(review): accepted but unused — the global NiteClosing
            is what MorphologyEx receives; confirm which is intended).

    Returns:
        ('Coord', x, y) on detection, ('No loop detected', -1, -1)
        when nothing is found, and an error tuple with -10 coordinates
        when the input cannot be opened.
        NOTE(review): the parcourt_contour failure path returns
        (0, 0, ('No loop detected', -1, -1)) — a different shape from
        every other return; confirm callers handle it.

    Also reads module globals Offset, XSize, YSize and the helpers
    WhiteBorder, Seuil_var, parcourt_contour, MapCont, integreCont.
    """
    #Definition variable Global
    global AIRE_MIN_REL
    global AIRE_MIN
    global NORM_IMG
    global NiteClosing
    global pointRef
    #Chargement image (load the image)
    try:
        if type(input_data) == str:
            #Image filename is passed
            img_ipl = cv.LoadImageM(input_data)
        elif type(input_data) == np.ndarray:
            img_ipl = cv.fromarray(input_data)
        else:
            print "ERROR : Input image could not be opened, check format or path"
            return (
                "ERROR : Input image could not be opened, check format or path",
                -10, -10)
    # NOTE(review): bare except hides the real failure; narrow it if possible.
    except:
        print "ERROR : Input image could not be opened, check format or path"
        return (
            "ERROR : Input image could not be opened, check format or path",
            -10, -10)
    img_cont = img_ipl  # NOTE(review): alias created but never used below
    NORM_IMG = img_ipl.width * img_ipl.height
    AIRE_MIN = NORM_IMG * AIRE_MIN_REL
    #traitement (processing)
    #Converting input image in Grey scale image
    img_gray_ini = cv.CreateImage((img_ipl.width, img_ipl.height), 8, 1)
    cv.CvtColor(img_ipl, img_gray_ini, cv.CV_BGR2GRAY)
    #Removing Offset from image
    img_gray_resize = cv.CreateImage(
        (img_ipl.width - 2 * Offset[0], img_ipl.height - 2 * Offset[1]), 8, 1)
    cv.SetImageROI(img_gray_ini,
                   (Offset[0], Offset[1], img_ipl.width - 2 * Offset[0],
                    img_ipl.height - 2 * Offset[1]))
    cv.Copy(img_gray_ini, img_gray_resize)
    # Create the images used for treatment.
    img_gray = cv.CreateImage((img_gray_resize.width, img_gray_resize.height),
                              8, 1)
    img_trait = cv.CreateImage((img_gray.width, img_gray.height), 8, 1)
    # image used for treatment is the same than img_gray_resize
    cv.Copy(img_gray_resize, img_gray)
    #Img is smoothed with an asymmetric kernel
    cv.Smooth(img_gray, img_gray, param1=11, param2=9)
    cv.Canny(img_gray, img_trait, 40, 60)
    # Laplacian treatment
    # Creating buffer image
    img_lap_ini = cv.CreateImage((img_gray.width, img_gray.height), 32, 1)
    img_lap = cv.CreateImage((img_lap_ini.width - 2 * Offset[0],
                              img_lap_ini.height - 2 * Offset[1]), 32, 1)
    # Creating buffer img
    img_lap_tmp = cv.CreateImage((img_lap.width, img_lap.height), 32, 1)
    #Computing laplacian
    cv.Laplace(img_gray, img_lap_ini, 5)
    #Applying Offset to avoid border effect
    cv.SetImageROI(img_lap_ini,
                   (Offset[0], Offset[1], img_lap_ini.width - 2 * Offset[0],
                    img_lap_ini.height - 2 * Offset[1]))
    #Copying laplacian treated image to final laplacian image
    cv.Copy(img_lap_ini, img_lap)
    #Apply an asymmetric smoothing
    cv.Smooth(img_lap, img_lap, param1=21, param2=11)
    #Define the Kernel for closing algorithm
    MKernel = cv.CreateStructuringElementEx(7, 3, 3, 1, cv.CV_SHAPE_RECT)
    # Closing contour procedure
    cv.MorphologyEx(img_lap, img_lap, img_lap_tmp, MKernel, cv.CV_MOP_CLOSE,
                    NiteClosing)
    # Converting img to an 8bit image
    img_lap8_ini = cv.CreateImage((img_lap.width, img_lap.height), 8, 1)
    cv.Convert(img_lap, img_lap8_ini)
    # Add white border to image
    mat_bord = WhiteBorder(np.asarray(img_lap8_ini[:]), XSize, YSize)
    # NOTE(review): this allocation is immediately discarded by the
    # rebinding to cv.fromarray on the next statement.
    img_lap8 = cv.CreateImage(
        (img_lap.width + 2 * XSize, img_lap.height + 2 * YSize), 8, 1)
    img_lap8 = cv.fromarray(mat_bord)
    #Compute threshold
    seuil_tmp = Seuil_var(img_lap8)
    #If Seuil_tmp is not null
    if seuil_tmp != 0:
        seuil = seuil_tmp
    #Else seuil is fixed to 20, which prevents wrong positive detection
    else:
        seuil = 20
    #Compute thresholded image
    img_lap_bi = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 1)
    img_lap_color = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 3)
    img_trait_lap = cv.CreateImage((img_lap8.width, img_lap8.height), 8, 1)
    #Compute thresholded image
    cv.Threshold(img_lap8, img_lap_bi, seuil, 255, cv.CV_THRESH_BINARY)
    #Gaussian smoothing on laplacian
    cv.Smooth(img_lap_bi, img_lap_bi, param1=11, param2=11)
    #Convert grayscale laplacian image to binary image using "seuil" as threshold value
    cv.Threshold(img_lap_bi, img_lap_bi, 1, 255, cv.CV_THRESH_BINARY_INV)
    cv.CvtColor(img_lap_bi, img_lap_color, cv.CV_GRAY2BGR)
    #Compute edge in laplacian image
    cv.Canny(img_lap_bi, img_trait_lap, 0, 2)
    #Find contour
    seqlapbi = cv.FindContours(img_trait_lap, cv.CreateMemStorage(),
                               cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
    #contour is filtered
    try:
        contour_list = parcourt_contour(seqlapbi, img_lap_color)
    except:
        # If an error is trapped then there is no loop detected.
        return (0, 0, ("No loop detected", -1, -1))


# If the contour list is not empty:
    NCont = len(contour_list)
    if (NCont > 0):
        # The CvSeq is inverted: X(i) becomes i(X).
        indice = MapCont(contour_list[0], img_lap_color.width,
                         img_lap_color.height)
        # The coordinate of the target is computed in the treated image.
        point_shift = integreCont(indice, contour_list[0])
        # The coordinates in the original image are computed taking the
        # Offset and the white border into account.
        point = (point_shift[0], point_shift[1] + 2 * Offset[0] - XSize,
                 point_shift[2] + 2 * Offset[1] - YSize)
    else:
        #Else no loop is detected
        point = ("No loop detected", -1, -1)
        Aire_Max = 0

    return point
예제 #11
0
def test_morph(image, do_show=False):
    """Open *image* with a 30x30 rectangular kernel; optionally display it."""
    # 30x30 rectangle anchored at its centre (15, 15).
    kernel = cv.CreateStructuringElementEx(30, 30, 15, 15, cv.CV_SHAPE_RECT)

    scratch = cv.CreateImage(cv.GetSize(image), image.depth, image.channels)
    opened = morph(image, scratch, kernel, cv.CV_MOP_OPEN)
    if do_show:
        show(morphed=opened)
예제 #12
0
def test_erode(image, do_show=False):
    """Erode *image* with a 3x3 rectangular kernel; optionally display it."""
    kernel = cv.CreateStructuringElementEx(3, 3, 2, 2, cv.CV_SHAPE_RECT)
    result = erode(image, kernel)
    if do_show:
        show(eroded=result)
예제 #13
0
# Binary threshold: pixels above `threshold` become `color` (white).
image = loadGreyscale()
threshold = 100
color = 255
cv.Threshold(image, image, threshold, color, cv.CV_THRESH_BINARY)
showWindow("binary threshold")

# Otsu threshold
# NOTE(review): legacy cv usually expects CV_THRESH_OTSU OR'ed with a
# binary type (e.g. CV_THRESH_BINARY | CV_THRESH_OTSU) — confirm.
image = loadGreyscale()
cv.Threshold(image, image, threshold, color, cv.CV_THRESH_OTSU)
showWindow("Otsu threshold")

# Dilation
image = loadGreyscale()
element_shape = cv.CV_SHAPE_RECT
# (pos*2+1) gives an odd 3x3 kernel anchored at its centre (pos, pos).
pos = 1
element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                        element_shape)
cv.Dilate(image, image, element, 2)
showWindow("Dilate")

# Erosion (reuses the same structuring element)
image = loadGreyscale()
cv.Erode(image, image, element, 2)
showWindow("Erode")

# Morphology
# NOTE(review): `image` is passed as src, dst AND temp buffer; the temp
# argument should normally be a separate image — confirm.
image = loadGreyscale()
cv.MorphologyEx(image, image, image, element, cv.CV_MOP_CLOSE, 2)
showWindow("Morphology")

# Laplace (example continues beyond this excerpt)
image = loadGreyscale()
예제 #14
0
    def process_frame(self, frame):
        """Find bin lines in *frame* via saturation thresholding + Hough.

        Median-smooths the frame, extracts the HSV saturation channel,
        adaptively thresholds it, cleans it morphologically, runs the
        standard Hough transform and draws the raw lines on a debug
        image published as "Bins".
        """
        debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, debug_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        # COI (channel-of-interest) trick: Copy with COI=2 extracts the
        # saturation plane, then COI is reset.
        cv.SetImageCOI(hsv, 2)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        cv.CvtColor(binary, debug_frame, cv.CV_GRAY2RGB)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        line_groups = []  # A list of line groups which are each a line list

        # Cluster raw (rho, theta) lines into groups of similar lines.
        for line in raw_lines:
            group_found = False
            for line_group in line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                line_groups.append([line])

            # Average line groups into lines
            # NOTE(review): this averaging block is indented inside the
            # raw_lines loop (recomputed every iteration) and `lines` is
            # never used afterwards — probably meant to be dedented and
            # drawn instead of raw_lines; left unchanged.
            lines = []
            for line_group in line_groups:
                rhos = map(lambda line: line[0], line_group)
                angles = map(lambda line: line[1], line_group)
                line = (sum(rhos) / len(rhos),
                        circular_average(angles, math.pi))
                lines.append(line)

        libvision.misc.draw_lines(debug_frame, raw_lines)
        # cv.CvtColor(color_filtered,debug_frame, cv.CV_GRAY2RGB)
        svr.debug("Bins", debug_frame)
예제 #15
0
class Preprocessor:
    """Undistorts, crops and background-subtracts raw camera frames."""

    # Crop region of the raw frame as (x, y, width, height).
    #cropRect = (0, 80, 640, 400) # Primary pitch
    cropRect = (0, 45, 640, 400)  # Alt. pitch

    # Weight for blending each new frame into the running background
    # average; 0 disables continuous background learning.
    bgLearnRate = 0  #.15

    # Structuring element used for background-subtraction morphology.
    bgsub_kernel = \
        cv.CreateStructuringElementEx(5,5, #size
                                        2,2, #X,Y offsets
                                        cv.CV_SHAPE_RECT)

    def __init__(self, rawSize, threshold, simulator=None, bg=None):
        """Set up working buffers and the background image.

        :param rawSize: (width, height) of the raw camera frames.
        :param threshold: object providing foreground()/ball() thresholding.
        :param simulator: when not None, incoming frames are treated as
            already standardised (no undistort/crop applied).
        :param bg: truthy to use a zeroed dummy background instead of
            loading 'alt-pitch-bg.jpg' from disk.
        """
        self.rawSize = rawSize
        # Crop size is the (width, height) half of cropRect.
        self.cropSize = self.cropRect[2:]

        # Small inputs (e.g. simulator frames) are used uncropped.
        if rawSize[0] < 640:
            self.cropSize = rawSize

        logging.info("Captured image size: %s", dim2string(self.rawSize))
        logging.info("Cropped image size: %s", dim2string(self.cropSize))

        self.initMatrices()

        # Full-size buffer for the undistorted frame.
        self.Idistort = cv.CreateImage(self.rawSize, cv.IPL_DEPTH_8U, 3)

        # Cropped-size working buffers reused across frames.
        self.Icrop = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 3)
        self.Igray = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 1)
        self.Imask = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 3)
        self.Iobjects = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 3)
        self.bg = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 3)
        self.Ieq = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 3)
        self.R = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 1)
        self.G = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 1)
        self.B = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 1)

        # NOTE(review): a truthy *bg* selects the zeroed DUMMY background
        # (at raw size, not crop size), while falsy loads the real one
        # from disk -- polarity and size look suspicious, confirm intent.
        if bg:
            logging.debug("Loading dummy background image")
            self.bg = cv.CreateImage(self.rawSize, cv.IPL_DEPTH_8U, 3)
            cv.Zero(self.bg)
        else:
            logging.debug("Loading the background image")
            #self.bg = cv.LoadImage('alt-pitch-bg.png')
            self.bg = cv.LoadImage('alt-pitch-bg.jpg')
            logging.debug("Processing the background image:")
            # Standardise the background the same way live frames are.
            self.bg = cv.CloneImage(self.crop(self.undistort(self.bg)))
            # cv.SaveImage("calibrated-background.png", self.bg)

        # Simulator frames arrive already undistorted and cropped.
        self.standardised = simulator is not None

        self.threshold = threshold

    def get_standard_form(self, frame):
        """Convert *frame* to the standard (undistorted, cropped) format.

        Already-standardised frames (e.g. from the simulator) are
        returned untouched; otherwise the result is an internal buffer.
        """
        if not self.standardised:
            frame = self.undistort(frame)
            frame = self.crop(frame)
        return frame

    def bgsub(self, frame):
        """Preprocess a frame.
        :: CvMat -> (CvMat, CvMat)

        Optionally updates the learnt background with the new frame,
        then removes the background (using prior camera calibration
        data and the stored background image) and returns the
        background-subtracted image together with its mask.
        """
        self.continuousLearnBackground(frame)
        return self.remove_background_values(frame)

    def crop(self, frame):
        """Copy the configured crop region of *frame* into self.Icrop."""
        logging.debug("Cropping a frame")
        region = cv.GetSubRect(frame, self.cropRect)
        cv.Copy(region, self.Icrop)
        return self.Icrop

    def preprocessBG(self, frame):
        """Return the thresholded ball mask for *frame*.

        Bug fix: the mask was previously computed and silently
        discarded (the method always returned None); it is now
        returned to the caller, which is backward-compatible.
        """
        return self.threshold.ball(frame)

    def continuousLearnBackground(self, frame):
        """Blend *frame* into the stored background at bgLearnRate.

        A learn rate of 0 disables background learning entirely.
        """
        rate = self.bgLearnRate
        if rate == 0:
            return
        cv.AddWeighted(frame, rate, self.bg, 1.0 - rate, 0, self.bg)

    def remove_background(self, frame):
        """Subtract the stored background from *frame*.

        Leaves robots and some noise.  The returned image is the
        internal buffer self.Imask, re-initialised on every call, so
        callers must not modify or hold on to it.
        """
        logging.debug("Performing background subtraction")
        cv.Sub(frame, self.bg, self.Imask)
        return self.Imask

    def remove_background_values(self, frame):
        """Return (masked frame, grayscale mask) after background removal."""
        self.Imask = self.remove_background(frame)

        logging.debug(
            "Using thresholded background subtracted image as a mask")
        #cv.ShowImage("ASD", self.Imask)
        # NOTE(review): the image returned by threshold.foreground() is
        # immediately overwritten by the CvtColor call below, so its
        # pixel content is discarded and it only serves as a buffer --
        # confirm this is intentional.
        self.Igray = self.threshold.foreground(self.Imask)
        cv.CvtColor(self.Imask, self.Igray, cv.CV_BGR2GRAY)
        # Otsu picks the threshold automatically; 200 is only a fallback seed.
        cv.Threshold(self.Igray, self.Igray, 200, 255, cv.CV_THRESH_OTSU)
        #cv.EqualizeHist(self.Igray, self.Igray)
        # Expand the binary mask back to 3 channels so it can gate the frame.
        cv.CvtColor(self.Igray, self.Imask, cv.CV_GRAY2BGR)

        #Finally, return the salient bits of the original frame
        cv.And(self.Imask, frame, self.Iobjects)

        return self.Iobjects, self.Igray

    def background_mask(self, frame):
        """Return a histogram-equalised grayscale version of *frame*.

        Bug fixes: the original referenced ``self.pre.cropSize`` (this
        class has no ``pre`` attribute), used the undefined name
        ``bgsub`` instead of the ``frame`` parameter, and returned
        nothing -- so every call raised AttributeError/NameError.
        """
        bgsub_eq = cv.CreateImage(self.cropSize, cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(frame, bgsub_eq, cv.CV_BGR2GRAY)
        cv.EqualizeHist(bgsub_eq, bgsub_eq)
        return bgsub_eq

    def hist_eq(self, frame):
        """Histogram-equalise each BGR channel of *frame* independently.

        Returns the internal buffer self.Ieq; do not hold on to it.

        Bug fix: the blue channel was previously overwritten with the
        equalised red channel (``cv.EqualizeHist(self.R, self.B)``)
        instead of being equalised in place.
        """
        cv.Split(frame, self.B, self.G, self.R, None)
        cv.EqualizeHist(self.B, self.B)
        cv.EqualizeHist(self.G, self.G)
        cv.EqualizeHist(self.R, self.R)
        cv.Merge(self.B, self.G, self.R, None, self.Ieq)
        return self.Ieq

    def hsv_normalise(self, frame):
        """Normalise scene lighting by fixing the HSV Value channel.

        Works by setting the HSV Value component to a constant.
        However, turns out that this does not adequately remove shadows.
        Maybe parameter tweaking the Value constant might do something? TODO

        Returns a newly allocated image; *frame* is left untouched.
        """
        tmp = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, tmp, cv.CV_BGR2HSV)

        H, S, V = [cv.CreateImage(cv.GetSize(frame), 8, 1) for _ in range(3)]
        cv.Split(tmp, H, S, V, None)

        # Clamp brightness to a constant (value chosen empirically).
        cv.Set(V, 140)

        cv.Merge(H, S, V, None, tmp)
        # Fix: a stray trailing comma previously turned this call into a
        # one-element tuple expression (harmless at runtime, but misleading).
        cv.CvtColor(tmp, tmp, cv.CV_HSV2BGR)

        return tmp

    def undistort(self, frame):
        """Undistort *frame* using the calibrated camera matrices.

        The input must match the dimensions of the internal buffer
        self.Idistort, which is returned (and reused on every call).
        """
        logging.debug("Undistorting a frame")

        expected = (self.Idistort.width, self.Idistort.height)
        assert (frame.width, frame.height) == expected

        cv.Undistort2(frame, self.Idistort, self.Intrinsic, self.Distortion)
        return self.Idistort

    def initMatrices(self):
        """Initialise the intrinsic/distortion matrices for the camera.

        The coefficients are fixed calibration results for this camera;
        they are packed into 3x3 intrinsic and 1x4 distortion CvMats
        stored as self.Intrinsic and self.Distortion.
        """
        logging.debug("Initialising camera matrices")

        # Radial/tangential distortion coefficients (k1, k2, p1, p2).
        distortion_values = [
            -3.1740235091903346e-01, -8.6157434640872499e-02,
            9.2026812110876845e-03, 4.4950266773574115e-03
        ]

        # Row-major 3x3 intrinsic matrix (fx, 0, cx / 0, fy, cy / 0, 0, 1).
        intrinsic_values = [
            8.6980146658682384e+02, 0., 3.7426130495414304e+02, 0.,
            8.7340754327613899e+02, 2.8428760615670581e+02, 0., 0., 1.
        ]

        intrinsic = cv.CreateMat(3, 3, cv.CV_32FC1)
        distortion = cv.CreateMat(1, 4, cv.CV_32FC1)

        for idx, value in enumerate(intrinsic_values):
            intrinsic[idx // 3, idx % 3] = value

        for idx, value in enumerate(distortion_values):
            distortion[0, idx] = value

        self.Distortion = distortion
        self.Intrinsic = intrinsic
    def processBag( self, bag ):
        """Process every sampled image frame in a ROS *bag* file.

        For each processed frame this computes optical flow and a
        motion mask, tracks the left-most column containing motion,
        detects an "impact" frame from a sudden leftward jump of that
        motion front, and (once enough frames have accumulated) runs
        GrabCut segmentation.  Per-frame results are stored into the
        instance's various *List attributes; afterwards motion and
        impact images are written out as PNGs.

        NOTE(review): written for Python 2 (print statements) and the
        legacy OpenCV ``cv`` API.
        """

        FLIP_IMAGE = bool( self.options.frameFlip == "True" )
        USING_OPTICAL_FLOW_FOR_MOTION = False
        print "frameFlip = ", FLIP_IMAGE
    
        bagFrameIdx = 0
        frameIdx = 0
        impactFrameIdx = None
        
        # Setup filters
        opticalFlowFilter = OpticalFlowFilter(
            self.OPTICAL_FLOW_BLOCK_WIDTH, self.OPTICAL_FLOW_BLOCK_HEIGHT, 
            self.OPTICAL_FLOW_RANGE_WIDTH, self.OPTICAL_FLOW_RANGE_HEIGHT )
            
        motionDetectionFilter = MotionDetectionFilter()
        imageFlowFilter = ImageFlowFilter()
        residualSaliencyFilter = ResidualSaliencyFilter()
            
        # Process bag file
        for topic, msg, t in bag.read_messages():
            
            if self.workCancelled:
                # We've been given the signal to quit
                break
            
            if msg._type == "sensor_msgs/Image":
                
                # Subsample: only every PROCESSED_FRAME_DIFF-th bag frame
                # is actually processed.
                bagFrameIdx += 1
                if (bagFrameIdx-1)%self.PROCESSED_FRAME_DIFF != 0:
                    continue
                
                print "Processing image", frameIdx
                
                # Get input image (wrap the raw message bytes, no copy)
                image = cv.CreateMatHeader( msg.height, msg.width, cv.CV_8UC3 )
                cv.SetData( image, msg.data, msg.step )
                
                if FLIP_IMAGE:
                    cv.Flip( image, None, 1 )
                
                # Convert to grayscale
                grayImage = cv.CreateMat( msg.height, msg.width, cv.CV_8UC1 )
                cv.CvtColor( image, grayImage, cv.CV_BGR2GRAY )
                grayImageNumpPy = np.array( grayImage )
                
                # Calculate optical flow
                opticalFlowArrayX, opticalFlowArrayY = \
                    opticalFlowFilter.calcOpticalFlow( grayImage )
                    
                # Detect motion
                if USING_OPTICAL_FLOW_FOR_MOTION:
                    if frameIdx == 0:
                        motionImage = PyVarFlowLib.createMotionMask( 
                            grayImageNumpPy, grayImageNumpPy )
                    else:
                        motionImage = PyVarFlowLib.createMotionMask( 
                            np.array( self.grayScaleImageList[ frameIdx - 1 ] ), 
                            grayImageNumpPy )
                else:
                    motionImage = motionDetectionFilter.calcMotion( grayImage )
                
                
                # Work out the left most point in the image where motion appears
                motionTest = np.copy( motionImage )
                
                cv.Erode( motionTest, motionTest )
                if frameIdx == 0:
                    leftMostMotion = motionImage.shape[ 1 ]
                else:
                    leftMostMotion = self.leftMostMotionList[ frameIdx - 1 ]
                
                leftMostMotionDiff = 0
                for i in range( leftMostMotion ):
                    if motionTest[ :, i ].max() > 0:
                        leftMostMotionDiff = abs( leftMostMotion - i )
                        leftMostMotion = i
                        break
                
                segmentationMask = np.zeros( ( msg.height, msg.width ), dtype=np.uint8 )
                
                FRAMES_BACK = 3
                
                # Impact heuristic: a big leftward jump of the motion front
                # in the left 3/4 of the image marks the impact frame.
                # NOTE(review): the 18 px / 0.75*width thresholds look
                # empirically chosen -- confirm against the capture setup.
                if impactFrameIdx == None:        
                    if leftMostMotionDiff > 18 and leftMostMotion < 0.75*msg.width:
                        
                        # Found impact frame
                        impactFrameIdx = frameIdx
                    
                else:
                    PROCESS_IMPACT = False
                    if PROCESS_IMPACT and frameIdx - impactFrameIdx == FRAMES_BACK:
                        
                        # Should now have enough info to segment object
                        impactMotionImage = self.motionImageList[ impactFrameIdx ]
                        
                        print "Aligning"
                        postImpactRealFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, motionImage )
                        print "Aligning"
                        postImpactFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx + 2 ] )
                        print "Aligning"
                        postImpactNearFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx + 1 ] )
                        
                        segmentationMask = np.maximum( np.maximum( np.maximum( 
                            impactMotionImage, postImpactNearFlow[ 3 ] ), postImpactFarFlow[ 3 ] ), postImpactRealFarFlow[ 3 ] )
                        cv.Dilate( segmentationMask, segmentationMask )
                        
                        print "Aligning"
                        preImpactRealFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx - 8 ] )
                        print "Aligning"
                        preImpactFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx - 6 ] )
                        print "Aligning"
                        preImpactNearFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx - 4 ] )
                        
                        subMask = np.maximum( np.maximum( 
                            preImpactRealFarFlow[ 3 ], preImpactFarFlow[ 3 ] ), preImpactNearFlow[ 3 ] )
                        cv.Erode( subMask, subMask )
                        cv.Dilate( subMask, subMask )
                        cv.Dilate( subMask, subMask )
                        cv.Dilate( subMask, subMask )
                        
                        # Remove pre-impact motion from the post-impact mask.
                        subMask[ subMask > 0 ] = 255
                        diffImage = segmentationMask.astype( np.int32 ) - subMask.astype( np.int32 )
                        diffImage[ diffImage < 0 ] = 0
                        diffImage = diffImage.astype( np.uint8 )
                        cv.Erode( diffImage, diffImage )
                        #diffImage[ diffImage > 0 ] = 255

                        #segmentationMask = subMask
                        segmentationMask = diffImage
                        #segmentationMask = np.where( diffImage > 128, 255, 0 ).astype( np.uint8 )
                
                # Calculate image flow
                #imageFlow = imageFlowFilter.calcImageFlow( motionImage )
                
                ## Calculate saliency map
                #saliencyMap, largeSaliencyMap = residualSaliencyFilter.calcSaliencyMap( grayImageNumpPy )
                
                #blobMap = np.where( largeSaliencyMap > 128, 255, 0 ).astype( np.uint8 )
                
                #blobMap, numBlobs = PyBlobLib.labelBlobs( blobMap )
                #print "found", numBlobs, "blobs"
                
                #largeSaliencyMap = np.where( largeSaliencyMap > 128, 255, 0 ).astype( np.uint8 )
                
                
                
                
                
                
                # Threshold the saliency map
                #largeSaliencyMap = (largeSaliencyMap > 128).astype(np.uint8) * 255
                #cv.AdaptiveThreshold( largeSaliencyMap, largeSaliencyMap, 255 )
                
                # Detect clusters within the saliency map
                #NUM_CLUSTERS = 5
                
                #numSamples = np.sum( saliencyMap )
                #sampleList = np.ndarray( ( numSamples, 2 ), dtype=np.float32 )
                
                #sampleListIdx = 0
                #for y in range( saliencyMap.shape[ 0 ] ):
                    #for x in range( saliencyMap.shape[ 1 ] ):
                        
                        #numNewSamples = saliencyMap[ y, x ]
                        #if numNewSamples > 0:
                            #sampleList[ sampleListIdx:sampleListIdx+numNewSamples, 0 ] = x
                            #sampleList[ sampleListIdx:sampleListIdx+numNewSamples, 1 ] = y
                            #sampleListIdx += numNewSamples
                            
                #sampleList[ 0:numSamples/2 ] = ( 20, 20 )
                #sampleList[ numSamples/2: ] = ( 200, 200 )
                
                #labelList = np.ndarray( ( numSamples, 1 ), dtype=np.int32 )
                #cv.KMeans2( sampleList, NUM_CLUSTERS, labelList, 
                    #(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01) )
                    
                #saliencyScaleX = float( largeSaliencyMap.shape[ 1 ] ) / saliencyMap.shape[ 1 ]
                #saliencyScaleY = float( largeSaliencyMap.shape[ 0 ] ) / saliencyMap.shape[ 0 ]
                clusterList = []
                #for clusterIdx in range( NUM_CLUSTERS ):
                    
                    #clusterSamples = sampleList[ 
                        #np.where( labelList == clusterIdx )[ 0 ], : ]

                    #if clusterSamples.size <= 0:
                        #mean = ( 0.0, 0.0 )
                        #stdDev = 0.0
                    #else:
                        #mean = clusterSamples.mean( axis=0 )
                        #mean = ( mean[ 0 ]*saliencyScaleX, mean[ 1 ]*saliencyScaleY )
                        #stdDev = clusterSamples.std()*saliencyScaleX
                    
                    #clusterList.append( ( mean, stdDev ) )
                
                
                
                
                # Work out the maximum amount of motion we've seen in a single frame so far
                #motionCount = motionImage[ motionImage > 0 ].size
                
                #if frameIdx == 0:
                    #lastMotionCount = 0
                #else:
                    #lastMotionCount = self.maxMotionCounts[ frameIdx - 1 ]
                    
                #if motionCount < lastMotionCount:
                    #motionCount = lastMotionCount
                
                ## Work out diffImage    
                #diffImage = np.array( motionImage, dtype=np.int32 ) \
                     #- np.array( imageFlow[ 3 ], dtype=np.int32 )
                #diffImage = np.array( np.maximum( diffImage, 0 ), dtype=np.uint8 )
                
                
                
                
                
                # Segment the image
                #workingMask = np.copy( motionImage )
                #workingMask = np.copy( diffImage )
                workingMask = np.copy( segmentationMask )
                kernel = cv.CreateStructuringElementEx( 
                    cols=3, rows=3, 
                    anchorX=1, anchorY=1, shape=cv.CV_SHAPE_CROSS )
                cv.Erode( workingMask, workingMask, kernel )
                cv.Dilate( workingMask, workingMask )
                
                # Progressively dilated copies of the mask are used below
                # to build the GrabCut trimap rings.
                extraExtraMask = np.copy( workingMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                
                allMask = np.copy( extraExtraMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                
                possibleForeground = workingMask > 0
            
                # Only run GrabCut once there's enough candidate foreground
                # and enough history.  NOTE(review): the 100 px / frame 16
                # thresholds look empirical -- confirm.
                if workingMask[ possibleForeground ].size >= 100 \
                    and frameIdx >= 16:
                        
                    print "Msk size", workingMask[ possibleForeground ].size
                    print workingMask[ 0, 0:10 ]
                    
                    fgModel = cv.CreateMat( 1, 5*13, cv.CV_64FC1 )
                    bgModel = cv.CreateMat( 1, 5*13, cv.CV_64FC1 )
                    #workingMask[ possibleForeground ] = self.GC_FGD
                    #workingMask[ possibleForeground == False ] = self.GC_PR_BGD
                    
                    #workingMask[ : ] = self.GC_PR_BGD
                    #workingMask[ possibleForeground ] = self.GC_FGD
                    
                    # Build the GrabCut trimap as concentric rings:
                    # definite background outside, probable bg/fg rings,
                    # definite foreground at the motion core.
                    workingMask[ : ] = self.GC_BGD
                    workingMask[ allMask > 0 ] = self.GC_PR_BGD
                    workingMask[ extraExtraMask > 0 ] = self.GC_PR_FGD
                    workingMask[ possibleForeground ] = self.GC_FGD
                    
                    
                    if frameIdx == 16:
                        # Save mask
                        maskCopy = np.copy( workingMask )
                        maskCopy[ maskCopy == self.GC_BGD ] = 0
                        maskCopy[ maskCopy == self.GC_PR_BGD ] = 64
                        maskCopy[ maskCopy == self.GC_PR_FGD ] = 128
                        maskCopy[ maskCopy == self.GC_FGD ] = 255
                        print "Unused pixels", \
                            maskCopy[ (maskCopy != 255) & (maskCopy != 0) ].size
                          
                        outputImage = cv.CreateMat( msg.height, msg.width, cv.CV_8UC3 )
                        cv.CvtColor( maskCopy, outputImage, cv.CV_GRAY2BGR )
                        
                        cv.SaveImage( "output.png", image );
                        cv.SaveImage( "outputMask.png", outputImage ); 
                        
                        print "Saved images"
                        #return 
                        
                    
                    #print "Set Msk size", workingMask[ workingMask == self.GC_PR_FGD ].size
                
                    imageToSegment = image #self.inputImageList[ frameIdx - FRAMES_BACK ]
                
                    imageCopy = np.copy( imageToSegment )
                    cv.CvtColor( imageCopy, imageCopy, cv.CV_BGR2RGB )
                
                    print "Start seg"
                    cv.GrabCut( imageCopy, workingMask, 
                        (0,0,0,0), fgModel, bgModel, 12, self.GC_INIT_WITH_MASK )
                    print "Finish seg"
                
                    # Keep only pixels GrabCut labelled (probably) foreground.
                    segmentation = np.copy( imageToSegment )
                    segmentation[ (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD) ] = 0
                
                    
                    black = (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD)
                    #motionImage = np.where( black, 0, 255 ).astype( np.uint8 )
                    
                    # Refine the segmentation
                    REFINE_SEG = False
                    if REFINE_SEG:
                        motionImageCopy = np.copy( motionImage )
                        cv.Erode( motionImageCopy, motionImageCopy )
                        #cv.Erode( motionImageCopy, motionImageCopy )
                        #cv.Erode( motionImageCopy, motionImageCopy )
                        
                        workingMask[ motionImageCopy > 0 ] = self.GC_PR_FGD
                        workingMask[ motionImageCopy == 0 ] = self.GC_PR_BGD
                        
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        workingMask[ motionImageCopy == 0 ] = self.GC_BGD
                        
                        print "Other seg"
                        cv.GrabCut( imageCopy, workingMask, 
                            (0,0,0,0), fgModel, bgModel, 12, self.GC_INIT_WITH_MASK )
                        print "Other seg done"
                            
                        segmentation = np.copy( imageToSegment )
                        segmentation[ (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD) ] = 0
                    
                        
                        black = (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD)
                        motionImage = np.where( black, 0, 255 ).astype( np.uint8 )
                    
                
                else:
                    segmentation = np.zeros( ( image.height, image.width ), dtype=np.uint8 )
                
                
                # Save output data
                self.inputImageList[ frameIdx ] = image
                self.grayScaleImageList[ frameIdx ] = grayImage
                self.opticalFlowListX[ frameIdx ] = opticalFlowArrayX
                self.opticalFlowListY[ frameIdx ] = opticalFlowArrayY
                self.motionImageList[ frameIdx ] = motionImage
                self.segmentationList[ frameIdx ] = segmentation
                self.segmentationMaskList[ frameIdx ] = segmentationMask
                #self.maxMotionCounts[ frameIdx ] = motionCount
                #self.imageFlowList[ frameIdx ] = imageFlow
                #self.saliencyMapList[ frameIdx ] = largeSaliencyMap
                #self.saliencyClusterList[ frameIdx ] = clusterList
                self.leftMostMotionList[ frameIdx ] = leftMostMotion
                
                frameIdx += 1
                self.numFramesProcessed += 1
                
        if not self.workCancelled:
            
            
            SAVE_MOTION_IMAGES = True
            BASE_MOTION_IMAGE_NAME = self.scriptPath + "/../../test_data/motion_images/motion_{0:03}.png"
            
            if SAVE_MOTION_IMAGES and len( self.motionImageList ) > 0:
                
                width = self.motionImageList[ 0 ].shape[ 1 ]
                height = self.motionImageList[ 0 ].shape[ 0 ]
                colourImage = np.zeros( ( height, width, 3 ), dtype=np.uint8 )
                
                for frameIdx, motionImage in enumerate( self.motionImageList ):
                    
                    # Replicate the mask into all three channels for saving.
                    colourImage[ :, :, 0 ] = motionImage
                    colourImage[ :, :, 1 ] = motionImage
                    colourImage[ :, :, 2 ] = motionImage
                    
                    outputName = BASE_MOTION_IMAGE_NAME.format( frameIdx + 1 )
                    cv.SaveImage( outputName, colourImage )
            
            # Recalculate impactFrameIdx
            width = self.motionImageList[ 0 ].shape[ 1 ]
            
            totalMotionDiff = 0
            maxMotionDiff = 0
            impactFrameIdx = None
            for motionIdx in range( 1, len( self.leftMostMotionList ) ):
            
                motionDiff = abs( self.leftMostMotionList[ motionIdx ] \
                    - self.leftMostMotionList[ motionIdx - 1 ] )
                totalMotionDiff += motionDiff
                    
                if motionDiff > maxMotionDiff and totalMotionDiff > 0.5*width:
                    maxMotionDiff = motionDiff
                    impactFrameIdx = motionIdx
            
            # Too small a jump means no impact was actually observed.
            if maxMotionDiff <= 18:
                impactFrameIdx = None
                    
            
            if impactFrameIdx != None:
                
                preMotionImages = []
                postMotionImages = []
                impactMotionImage = None
                
                NUM_FRAMES_BEFORE = 3
                
                prefix = self.options.outputPrefix
                if prefix != "":
                    prefix += "_"
                
                BASE_MOTION_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "motion_{0:03}.png"
                START_MOTION_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "start_motion.png"
                START_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "start.png"
                IMPACT_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "impact.png"
                SEGMENTATION_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "segmentation.png"
                NUM_FRAMES_AFTER = 3
                
                width = self.motionImageList[ 0 ].shape[ 1 ]
                height = self.motionImageList[ 0 ].shape[ 0 ]
                colourImage = np.zeros( ( height, width, 3 ), dtype=np.uint8 )
                
                # Collect motion images in a window around the impact frame.
                for frameIdx in range( impactFrameIdx - NUM_FRAMES_BEFORE,
                    impactFrameIdx + NUM_FRAMES_AFTER + 1 ):
                    
                    motionImage = self.motionImageList[ frameIdx ]  
                    
                    if frameIdx < impactFrameIdx:
                        preMotionImages.append( motionImage )
                    elif frameIdx == impactFrameIdx:
                        impactMotionImage = motionImage
                    else: # frameIdx > impactFrameIdx
                        postMotionImages.append( motionImage )
                    
                    colourImage[ :, :, 0 ] = motionImage
                    colourImage[ :, :, 1 ] = motionImage
                    colourImage[ :, :, 2 ] = motionImage
                    
                    outputName = BASE_MOTION_IMAGE_NAME.format( frameIdx - impactFrameIdx )
                    cv.SaveImage( outputName, colourImage )
                
                # Prime the filter with frame 0 so the impact-frame motion
                # is computed relative to the start of the sequence.
                motionDetectionFilter.calcMotion( self.grayScaleImageList[ 0 ] )
                startMotionImage = motionDetectionFilter.calcMotion( 
                    self.grayScaleImageList[ impactFrameIdx ] )
                colourImage[ :, :, 0 ] = startMotionImage
                colourImage[ :, :, 1 ] = startMotionImage
                colourImage[ :, :, 2 ] = startMotionImage  
                cv.SaveImage( START_MOTION_IMAGE_NAME, colourImage )
                
                cv.CvtColor( self.inputImageList[ 0 ], colourImage, cv.CV_RGB2BGR )    
                cv.SaveImage( START_IMAGE_NAME, colourImage )
                cv.CvtColor( self.inputImageList[ impactFrameIdx ], colourImage, cv.CV_RGB2BGR )    
                cv.SaveImage( IMPACT_IMAGE_NAME, colourImage )
                
                print "Segmenting..."
                segmentation = self.produceSegmentation( self.inputImageList[ 0 ], 
                    impactMotionImage, preMotionImages, postMotionImages )
                cv.CvtColor( segmentation, colourImage, cv.CV_RGB2BGR )    
                cv.SaveImage( SEGMENTATION_IMAGE_NAME, colourImage )
                    
            self.refreshGraphDisplay()
            
            
        print "Finished processing bag file"
        if bool( self.options.quitAfterFirstSegmentation == "True" ):
            print "Trying to quit"
            self.onWinMainDestroy( None )
        else:
            print "Not trying to quit so neeah"
예제 #17
0
def find_lines(frame):
    # Resize to 640x480
    frame_size = cv.GetSize(frame)
    if frame_size[0] != 640:
        frame_small = cv.CreateMat(480, 640, cv.CV_8UC3)
        cv.Resize(frame, frame_small)
    else:
        frame_small = frame

    # Threshold by distance: blank out all top pixels
    cv.Rectangle(frame_small, (0, 0), (640, 80), (0, 0, 0, 0), cv.CV_FILLED)

    # Convert to grayscale
    frame_size = cv.GetSize(frame_small)
    frame_gray = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(frame_small, frame_gray, cv.CV_BGR2GRAY)

    # Use color thresholding to get white lines
    threshold = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.Threshold(frame_gray, threshold, 190, 255, cv.CV_THRESH_BINARY)

    # Morphological ops to reduce noise
    # TODO try to reduce sizes to increase detection of faraway lines
    openElement = cv.CreateStructuringElementEx(7, 7, 3, 3, cv.CV_SHAPE_RECT)
    closeElement = cv.CreateStructuringElementEx(11, 11, 5, 5,
                                                 cv.CV_SHAPE_RECT)
    cvOpen(threshold, threshold, openElement)
    cvClose(threshold, threshold, closeElement)

    # Use Canny edge detection to find edges
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.Canny(threshold, edges, 100, 200)

    # Use Hough transform to find equations for lines
    line_storage = cv.CreateMemStorage()

    lines = cv.HoughLines2(edges, line_storage, cv.CV_HOUGH_STANDARD, 1,
                           cv.CV_PI / 180.0, 120)
    lines = list(lines)

    # Remove spurious line from the black rectangle up top
    for line in lines[:]:
        if (abs(180 - line[0]) < 10
                and abs(util.normalizeRadians(cv.CV_PI / 2 - line[1])) < 0.01):
            lines.remove(line)

    # Group lines that are within r +/-12 and theta +/- 5 degrees
    grouped_lines = []
    r_threshold = 12  # in px
    theta_threshold = cv.CV_PI * 5 / 180  # in radians
    while len(lines) > 0:
        line1 = normalizeLine(lines.pop())
        avg_line = line1
        matched_lines = [line1]
        for j in range(len(lines) - 1, -1, -1):
            line2 = normalizeLine(lines[j])
            if verbose:
                # Print which criteria were matched
                if (abs(avg_line[0] - line2[0]) < r_threshold):
                    print 1,
                if (abs(util.normalizeRadians(avg_line[1] - line2[1])) <
                        theta_threshold):
                    print 2,
                print avg_line, line2
            if (abs(avg_line[0] - line2[0]) < r_threshold and
                abs(util.normalizeRadians(avg_line[1] - line2[1])) < \
                  theta_threshold):
                matched_lines.append(line2)
                avg_line = avgLines(matched_lines)
                lines.pop(j)
        if verbose: print matched_lines
        grouped_lines.append(avg_line)
    lines = grouped_lines

    # Group possible pairs of lines by smallest angle difference
    grouped_lines = []
    while len(lines) > 0:
        line1 = normalizeLine(lines.pop())
        closest = None
        for j in range(len(lines) - 1, -1, -1):
            line2 = normalizeLine(lines[j])
            # Find the closest match
            if ((closest is None
                 or abs(util.normalizeRadians(line1[1] - line2[1])) < \
                     abs(util.normalizeRadians(line1[1] - closest[1])))
                # Make sure difference < pi/4 to reduce errors
                and abs(util.normalizeRadians(line1[1] - line2[1])) < \
                    cv.CV_PI / 4):
                closest = line2

        if closest is not None:
            lines.remove(closest)
            # Sort list by line[0] (radius)
            if line1[0] > closest[0]:
                line1, closest = closest, line1
            # Make a tuple (line1, line2) or (line,) if no match found
            grouped_lines.append((line1, closest))
        else:
            grouped_lines.append((line1, ))

    # Print lines
    if len(grouped_lines) > 0:
        print 'Groups of lines:', grouped_lines
    i = 0
    for group in grouped_lines:
        for j in range(len(group)):
            rho, theta = group[j]
            a, b = np.cos(theta), np.sin(theta)
            x0, y0 = a * rho, b * rho
            pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
            pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
            #cv.Line(frame_small, pt1, pt2,
            #        hv2rgb(360.0*i/len(grouped_lines), 1.0), 1, 8)
        i += 1

    # If 2+ groups of lines, find corners (intersection point of lines)
    intersection_pts = []
    if len(grouped_lines) > 1:
        for i in range(len(grouped_lines)):
            pair1 = grouped_lines[i]
            for j in range(i + 1, len(grouped_lines)):
                pair2 = grouped_lines[j]

                # Make sure their angles differ by more than 10 deg to
                #  reduce errors
                if (abs(util.normalizeRadians(pair1[0][1] - pair2[0][1])) <
                        cv.CV_PI * 10 / 180):
                    break

                # Enumerate intersection points
                pts = []
                for line1 in pair1:
                    for line2 in pair2:
                        pts.append(lineIntersection(line1, line2))
                # Find average of intersection points
                x = sum(pt[0] for pt in pts) / len(pts)
                y = sum(pt[1] for pt in pts) / len(pts)
                pt = (x, y)
                print 'Intersection:', pt,
                intersection_pts.append(pt)
                pt = (cv.Round(x), cv.Round(y))
                cv.Circle(frame_small, pt, 4, (0, 255, 0, 0))

                # Find direction of intersection by following each line
                #  (direction is defined as the point of the T)
                angles = []
                for pair in grouped_lines:
                    angles.append(pair[0][1] + cv.CV_PI / 2)
                    angles.append(pair[0][1] - cv.CV_PI / 2)
                for angle in angles:
                    # Look 50px down the line for white pixels
                    # TODO look a variable amount
                    x1 = x + 50 * np.cos(angle)
                    y1 = y + 50 * np.sin(angle)

                    # Enforce limits
                    # TODO handle when intersection is off the bounds of the image
                    #  Currently orientation of the intersection is not being used
                    #  by the particle filter
                    x1 = min(max(0, x1), frame_size[0] - 1)
                    y1 = min(max(0, y1), frame_size[1] - 1)
                    srchPt = cv.Round(x1), cv.Round(y1)

                    if threshold[srchPt[1], srchPt[0]] == 0:
                        x1 = x + 50 * np.cos(angle + cv.CV_PI)
                        y1 = y + 50 * np.sin(angle + cv.CV_PI)
                        invSrchPt = cv.Round(x1), cv.Round(y1)
                        cv.Line(frame_small, pt, invSrchPt, (0, 255, 0, 0), 1,
                                8)
                        print 'Angle:', angle + cv.CV_PI
                        break

    # Convert line equations into line segments
    line_segments = []
    for group in grouped_lines:

        if len(group) == 2:
            # Get the average of the lines in a pair
            line1, line2 = group
            line = ((line1[0] + line2[0]) / 2.0, (line1[1] + line2[1]) / 2.0)
        else:
            line = group[0]

        # Look down the line for the endpoints of the white region
        line_segment = lineToVisibleSegment(line, threshold)
        if line_segment != None:
            line_segments.append(line_segment)

    print 'Line segments:', line_segments
    i = 0
    for pt1, pt2 in line_segments:
        pt1 = cv.Round(pt1[0]), cv.Round(pt1[1])
        pt2 = cv.Round(pt2[0]), cv.Round(pt2[1])
        cv.Line(frame_small, pt1, pt2,
                hv2rgb(360.0 * i / len(line_segments), 1.0), 2, 8)
        i += 1

    cv.ShowImage('frame', frame_small)
    cv.ShowImage('edges', threshold)

    return line_segments, intersection_pts
예제 #18
0
# Star-extraction script: threshold the first channel of a downscaled
# image, then keep only tiny contours (presumably stars) as white blobs.
# NOTE(review): image2 and small are created earlier in this example,
# outside this excerpt -- small appears to be the resized destination.
cv.Resize(image2, small, cv.CV_INTER_LINEAR)
size = cv.GetSize(small)
x = size[0]
y = size[1]

temp = cv.CreateImage(size, 8, 3)

# Single-channel work images: extracted channel, its binary mask, and
# the output canvas the contours are drawn onto.
sat = cv.CreateImage(size, 8, 1)
notsat = cv.CreateImage(size, 8, 1)
new = cv.CreateImage(size, 8, 1)

# Split with three None outputs keeps only the first channel of 'small'.
cv.Split(small, sat, None, None, None)
cv.Threshold(sat, notsat, 100, 255, cv.CV_THRESH_BINARY)
# Grow the bright blobs (2 iterations, 3x3 ellipse) before contouring.
cv.Dilate(notsat, notsat,
          cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE), 2)

storage = cv.CreateMemStorage(0)
contour = cv.FindContours(notsat, storage, cv.CV_RETR_EXTERNAL,
                          cv.CV_CHAIN_APPROX_NONE)
# Walk the singly-linked contour list (legacy cv API).
while contour:

    # Keep only small blobs (area < 10 px); draw each one filled white.
    if cv.ContourArea(contour) < 10:
        contour2 = contour
        cv.DrawContours(new, contour2, cv.RGB(255, 255, 255),
                        cv.RGB(255, 255, 255), -1, cv.CV_FILLED, cv.CV_AA)
    contour = contour.h_next()

cv.SaveImage("stars.png", new)
cv.SaveImage("small.png", small)
예제 #19
0
  # The magic number M determines how deep the polar transformation goes.
  M = 69

  #This is the main loop
  while True:
    cam = cv.QueryFrame(capture)
    cv.LogPolar(cam, polar, (centerX, centerY), M+1, cv.CV_INTER_NN) #possible speedup - get subrect src
    #unwrapped = cv.GetSubRect(polar,(280,0,40,360))
    #cv.Transpose(unwrapped, unwrapped)
    cv.Transpose(cv.GetSubRect(polar,(280,0,40,360)), unwrapped)
    cv.Flip(unwrapped) #just for viewing (possible speedup)

    cv.InRangeS(unwrapped, lower, upper, cones)
    cv.Erode(cones, cones) # just once might be too much, but unavoidable

    k = cv.CreateStructuringElementEx(3, 43, 1, 1, cv.CV_SHAPE_RECT) # create a 3x43 rectangular dilation element k
    cv.Dilate(cones, cones, k) 

    #Display (should probably be disabled with a usage flag)
    cv.ShowImage('cam', cam)
    cv.ShowImage('unwrapped', unwrapped)
    cv.ShowImage('cones', cones)
    #cv.ShowImage('polar', polar)
    #cv.ShowImage('hsvcopy', hsvcopy)

    #scan top row of thresholded, eroded, dilated image, find the number of contiguous segments and their location
    s = 0 # size of contiguous segment
    ss = 0 #number of contiguous segments
    bearingToLandmarks = []
    for i in xrange(360-2):
        c = cones[0, i] #current
예제 #20
0
def Closing(pos):
    """Trackbar callback: morphological closing of the global src image.

    Closing = dilation followed by erosion with a (2*pos+1)-square
    kernel anchored at (pos, pos); the result is shown in the shared
    "Opening & Closing" window.
    """
    kernel = cv.CreateStructuringElementEx(
        pos * 2 + 1, pos * 2 + 1, pos, pos, element_shape)
    cv.Dilate(src, image, kernel, 1)
    cv.Erode(image, dest, kernel, 1)
    cv.ShowImage("Opening & Closing", dest)
예제 #21
0
def test_dilate(image, do_show=False):
    """Dilate *image* with a 3x3 rectangular kernel; optionally display it."""
    kernel = cv.CreateStructuringElementEx(3, 3, 2, 2, cv.CV_SHAPE_RECT)
    result = dilate(image, kernel)
    if do_show:
        show(dilated=result)
예제 #22
0
def Dilation(pos):
    """Trackbar callback: dilate the global src image.

    Uses a (2*pos+1)-square kernel anchored at (pos, pos) and shows the
    result in the shared "Erosion & Dilation" window.
    """
    kernel = cv.CreateStructuringElementEx(
        pos * 2 + 1, pos * 2 + 1, pos, pos, element_shape)
    cv.Dilate(src, dest, kernel, 1)
    cv.ShowImage("Erosion & Dilation", dest)
예제 #23
0
# ANPR demo script: for every sample image, detect number plates, then
# segment candidate characters and display intermediate results.
files = os.listdir('data/examples')
counter = 0
for f in files:
    image = cv.LoadImage('data/examples/' + f)
    for plate in anpr.detect_plates(image):
        # Smoothed / mean-shift-filtered working copy of the plate.
        zzz = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 3)
        cv.Smooth(plate, zzz)
        #
        cv.PyrMeanShiftFiltering(plate, zzz, 40, 15)
        foo = anpr.greyscale(plate)
        segmented = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        bar = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        cv.EqualizeHist(foo, segmented)
        # AdaptiveThreshold needs an odd block size: round the plate
        # height up to the next odd number when it is even.  (Replaces
        # the fragile 'cond and a or b' idiom with a real conditional
        # expression.)
        block_size = plate.height + 1 if plate.height % 2 == 0 else plate.height
        cv.AdaptiveThreshold(
            segmented, bar, 255, cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
            cv.CV_THRESH_BINARY_INV,
            block_size,
            plate.height / 2)
        baz = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        # Thin 1x2 erosion to break small vertical bridges between blobs.
        el = cv.CreateStructuringElementEx(1, 2, 0, 0, cv.CV_SHAPE_RECT)
        cv.Erode(bar, baz, el)
        #quick_show(plate)
        #quick_show(segmented)
        #quick_show(bar)
        quick_show(baz)
        # Outline every detected character on the plate image.
        for char in anpr.find_characters(foo, baz):
            cv.Rectangle(plate, (int(char.x1), int(char.y1)),
                         (int(char.x2), int(char.y2)), (255, 0, 0))
        quick_show(plate)
예제 #24
0
File: main.py  Project: AlexKordic/obj_att
# some constants
OPENNI_INITIALIZATION_FILE = "../config/BasicColorAndDepth.xml"
KEY_ESC     = 27            # keyboard code for the Escape key
hist_height = 64            # pixel height of the histogram display

# initialization stuff
print 'loading ..'
save_count  = 0             # number of snapshots saved so far ('s' key)
objects     = []
stats       = []
current_key = -1            # last key read from the UI loop
timing      = {'t_draw': 0, 't_histo': 0, 't_track': 0, 't_pre': 0}  # per-stage timing accumulators
loop_c      = 0             # main-loop iteration counter
font    = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8)          # opencv font
elem    = cv.CreateStructuringElementEx(8, 8, 4, 4, cv.CV_SHAPE_RECT)   # used by the morphological operator (erode)
storage = cv.CreateMemStorage(0)                                        # needed by find contours
writer  = None              # video writer; presumably created when recording starts ('v' key) -- confirm

# compute a timestamp for output file
# NOTE(review): outpath, n_colors and random_color are defined outside
# this excerpt.
outpath += datetime.datetime.now().strftime('%d%m%y_%H%M%S')
os.mkdir(outpath)
# create a list of random colors
color_tab = [random_color() for i in range(n_colors)]

help = """
        press 'v' to start or stop video recording
        press 's' to save the current images
        press 'c' to draw the contours
        press 'b' to draw the boxes
        press 'k' to draow the keypoints
예제 #25
0
File: gateTest.py  Project: tarora2/seawolf
    def process_frame(self, frame):
        ################
        #setup CV ######
        ################
        print "processing frame"
        (w, h) = cv.GetSize(frame)

        #generate hue selection frames
        ones = np.ones((h, w, 1), dtype='uint8')
        a = ones * (180 - self.target_hue)
        b = ones * (180 - self.target_hue + 20)
        a_array = cv.fromarray(a)
        b_array = cv.fromarray(b)

        #create locations for the test frame and binary frame
        frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

        #use the red channel for the binary frame (just for debugging purposes)
        cv.Copy(frame, frametest)
        cv.SetImageCOI(frametest, 3)
        cv.Copy(frametest, binarytest)

        #reset the COI for test frame to RGB.
        cv.SetImageCOI(frametest, 0)

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)
        found_gate = False

        #create a new frame for comparison purposes
        unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, unchanged_frame)

        #apply noise filter #1
        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)

        #spin the color wheel (psuedo-code for later if necessary)
        # truncate spectrum marked as end
        # shift all values up based on truncating value (mask out 0 regions)
        # take truncated bits, and flip them (180->0, 179->1...)
        # dnow that truncated bits are flipped, add them back in to final image

        #Reset hsv COI
        cv.SetImageCOI(hsv, 0)

        #correct for wraparound on red spectrum
        cv.InRange(binary, a_array, b_array, binarytest)  #generate mask
        cv.Add(binary, cv.fromarray(ones * 180), binary,
               mask=binarytest)  #use mask to selectively add values

        #run adaptive threshold for edge detection
        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)
        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        # Get vertical lines
        vertical_lines = []
        i = 0
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
                line[1] > (math.pi-self.vertical_threshold):

                #absolute value does better grouping currently
                vertical_lines.append((abs(line[0]), line[1]))
            i += 1

        # print message to user for performance purposes
        logging.debug("{} possibilities reduced to {} lines".format(
            i, len(vertical_lines)))

        # Group vertical lines
        vertical_line_groups = [
        ]  #A list of line groups which are each a line list
        i = 0
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:
                i += 1
                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        #quick debugging statement
        logging.debug("{} internal iterations for {} groups".format(
            i, len(vertical_line_groups)))

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)  #get rho of each line
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        self.left_pole = None
        self.right_pole = None
        self.returning = 0
        self.found = False

        if len(vertical_lines) is 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
            self.right_pole = round(
                max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2

            self.returning = (self.left_pole + self.right_pole) / 2
            logging.info("Returning {}".format(self.returning))

            #If this is first iteration, count this as seeing the gate
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0

            #increment a counter if result is good.
            if self.last_center is None:
                self.last_center = self.returning
                self.seen_count = 1
            elif math.fabs(self.last_center -
                           self.returning) < self.center_trans_thresh:
                self.seen_count += 1
                self.last_seen += 2
            else:
                self.last_seen -= 1

            #if not convinced, forget left/right pole. Else, proclaim success.
            if self.seen_count < self.seen_count_thresh:
                self.left_pole = None
                self.right_pole = None
            else:
                print "FOUND CENTER AND RETURNED IT"
                self.found = True

        else:
            self.returning = 0

            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0

            self.last_seen -= 1
            self.left_pole = None
            self.right_POLE = None

        #extra debugging stuff
        if self.debug:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
            libvision.misc.draw_lines(frame, vertical_lines)

            if self.found:
                cv.Circle(frame, (int(frame.width / 2 + self.returning),
                                  int(frame.height / 2)), 15, (0, 255, 0), 2,
                          8, 0)
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
                cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                           font, (255, 255, 0))
                #print frame.width

        #cv.ShowImage("Gate", cv.CloneImage(frame))
        svr.debug("Gate", cv.CloneImage(frame))
        svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

        self.return_output()
예제 #26
0
파일: solo.py 프로젝트: sdp-2011/sdp-10
def camera():
    found_goals = False
    print "# Starting initialization..."
    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Zero(intrinsics)

    #camera data
    intrinsics[0, 0] = 850.850708957251072
    intrinsics[1, 1] = 778.955239997982062
    intrinsics[2, 2] = 1
    intrinsics[0, 2] = 320.898495232253822
    intrinsics[1, 2] = 380.213734835526282
    dist_coeffs = cv.CreateMat(1, 4, cv.CV_64FC1)
    cv.Zero(dist_coeffs)
    dist_coeffs[0, 0] = -0.226795877008420
    dist_coeffs[0, 1] = 0.139445565548056
    dist_coeffs[0, 2] = 0.001245710462327
    dist_coeffs[0, 3] = -0.001396618726445
    print "# intrinsics loaded!"

    #prepare memory
    capture = cv.CaptureFromCAM(0)
    src = cv.QueryFrame(capture)
    size = GetSize(src)
    dst0 = cv.CreateImage(size, src.depth, src.nChannels)
    image_ROI = (0, 60, 640, 340)
    size = (640, 340)

    hue = cv.CreateImage(size, 8, 1)
    sat = cv.CreateImage(size, 8, 1)
    val = cv.CreateImage(size, 8, 1)
    ball = cv.CreateImage(size, 8, 1)
    yellow = cv.CreateImage(size, 8, 1)
    blue = cv.CreateImage(size, 8, 1)
    Set2D(hue, 4, 4, 255)
    Set2D(sat, 4, 4, 255)
    Set2D(val, 4, 4, 255)
    Set2D(ball, 4, 4, 255)
    Set2D(yellow, 4, 4, 255)
    Set2D(blue, 4, 4, 255)

    ballx = 0
    bally = 0

    print "# base images created..."
    #####------------------adjustment data---------------------###############
    #shadow
    high = 40
    low = 300

    #threshold
    thresred = 160
    thresgreen = 220
    thresblue = 254

    #dilate
    ex = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
    ex2 = cv.CreateStructuringElementEx(2, 2, 1, 1, cv.CV_SHAPE_RECT)
    ex5 = cv.CreateStructuringElementEx(5, 5, 1, 1, cv.CV_SHAPE_RECT)
    tHack = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_CROSS)

    #ball
    ballcount = 15
    ballmaxarea = 200
    ballminiarea = 45
    ballcompact = 1.3

    #blue
    bluecount = 30
    bluemaxarea = 1500
    blueminiarea = 50
    bluemaxdepth = 10
    blueminidepth = 2

    #yellow
    yellowcount = 30
    yellowmaxarea = 1000
    yellowminiarea = 50
    yellowmaxdepth = 10
    yellowminidepth = 3.2

    #####----------------------------------------

    aa = time.time()
    storage = cv.CreateMemStorage()
    first = True
    pitch = 0  # 0 for main pitch, 1 for alt pitch
    countf = 0
    print "# starting capture..."
    print ''
    capture = cv.CaptureFromCAM(0)
    while (True):
        src = cv.QueryFrame(capture)
        #ShowImage('src',src)
        cv.SetImageROI(dst0, (0, 0, 640, 480))
        average = cv.CreateImage(size, 8, 3)
        #barrel undistortion
        cv.Undistort2(src, dst0, intrinsics, dist_coeffs)
        #ROI = Region of Interests, crop the image
        cv.SetImageROI(dst0, image_ROI)
        dst = GetImage(dst0)
        dst2 = cv.CreateImage(size, 8, 3)
        Set2D(dst2, 4, 4, 255)
        hsv = cv.CreateImage(size, 8, 3)
        CvtColor(dst, hsv, CV_RGB2HSV)
        cv.Split(hsv, hue, sat, val, None)
        if (first):
            #hist = cv.CreateHist([32,64], CV_HIST_ARRAY, [[0,180], [0,256]], 1)
            #cv.CalcHist([hue, sat], hist, 0, None)
            #values = cv.GetMinMaxHistValue(hist)

            #print values
            #tweak = values[3][0]
            #if tweak >= 12:
            #	pitch = 1
            #print ">>> tweak=",tweak,"pitch selected =",pitch

            pitch = pitchSet
            if pitch == 1:
                base = cv.LoadImage("base.jpg", cv.CV_LOAD_IMAGE_UNCHANGED)
                baseInv = cv.CreateImage(size, 8, 1)
                cv.Not(base, baseInv)
                #huecorr = cv.LoadImage("huecorr.jpg",cv.CV_LOAD_IMAGE_UNCHANGED)
                #cv.Smooth(huecorr,huecorr)
                #ShowImage("base",base)
            #base = cv.CreateImage(size,8,1)
            #base = GetImage(val)
            #cv.Threshold(hue,hue,75,255,cv.CV_THRESH_BINARY_INV)
            #cv.SaveImage("huecorr.jpg", hue)
            #cv.Threshold(base,base,110,255,cv.CV_THRESH_BINARY)
            #cv.SaveImage("base.jpg", base)

            #cv.WaitKey(-1)
            first = False
        if (debug):
            ShowImage("hue", hue)
            ShowImage("sat", sat)
            ShowImage("val", val)

        if pitch == 1:
            walls = cv.CreateImage(size, 8, 1)
            cv.Threshold(val, walls, 50, 255, cv.CV_THRESH_BINARY_INV)
            Set2D(walls, 4, 4, 255)

            # BALL
            # fixed this cause with another robot it was finding the ball on it. seems to work
            Add(sat, hue, ball)

            Sub(ball, walls, ball)
            cv.SubS(ball, 60, ball, baseInv)
            cv.Threshold(ball, ball, 170, 255, cv.CV_THRESH_BINARY)
            cv.Erode(ball, ball, ex, 1)
            cv.Dilate(ball, ball, ex2, 1)
            Set2D(ball, 4, 4, 255)

            # YELLOW
            # cv.Threshold(hue,yellow,80,255,cv.CV_THRESH_BINARY)
            cv.Threshold(val, yellow, 250, 255, cv.CV_THRESH_BINARY)
            Sub(yellow, walls, yellow)
            cv.Erode(yellow, yellow, ex, 1)
            Set2D(yellow, 4, 4, 255)

            # blue
            cv.Add(walls, hue, blue)
            cv.Threshold(blue, blue, 40, 255, cv.CV_THRESH_BINARY_INV)
            cv.Erode(blue, blue, ex2, 2)
            Set2D(blue, 4, 4, 255)
            cv.Dilate(blue, blue, tHack, 2)

        if pitch == 0:
            ballcompact = 2.0
            walls = cv.CreateImage(size, 8, 1)
            cv.Threshold(val, walls, 50, 255, cv.CV_THRESH_BINARY_INV)
            Set2D(walls, 4, 4, 255)

            # BALL
            #cv.Add(sat,val,ball)
            #ShowImage("rawB",ball)
            cv.Threshold(hue, ball, 110, 255, cv.CV_THRESH_BINARY)
            cv.Erode(ball, ball, ex2, 1)
            cv.Dilate(ball, ball, ex, 1)

            # YELLOW
            cv.Threshold(val, yellow, 240, 255, cv.CV_THRESH_BINARY)
            # cv.Threshold(hue,yellow,80,255,cv.CV_THRESH_TOZERO)
            # cv.Threshold(yellow,yellow,105,255,cv.CV_THRESH_TOZERO_INV)
            # cv.Threshold(yellow,yellow,50,255,cv.CV_THRESH_BINARY)
            cv.Erode(yellow, yellow, ex, 1)
            cv.Dilate(yellow, yellow, tHack, 1)

            # BLUE
            CvtColor(dst, hsv, CV_BGR2HSV)
            cv.Split(hsv, hue, sat, val, None)
            cv.Threshold(hue, blue, 80, 255, cv.CV_THRESH_BINARY)
            cv.Threshold(val, val, 80, 255, cv.CV_THRESH_BINARY_INV)

            # Removes the walls
            Sub(blue, val, blue)
            Sub(yellow, val, yellow)
            Sub(ball, val, ball)
            cv.Erode(blue, blue, ex, 1)

            Set2D(ball, 4, 4, 255)
            Set2D(yellow, 4, 4, 255)
            Set2D(blue, 4, 4, 255)

        if (debug):
            ShowImage("blue", blue)
            ShowImage("yellow", yellow)
            ShowImage("ball", ball)
        #find ball
        #seq = None
        seq = cv.FindContours(ball, storage, cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            #print seq
            while seq != None:
                compact = 0
                count = count + 1
                if (count > ballcount):
                    break
                #removed and pitch==0 no idea why it was there
                if (cv.ContourArea(seq) != 0):
                    compact = ArcLength(seq) * ArcLength(seq) / (
                        4 * cv.ContourArea(seq) * math.pi)
                    if compact >= ballcompact:
                        print ">> compact: ", compact, ballcompact
                        seq = seq.h_next()
                        continue
                area = cv.ContourArea(seq)
                if (area == 0 or area > ballmaxarea
                        or area < ballminiarea):  # or compact > ballcompact):

                    print ">> area: ", area, ballmaxarea, ballminiarea
                    seq = seq.h_next()
                    continue
                else:
                    ballx = 0
                    bally = 0
                    for p in seq:
                        ballx = ballx + p[0]
                        bally = bally + p[1]
                    ballx = int(float(ballx) / len(seq))
                    bally = int(float(bally) / len(seq))
                    #	print "compact=%f,area=%f" %(compact,area)
                    cv.Circle(dst, (ballx, bally), 4, cv.CV_RGB(255, 255, 255),
                              2, 8, 0)
                    cv.Circle(dst2, (ballx, bally), 4,
                              cv.CV_RGB(255, 255, 255), 2, 8, 0)
                    break
            if (count > 15 or seq == None):
                ballx = -1
                bally = -1
                print "# error: ball not found  "

        #find blue
        seq = None
        seq = cv.FindContours(blue, storage, cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            while seq != None:
                count = count + 1
                if (count > bluecount):
                    break
                if (cv.ContourArea(seq) < blueminiarea
                        or cv.ContourArea(seq) > bluemaxarea):
                    seq = seq.h_next()
                    continue
                else:
                    hull = None
                    convex = None
                    #
                    hull = cv.ConvexHull2(seq, storage)
                    convex = cv.ConvexityDefects(seq, hull, storage)
                    if (len(convex) > 1):
                        convex = sorted(convex,
                                        key=lambda (k1, k2, k3, k4): k4
                                        )  #sort by depth of the convex defect
                        if (convex[len(convex) - 1][3] < blueminidepth
                                or convex[len(convex) - 2][3] < blueminidepth
                                or convex[len(convex) - 1][3] > bluemaxdepth
                                or convex[len(convex) - 2][3] > bluemaxdepth):
                            cv.Line(dst, convex[len(convex) - 1][0],
                                    convex[len(convex) - 1][2],
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, convex[len(convex) - 1][2],
                                    convex[len(convex) - 1][1],
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)
                            cv.Line(dst, convex[len(convex) - 2][0],
                                    convex[len(convex) - 2][2],
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, convex[len(convex) - 2][2],
                                    convex[len(convex) - 2][1],
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)
                            seq = seq.h_next()
                            continue
                        else:
                            #find the T
                            blue_start1 = convex[len(convex) - 1][0]
                            blue_end1 = convex[len(convex) - 1][1]
                            blue_depth1 = convex[len(convex) - 1][2]

                            #draw the side line of T
                            cv.Line(dst, blue_start1, blue_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, blue_depth1, blue_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, blue_start1, blue_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, blue_depth1, blue_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            blue_start2 = convex[len(convex) - 2][0]
                            blue_end2 = convex[len(convex) - 2][1]
                            blue_depth2 = convex[len(convex) - 2][2]
                            cv.Line(dst, blue_start2, blue_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, blue_depth2, blue_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, blue_start2, blue_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, blue_depth2, blue_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            blue_from = ((blue_depth1[0] + blue_depth2[0]) / 2,
                                         (blue_depth1[1] + blue_depth2[1]) / 2
                                         )  #calculate the center of robot

                            #calculate the end of direction vector, the two end point of the smaller distans
                            if math.hypot(blue_start1[0] - blue_end2[0],
                                          blue_start1[1] -
                                          blue_end2[1]) > math.hypot(
                                              blue_end1[0] - blue_start2[0],
                                              blue_end1[1] - blue_start2[1]):
                                blue_to = ((blue_end1[0] + blue_start2[0]) / 2,
                                           (blue_end1[1] + blue_start2[1]) / 2)
                            else:
                                blue_to = ((blue_start1[0] + blue_end2[0]) / 2,
                                           (blue_start1[1] + blue_end2[1]) / 2)
                            cv.Line(dst, blue_from, blue_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst, blue_from, 1, cv.CV_RGB(255, 0, 0),
                                      2, 8, 0)
                            cv.Circle(dst, blue_to, 1, cv.CV_RGB(0, 0, 0), 2,
                                      8, 0)

                            cv.Line(dst2, blue_from, blue_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst2, blue_from, 1, cv.CV_RGB(255, 0, 0),
                                      2, 8, 0)
                            cv.Circle(dst2, blue_to, 1,
                                      cv.CV_RGB(255, 255, 255), 2, 8, 0)
                            break
                    else:
                        seq = seq.h_next()
                        continue
            if (count > bluecount or seq == None):
                blue_from = (0, 0)
                blue_to = (0, 0)
                print "# error: blue not found  "
        #find yellow
        seq = None
        seq = cv.FindContours(yellow, storage, cv.CV_RETR_LIST,
                              cv.CV_LINK_RUNS)

        if seq != None:
            count = 0
            while seq != None:
                count = count + 1
                if (count > yellowcount):
                    break
                area = cv.ContourArea(seq)
                if (area < yellowminiarea or area > yellowmaxarea):
                    seq = seq.h_next()
                    continue
                else:
                    hull = None
                    convex = None
                    #
                    hull = cv.ConvexHull2(seq, storage)
                    convex = cv.ConvexityDefects(seq, hull, storage)
                    if (len(convex) > 1):
                        convex = sorted(convex,
                                        key=lambda (k1, k2, k3, k4): k4
                                        )  #sort by depth of the convex defect
                        if (convex[len(convex) - 1][3] < yellowminidepth
                                or convex[len(convex) - 2][3] < yellowminidepth
                                or convex[len(convex) - 1][3] > yellowmaxdepth
                                or
                                convex[len(convex) - 2][3] > yellowmaxdepth):
                            seq = seq.h_next()
                            continue
                        else:
                            #find the T
                            yellow_start1 = convex[len(convex) - 1][0]
                            yellow_end1 = convex[len(convex) - 1][1]
                            yellow_depth1 = convex[len(convex) - 1][2]

                            #draw the side line of T
                            cv.Line(dst, yellow_start1, yellow_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, yellow_depth1, yellow_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, yellow_start1, yellow_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, yellow_depth1, yellow_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            yellow_start2 = convex[len(convex) - 2][0]
                            yellow_end2 = convex[len(convex) - 2][1]
                            yellow_depth2 = convex[len(convex) - 2][2]
                            cv.Line(dst, yellow_start2, yellow_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, yellow_depth2, yellow_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, yellow_start2, yellow_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, yellow_depth2, yellow_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            yellow_from = (
                                (yellow_depth1[0] + yellow_depth2[0]) / 2,
                                (yellow_depth1[1] + yellow_depth2[1]) / 2
                            )  #calculate the center of robot

                            #calculate the end of direction vector, the two end point of the smaller distans
                            if math.hypot(
                                    yellow_start1[0] - yellow_end2[0],
                                    yellow_start1[1] -
                                    yellow_end2[1]) > math.hypot(
                                        yellow_end1[0] - yellow_start2[0],
                                        yellow_end1[1] - yellow_start2[1]):
                                yellow_to = (
                                    (yellow_end1[0] + yellow_start2[0]) / 2,
                                    (yellow_end1[1] + yellow_start2[1]) / 2)
                            else:
                                yellow_to = (
                                    (yellow_start1[0] + yellow_end2[0]) / 2,
                                    (yellow_start1[1] + yellow_end2[1]) / 2)
                            # print cv.ContourArea(seq)
                            cv.Line(dst, yellow_from, yellow_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst, yellow_from, 1,
                                      cv.CV_RGB(255, 0, 0), 2, 8, 0)
                            cv.Circle(dst, yellow_to, 1, cv.CV_RGB(0, 0, 0), 2,
                                      8, 0)

                            cv.Line(dst2, yellow_from, yellow_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst2, yellow_from, 1,
                                      cv.CV_RGB(255, 0, 0), 2, 8, 0)
                            cv.Circle(dst2, yellow_to, 1,
                                      cv.CV_RGB(255, 255, 255), 2, 8, 0)
                            break
                    else:
                        seq = seq.h_next()
                        continue
            if (count > yellowcount or seq == None):
                yellow_from = (0, 0)
                yellow_to = (0, 0)
                print "# error: yellow not found"
        ballpos = (ballx, bally)
        ShowImage("camera", dst)
        if (found_goals == False):
            if (us == "yellow"):
                goals = find_goals(size, yellow_from)
                stewies_goal = goals[0]
                loiss_goal = goals[1]
                found_goals = True
            elif (us == "blue"):
                goals = find_goals(size, blue_from)
                stewies_goal = goals[0]
                loiss_goal = goals[1]
                found_goals = True
        #if (ballx >= 0):
        output(ballpos, blue_from, blue_to, yellow_from, yellow_to,
               stewies_goal, loiss_goal)
        time_passed = time.time() - aa
        countf += 1
        if (time_passed >= 1):
            print "frame per second: " + str(countf),
            countf = 0
            aa = time.time()
        cv.WaitKey(2)
예제 #27
0
                            maxProbInt = probIntensity
                cv.ShowImage("skinProb",
                             skinProbImg)  #Original skin probability image

                #threshold
                cv.InRangeS(skinProbImg, 100, 255, skinProbImg)
                cv.ShowImage(
                    "skinProbThresholded1", skinProbImg
                )  #Original skin probability image after thresholding
                #smooth
                cv.Smooth(skinProbImg, skinProbImg, cv.CV_BLUR_NO_SCALE)
                cv.ShowImage(
                    "skinProbSmoothed", skinProbImg
                )  #Original skin probability image after thresholding
                #erode
                kernelEr = cv.CreateStructuringElementEx(
                    4, 4, 0, 0, cv.CV_SHAPE_RECT)
                cv.Erode(skinProbImg, skinProbImg, kernelEr, 1)
                cv.ShowImage(
                    "skinProbEroded", skinProbImg
                )  #Original skin probability image after thresholding
                #dilate
                kernelDi = cv.CreateStructuringElementEx(
                    14, 14, 0, 0, cv.CV_SHAPE_ELLIPSE)
                cv.Dilate(skinProbImg, skinProbImg, kernelDi, 1)
                cv.ShowImage(
                    "skinProbDilated", skinProbImg
                )  #Original skin probability image after thresholding
                #smooth
                cv.Smooth(skinProbImg, skinProbImg, cv.CV_BLUR_NO_SCALE)
                cv.ShowImage("skinProbSmoothed", skinProbImg)  #Original image
예제 #28
0
    def process_frame(self, frame):
        self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, self.debug_frame)
        cv.Copy(self.debug_frame, og_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)  #3 before competition #2 at competition
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(binary, binary,
                             255,
                             cv.CV_ADAPTIVE_THRESH_MEAN_C,
                             cv.CV_THRESH_BINARY_INV,
                             self.adaptive_thresh_blocksize,
                             self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_PROBABILISTIC,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=self.min_length,
                                   param2=self.max_gap
        )

        lines = []

        for line in raw_lines:
            lines.append(line)

        #Grouping lines depending on endpoint similarities

        for line1 in lines[:]:
            for line2 in lines[:]:
                if line1 in lines and line2 in lines and line1 != line2:
                    if math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and \
                       math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and \
                       math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and \
                       math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)
                    elif math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and \
                         math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and \
                         math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and \
                         math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)

        self.hough_corners = []
        for line in lines:
            self.hough_corners.append(line[0])
            self.hough_corners.append(line[1])

        for corner1 in self.hough_corners[:]:
            for corner2 in self.hough_corners[:]:
                if corner1 is not corner2 and corner1 in self.hough_corners and corner2 in self.hough_corners:
                    if math.fabs(corner1[0] - corner2[0]) < self.max_corner_range4 and \
                       math.fabs(corner1[1] - corner2[1]) < self.max_corner_range4:
                        corner1 = [(corner1[0] + corner2[0]) / 2, (corner1[1] + corner2[1]) / 2]
                        self.hough_corners.remove(corner2)

        for line1 in lines:
            #cv.Line(self.debug_frame,line1[0],line1[1], (0,0,255), 10, cv.CV_AA, 0)
            for line2 in lines:
                if line1 is not line2:
                    self.find_corners(line1, line2)

        for corner1 in self.corners:
            for corner2 in self.corners:
                if math.fabs(corner1[1][0] - corner2[1][0]) < self.max_corner_range2 and \
                   math.fabs(corner1[1][1] - corner2[1][1]) < self.max_corner_range2 and \
                   math.fabs(corner1[2][0] - corner2[2][0]) < self.max_corner_range2 and \
                   math.fabs(corner1[2][1] - corner2[2][1]) < self.max_corner_range2 and \
                   math.fabs(corner1[0][0] - corner2[0][0]) > self.max_corner_range2 and \
                   math.fabs(corner1[0][1] - corner2[0][1]) > self.max_corner_range2:
                    pt1 = (int(corner1[0][0]), int(corner1[0][1]))
                    pt4 = (int(corner2[0][0]), int(corner2[0][1]))
                    pt3 = (int(corner1[1][0]), int(corner1[1][1]))
                    pt2 = (int(corner1[2][0]), int(corner1[2][1]))
                    #line_color = (0,255,0)s
                    #cv.Line(self.debug_frame,pt1,pt2, line_color, 10, cv.CV_AA, 0)                  
                    #cv.Line(self.debug_frame,pt1,pt3, line_color, 10, cv.CV_AA, 0)
                    #cv.Line(self.debug_frame,pt4,pt2, line_color, 10, cv.CV_AA, 0)                  
                    #cv.Line(self.debug_frame,pt4,pt3, line_color, 10, cv.CV_AA, 0)
                    new_bin = Bin(pt1, pt2, pt3, pt4)
                    new_bin.id = self.bin_id
                    self.bin_id += 1
                    if math.fabs(line_distance(pt1, pt2) - line_distance(pt3, pt4)) < self.parallel_sides_length_thresh and \
                       math.fabs(line_distance(pt1, pt3) - line_distance(pt2, pt4)) < self.parallel_sides_length_thresh:
                        self.Bins.append(new_bin)
                        print "new_bin"

                elif (math.fabs(corner1[1][0] - corner2[2][0]) < self.max_corner_range2 and
                      math.fabs(corner1[1][1] - corner2[2][1]) < self.max_corner_range2 and
                      math.fabs(corner1[2][0] - corner2[1][0]) < self.max_corner_range2 and
                      math.fabs(corner1[2][1] - corner2[1][1]) < self.max_corner_range2 and
                      math.fabs(corner1[0][0] - corner2[0][0]) > self.max_corner_range2 and
                      math.fabs(corner1[0][1] - corner2[0][1]) > self.max_corner_range2):
                    continue

        self.corners = []
        self.final_corners = self.sort_corners() #Results are not used. Experimental corners which have been seen twice, should be only the corners we want, but there were problems
        self.sort_bins()
        self.update_bins()
        self.group_bins()
        self.draw_bins()

        for corner in self.hough_corners:
            line_color = [255, 0, 0]
            cv.Circle(self.debug_frame, corner, 15, (255, 0, 0), 2, 8, 0)

        for line in lines:
            line_color = [255, 0, 0]
            cv.Line(self.debug_frame, line[0], line[1], line_color, 5, cv.CV_AA, 0)
            #cv.Circle(self.debug_frame, line[0], 15, (255,0,0), 2,8,0)
            #cv.Circle(self.debug_frame, line[1], 15, (255,0,0), 2,8,0)

        #Output bins
        self.output.bins = self.Bins
        anglesum = 0
        for bins in self.output.bins:
            bins.theta = (bins.center[0] - frame.width / 2) * 37 / (frame.width / 2)
            bins.phi = -1 * (bins.center[1] - frame.height / 2) * 36 / (frame.height / 2)
            anglesum += bins.angle
            # bins.orientation = bins.angle
        if len(self.output.bins) > 0:
            self.output.orientation = anglesum / len(self.output.bins)
        else:
            self.output.orientation = None
        self.return_output()

        svr.debug("Bins", self.debug_frame)
        svr.debug("Original", og_frame)

        #BEGIN SHAPE PROCESSING

        #constants
        img_width = 128
        img_height = 256

        number_x = 23
        number_y = 111
        number_w = 82
        number_h = 90

        bin_thresh_blocksize = 11
        bin_thresh = 1.9

        red_significance_threshold = 0.4

        #load templates - run once, accessible to number processor

        number_templates = [
            (10, cv.LoadImage("number_templates/10.png")),
            (16, cv.LoadImage("number_templates/16.png")),
            (37, cv.LoadImage("number_templates/37.png")),
            (98, cv.LoadImage("number_templates/98.png")),
        ]

        #Begin Bin Contents Processing

        for bin in self.Bins:
            #Take the bin's corners, and get an image containing an img_width x img_height rectangle of it
            transf = cv.CreateMat(3, 3, cv.CV_32FC1)
            cv.GetPerspectiveTransform(
                [bin.corner1, bin.corner2, bin.corner3, bin.corner4],
                [(0, 0), (0, img_height), (img_width, 0), (img_width, img_height)],
                transf
            )
            bin_image = cv.CreateImage([img_width, img_height], 8, 3)
            cv.WarpPerspective(frame, bin_image, transf)

            #AdaptaveThreshold to get black and white image highlighting the number (still works better than my yellow-vs-red threshold attempt
            hsv = cv.CreateImage(cv.GetSize(bin_image), 8, 3)
            bin_thresh_image = cv.CreateImage(cv.GetSize(bin_image), 8, 1)
            cv.CvtColor(bin_image, hsv, cv.CV_BGR2HSV)
            cv.SetImageCOI(hsv, 3)
            cv.Copy(hsv, bin_thresh_image)
            cv.SetImageCOI(hsv, 0)
            cv.AdaptiveThreshold(bin_thresh_image, bin_thresh_image,
                                 255,
                                 cv.CV_ADAPTIVE_THRESH_MEAN_C,
                                 cv.CV_THRESH_BINARY_INV,
                                 bin_thresh_blocksize,
                                 bin_thresh,
            )
            kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
            cv.Erode(bin_thresh_image, bin_thresh_image, kernel, 1)
            cv.Dilate(bin_thresh_image, bin_thresh_image, kernel, 1)

            #Here, we loop through all four different templates, and figure out which one we think is most likely.
            #The comparison function counts corresponding pixels that are non-zero in each image, and then corresponding pixels that are different in each image. The ratio of diff_count/both_count is our "unconfidence" ratio. The lower it is, the more confident we are.
            #There are two nearly identical pieces of code within this loop. One checks the bin right-side-up, and the other one checks it flipped 180.
            last_thought_number = -1
            last_unconfidence_ratio = number_w * number_h + 2
            for i in range(0, len(number_templates)):
                both_count = 0
                diff_count = 0
                this_number_image = number_templates[i][1]
                for y in range(0, number_h):
                    for x in range(0, number_w):
                        if (bin_thresh_image[y + number_y, x + number_x] != 0) and (this_number_image[y, x][0] != 0):
                            both_count += 1
                        elif (bin_thresh_image[y + number_y, x + number_x] != 0) or (this_number_image[y, x][0] != 0):
                            diff_count += 1
                if both_count == 0:
                    unconfidence_ratio = number_w * number_h + 1  # max unconfidence
                else:
                    unconfidence_ratio = 1.0 * diff_count / both_count
                if unconfidence_ratio < last_unconfidence_ratio:
                    last_thought_number = number_templates[i][0]
                    last_unconfidence_ratio = unconfidence_ratio
                both_count = 0
                diff_count = 0
                for y in range(0, number_h):
                    for x in range(0, number_w):
                        if (bin_thresh_image[img_height - number_y - 1 - y, img_width - number_x - 1 - x] != 0) and (
                                this_number_image[y, x][0] != 0):
                            both_count += 1
                        elif (bin_thresh_image[img_height - number_y - 1 - y, img_width - number_x - 1 - x] != 0) or (
                                this_number_image[y, x][0] != 0):
                            diff_count += 1
                if both_count == 0:
                    unconfidence_ratio = number_w * number_h + 1  # max unconfidence
                else:
                    unconfidence_ratio = 1.0 * diff_count / both_count
                if unconfidence_ratio < last_unconfidence_ratio:
                    last_thought_number = number_templates[i][0]
                    last_unconfidence_ratio = unconfidence_ratio

            print str(last_thought_number) + " | " + str(last_unconfidence_ratio)

            try: #check if it's defined
                bin.number_unconfidence_ratio
            except:
                bin.number_unconfidence_ratio = last_unconfidence_ratio
                bin.number = last_thought_number
                print "Set Speed Limit Number"
            else:
                if last_unconfidence_ratio < bin.number_unconfidence_ratio:
                    bin.number_unconfidence_ratio = last_unconfidence_ratio
                    if bin.number == last_thought_number:
                        print "More Confident on Same Number: Updated"
                    else:
                        print "More Confident on Different Number: Updated"
                        bin.icon = last_thought_number
예제 #29
0
    def process_frame(self, frame):
        self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        self.test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)

        cv.Copy(frame, self.debug_frame)
        cv.Copy(frame, self.test_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        # Adaptive Threshold
        cv.AdaptiveThreshold(binary, binary,
                             255,
                             cv.CV_ADAPTIVE_THRESH_MEAN_C,
                             cv.CV_THRESH_BINARY_INV,
                             self.adaptive_thresh_blocksize,
                             self.adaptive_thresh,
                             )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)

        # Find Corners
        temp1 = cv.CreateImage(cv.GetSize(frame), 8, 1)
        temp2 = cv.CreateImage(cv.GetSize(frame), 8, 1)
        self.corners = cv.GoodFeaturesToTrack(binary, temp1, temp2, self.max_corners, self.quality_level, self.min_distance, None, self.good_features_blocksize, 0, 0.4)

        # Display Corners
        for corner in self.corners:
            corner_color = (0, 0, 255)
            text_color = (0, 255, 0)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, .6, .6, 0, 1, 1)
            cv.Circle(self.debug_frame, (int(corner[0]), int(corner[1])), 15, corner_color, 2, 8, 0)

        # Find Candidates
        for confirmed in self.confirmed:
            confirmed.corner1_repl_check = 0
            confirmed.corner2_repl_check = 0
            confirmed.corner3_repl_check = 0
            confirmed.corner4_repl_check = 0
            for corner in self.corners:
                if math.fabs(confirmed.corner1[0] - corner[0]) < self.MaxCornerTrans and \
                   math.fabs(confirmed.corner1[1] - corner[1]) < self.MaxCornerTrans:
                    confirmed.corner1_repl_check = 1
                    confirmed.corner1_repl = corner
                elif math.fabs(confirmed.corner2[0] - corner[0]) < self.MaxCornerTrans and \
                        math.fabs(confirmed.corner2[1] - corner[1]) < self.MaxCornerTrans:
                    confirmed.corner2_repl_check = 1
                    confirmed.corner2_repl = corner
                elif math.fabs(confirmed.corner3[0] - corner[0]) < self.MaxCornerTrans and \
                        math.fabs(confirmed.corner3[1] - corner[1]) < self.MaxCornerTrans:
                    confirmed.corner3_repl_check = 1
                    confirmed.corner3_repl = corner
                elif math.fabs(confirmed.corner4[0] - corner[0]) < self.MaxCornerTrans and \
                        math.fabs(confirmed.corner4[1] - corner[1]) < self.MaxCornerTrans:
                    confirmed.corner4_repl_check = 1
                    confirmed.corner4_repl = corner
            if confirmed.corner4_repl_check == 1 and confirmed.corner3_repl_check == 1 and confirmed.corner2_repl_check == 1 and confirmed.corner1_repl_check == 1:
                confirmed.corner1 = confirmed.corner1_repl
                confirmed.corner2 = confirmed.corner2_repl
                confirmed.corner3 = confirmed.corner3_repl
                confirmed.corner4 = confirmed.corner4_repl

                confirmed.midx = rect_midpointx(confirmed.corner1, confirmed.corner2, confirmed.corner3, confirmed.corner4)
                confirmed.midy = rect_midpointy(confirmed.corner1, confirmed.corner2, confirmed.corner3, confirmed.corner4)

                if confirmed.last_seen < self.last_seen_max:
                    confirmed.last_seen += 5

        for corner1 in self.corners:
            for corner2 in self.corners:
                for corner3 in self.corners:
                    for corner4 in self.corners:
                        # Checks that corners are not the same and are in the proper orientation
                        if corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and \
                           corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and \
                           corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and \
                           corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and \
                           corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0]:
                            # Checks that the side ratios are correct
                            if math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and \
                               math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and \
                               math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < self.ratio_threshold or \
                               math.fabs(line_distance(corner1, corner2) / line_distance(corner1, corner3)) < self.ratio_threshold:
                                # Checks that angles are roughly 90 degrees
                                angle_cnr_2 = math.fabs(angle_between_lines(line_slope(corner1, corner2), line_slope(corner2, corner4)))
                                if self.angle_min < angle_cnr_2 < self.angle_max:
                                    angle_cnr_3 = math.fabs(angle_between_lines(line_slope(corner1, corner3), line_slope(corner3, corner4)))
                                    if self.angle_min2 < angle_cnr_3 < self.angle_max2:
                                        new_bin = Bin(corner1, corner2, corner3, corner4)
                                        self.match_bins(new_bin)
        self.sort_bins()

        '''
        #START SHAPE PROCESSING

        #TODO load these ONCE somewhere
        samples = np.loadtxt('generalsamples.data',np.float32)
        responses = np.loadtxt('generalresponses.data',np.float32)
        responses = responses.reshape((responses.size,1))
        model = cv2.KNearest()
        model.train(samples,responses)

        for bin in self.confirmed:
                try:
                        bin.speedlimit
                except:
                        continue
                transf = cv.CreateMat(3, 3, cv.CV_32FC1)
                corner_orders = [
                        [bin.corner1, bin.corner2, bin.corner3, bin.corner4], #0 degrees
                        [bin.corner4, bin.corner3, bin.corner2, bin.corner1], #180 degrees
                        [bin.corner2, bin.corner4, bin.corner1, bin.corner3], #90 degrees
                        [bin.corner3, bin.corner1, bin.corner4, bin.corner2], #270 degrees
                        [bin.corner3, bin.corner4, bin.corner1, bin.corner2], #0 degrees and flipped X
                        [bin.corner2, bin.corner1, bin.corner4, bin.corner3], #180 degrees and flipped X
                        [bin.corner1, bin.corner3, bin.corner2, bin.corner4], #90 degrees and flipped X
                        [bin.corner4, bin.corner2, bin.corner3, bin.corner1]] #270 degrees andf flipped X
                for i in range(0, 8):
                        cv.GetPerspectiveTransform(
                                corner_orders[i],
                                [(0, 0), (0, 256), (128, 0), (128, 256)],
                                transf
                        )
                        shape = cv.CreateImage([128, 256], 8, 3)
                        cv.WarpPerspective(frame, shape, transf)

                        shape_thresh = np.zeros((256-104,128,1), np.uint8)
                        j = 104
                        while j<256:
                            i = 0
                            while i<128:
                                    pixel = cv.Get2D(shape, j, i)
                                if int(pixel[2]) > (int(pixel[1]) + int(pixel[0])) * 0.7:
                                    shape_thresh[j-104,i] = 255
                                else:
                                    shape_thresh[j-104,i] = 0
                                i = i+1
                            j = j+1
                        cv2.imshow("Bin " + str(i), shape_thresh)
                        contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
                        for cnt in contours:
                                    if cv2.contourArea(cnt)>50:
                                        [x,y,w,h] = cv2.boundingRect(cnt)
                                        if  h>54 and w>36:
                                                    roi = thresh[y:y+h,x:x+w]
                                                    roismall = cv2.resize(roi,(10,10))
                                                    roismall = roismall.reshape((1,100))
                                                    roismall = np.float32(roismall)
                                                    retval, results, neigh_resp, dists = model.find_nearest(roismall, k = 1)
                                                    digit_tuples.append( (x, int((results[0][0]))) )

                            if len(digit_tuples) == 2:
                                    digit_tuples_sorted = sorted(digit_tuples, key=lambda digit_tuple: digit_tuple[0])
                                speedlimit = 0
                                for i in range(0, len(digit_tuples_sorted)):
                                            speedlimit = speedlimit * 10 + digit_tuples_sorted[i][1]
                                    bin.speedlimit = speedlimit
                                    print "Found speed limit: " + str(speedlimit)
                                    break
                            else:
                                    print "Unable to determine speed limit"

        #... TODO more
        #END SHAPE PROCESSING
        '''

        svr.debug("Bins", self.debug_frame)
        svr.debug("Bins2", self.test_frame)

        # Output bins
        self.output.bins = self.confirmed
        anglesum = 0
        for bins in self.output.bins:
            bins.theta = (bins.midx - frame.width / 2) * 37 / (frame.width / 2)
            bins.phi = -1 * (bins.midy - frame.height / 2) * 36 / (frame.height / 2)
            bins.shape = bins.object
            anglesum += bins.angle
           # bins.orientation = bins.angle
        if len(self.output.bins) > 0:
            self.output.orientation = anglesum / len(self.output.bins)
        else:
            self.output.orientation = None
        self.return_output()
예제 #30
0
파일: gate.py 프로젝트: tarora2/seawolf
    def process_frame(self, frame):
        (w, h) = cv.GetSize(frame)

        #generate hue selection frames

        #create locations for the a pair of test frames
        frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

        #use the red channel for the binary frame (just for debugging purposes)
        cv.Copy(frame, frametest)
        cv.SetImageCOI(frametest, 3)
        cv.Copy(frametest, binarytest)
        cv.SetImageCOI(frametest, 0)  #reset COI
        #svr.debug("R?",binarytest)

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)

        found_gate = False

        #create a new frame just for comparison purposes
        unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, unchanged_frame)

        #apply a course noise filter
        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)  #reset COI

        #shift hue of image such that orange->red are at top of spectrum
        '''
        binary = libvision.misc.cv_to_cv2(binary)
        binary = libvision.misc.shift_hueCV2(binary, self.target_shift)
        binary = libvision.misc.cv2_to_cv(binary)
	'''

        #correct for wraparound on red spectrum
        #cv.InRange(binary,a_array,b_array,binarytest) #generate mask
        #cv.Add(binary,cv.fromarray(ones*180),binary,mask=binarytest) #use mask to selectively add values
        svr.debug("R2?", binary)
        svr.debug("R2?", binary)

        #run adaptive threshold for edge detection and more noise filtering
        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)
        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        # Get vertical lines
        vertical_lines = []
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
               line[1] > math.pi-self.vertical_threshold:

                #absolute value does better grouping currently
                vertical_lines.append((abs(line[0]), line[1]))

        #print message to user for performance purposes
        logging.debug("{} possibilities reduced to {} lines".format(
            len(raw_lines), len(vertical_lines)))

        # Group vertical lines
        vertical_line_groups = [
        ]  # A list of line groups which are each a line list
        i = 0
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:
                i += 1
                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        #quick debugging statement
        logging.debug("{} internal iterations for {} groups".format(
            i, len(vertical_line_groups)))

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        ####################################################
        #vvvv Horizontal line code isn't used for anything

        # Get horizontal lines
        horizontal_lines = []
        for line in raw_lines:
            dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
               dist_from_horizontal > math.pi-self.horizontal_threshold:

                horizontal_lines.append((abs(line[0]), line[1]))

        # Group horizontal lines
        horizontal_line_groups = [
        ]  # A list of line groups which are each a line list
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) is 1:
            self.seen_crossbar = True
            if self.debug:
                rhos = map(lambda line: line[0], horizontal_line_groups[0])
                angles = map(lambda line: line[1], horizontal_line_groups[0])
                line = (sum(rhos) / len(rhos),
                        circular_average(angles, math.pi))
                horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        #^^^ Horizontal line code isn't used for anything
        ###################################################

        self.left_pole = None
        self.right_pole = None
        #print vertical_lines
        self.returning = 0
        self.found = False

        if len(vertical_lines) is 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
            self.right_pole = round(
                max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2

            self.returning = (self.left_pole + self.right_pole) / 2
            logging.info("Returning {} as gate center delta.".format(
                self.returning))

            #initialize first iteration with 2 known poles
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0

            #increment a counter if result is good.
            if self.last_center is None:
                self.last_center = self.returning
                self.seen_count = 1
            elif math.fabs(self.last_center -
                           self.returning) < self.center_trans_thresh:
                self.seen_count += 1
                self.last_seen += 2
            else:
                self.last_seen -= 1

            #if not conviced, forget left/right pole. Else proclaim success.
            if self.seen_count < self.seen_count_thresh:
                self.left_pole = None
                self.right_pole = None
            else:
                print "FOUND CENTER AND RETURNED IT"
                self.found = True
        else:
            self.returning = 0
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            self.last_seen -= 1
            self.left_pole = None
            self.right_pole = None

        #TODO: If one pole is seen, is it left or right pole?

        if self.debug:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
            libvision.misc.draw_lines(frame, vertical_lines)
            libvision.misc.draw_lines(frame, horizontal_lines)

            if self.found:
                cv.Circle(frame, (int(frame.width / 2 + self.returning),
                                  int(frame.height / 2)), 15, (0, 255, 0), 2,
                          8, 0)
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
                cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                           font, (255, 255, 0))
                #print frame.width

            #cv.ShowImage("Gate", cv.CloneImage(frame))
            svr.debug("Gate", cv.CloneImage(frame))
            svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

        #populate self.output with infos
        self.output.seen_crossbar = self.seen_crossbar
        self.output.left_pole = self.left_pole
        self.output.right_pole = self.right_pole

        self.return_output()