Example #1
				def circ(n): 
					pt  = cv.cvPoint(int(round(n[0,0])),int(round(n[1,0]))) 
					cv.cvCircle(self.disp.buffer, pt, size, 
							color_scalar, cv.CV_FILLED, cv.CV_AA) 
					pt2 = cv.cvPoint(pt.x + 2, pt.y + 2) 
					cv.cvPutText(self.disp.buffer, text, pt,  self.disp.font, cv.cvScalar(255,255,255)) 
					cv.cvPutText(self.disp.buffer, text, pt2, self.disp.font, cv.cvScalar(50,50,50)) 
Example #2
 def __findcurve(self, img):
     storage = cv.cvCreateMemStorage(0)
     nb_contours, cont = cv.cvFindContours(img, storage,
                                           cv.sizeof_CvContour,
                                           cv.CV_RETR_LIST,
                                           cv.CV_CHAIN_APPROX_NONE,
                                           cv.cvPoint(0, 0))
     cidx = int(random.random() * len(color))
     if (self.drawcontour):
         cv.cvDrawContours(self.drawimg, cont, _white, _white, 1, 1,
                           cv.CV_AA, cv.cvPoint(0, 0))
     idx = 0
     for c in cont.hrange():
         PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
         PointArray2D32f = cv.cvCreateMat(1, c.total, cv.CV_32FC2)
         cv.cvCvtSeqToArray(c, PointArray,
                            cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
         fpoints = []
         for i in range(c.total):
             kp = myPoint()
             kp.x = cv.cvGet2D(PointArray, 0, i)[0]
             kp.y = cv.cvGet2D(PointArray, 0, i)[1]
             kp.index = idx
             idx += 1
             fpoints.append(kp)
         self.allcurve.append(fpoints)
     self.curvelength = idx
Example #3
 def __findcurve(self, img):
     storage = cv.cvCreateMemStorage(0)
     nb_contours, cont = cv.cvFindContours (img,
         storage,
         cv.sizeof_CvContour,
         cv.CV_RETR_LIST,
         cv.CV_CHAIN_APPROX_NONE,
         cv.cvPoint (0,0))
     cidx = int(random.random() * len(color))
     if (self.drawcontour):
         cv.cvDrawContours (self.drawimg, cont, _white, _white, 1, 1, cv.CV_AA, cv.cvPoint (0, 0))
     idx = 0
     for c in cont.hrange():
         PointArray = cv.cvCreateMat(1, c.total, cv.CV_32SC2)
         PointArray2D32f= cv.cvCreateMat( 1, c.total  , cv.CV_32FC2)
         cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
         fpoints = []
         for i in range(c.total):
             kp = myPoint()
             kp.x = cv.cvGet2D(PointArray,0, i)[0]
             kp.y = cv.cvGet2D(PointArray,0, i)[1]
             kp.index = idx
             idx += 1
             fpoints.append(kp)
         self.allcurve.append(fpoints)
     self.curvelength = idx
Example #4
def getMargins(cut, margin, factor=1):
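	# Build two lines offset by 'margin' on either side of a straight, axis-aligned cut, each end point translated via transformer.translatePoint(..., factor).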
	lines = []
	if cut.p1.x == cut.p2.x:
		dx = margin
		dy = 0
	elif cut.p1.y == cut.p2.y:
		dx = 0
		dy = margin
	else:
		raise OrientationException("The cut is not straight")
	
	lower_p1 = cv.cvPoint(cut.p1.x - dx, cut.p1.y - dy)
	lower_p2 = cv.cvPoint(cut.p2.x - dx, cut.p2.y - dy)
	lower_p1 = transformer.translatePoint(lower_p1, factor)
	lower_p2 = transformer.translatePoint(lower_p2, factor)
	
	upper_p1 = cv.cvPoint(cut.p1.x + dx, cut.p1.y + dy)
	upper_p2 = cv.cvPoint(cut.p2.x + dx, cut.p2.y + dy)
	upper_p1 = transformer.translatePoint(upper_p1, factor)
	upper_p2 = transformer.translatePoint(upper_p2, factor)

	lines.append(Line(lower_p1, lower_p2))
	lines.append(Line(upper_p1, upper_p2))

	return lines
Example #5
    def set_opencv(self, opencv):
        """
        Sets the Graphic's opencv object.

        Arguments:
        - self: The main object pointer.
        - opencv: The opencv object.
        """

        # Update the current attrs
        self.x = opencv.x
        self.y = opencv.y

        if self.__ocv is not None:
            # Update the last attr
            self.last = self.__ocv

            # Update the diff attr
            self.rel_diff = cv.cvPoint( self.last.x - self.x,
                                        self.last.y - self.y )

            self.abs_diff = cv.cvPoint( self.x - self.orig.x,
                                        self.y - self.orig.y )

        self.__ocv = opencv
Example #6
 def __GetCrossDist(self, p1, dx, dy, iPointIndex):
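     # Distance from p1, travelling along (dx, dy), to the nearest crossing with the
     # polygon formed by self.keypoints; the two edges touching point iPointIndex are
     # skipped, and 0 is returned when no crossing is found.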
     bFound = 0
     fDist = 0
     bestPoint = cv.cvPoint(0, 0)
     bestLength = 1e10
     bigLength = -1
     nPoints = len(self.keypoints)
     for k in range(nPoints):
         if (k == iPointIndex or k == iPointIndex + 1):
             continue
         q1 = self.keypoints[(k - 1 + nPoints) % nPoints]
         q2 = self.keypoints[k]
         du = q2.x - q1.x
         dv = q2.y - q1.y
         dd = (dy * du - dx * dv)
         if (dd == 0):
             continue
         t = (dy * (p1.x - q1.x) - dx * (p1.y - q1.y)) / dd
         if (t >= -0.0001 and t <= 1.0001):  # found it
             ptt = cv.cvPoint(int(q1.x + t * du), int(q1.y + t * dv))
             l = math.sqrt((ptt.x - p1.x) * (ptt.x - p1.x) +
                           (ptt.y - p1.y) * (ptt.y - p1.y))
             l2 = ((dv * q1.x - du * q1.y) -
                   (dv * p1.x - du * p1.y)) / (dv * dx - du * dy)
             bFound = 1
             if (l <= bestLength and l2 > 0):
                 bestPoint = ptt
                 bestLength = l
     fDist = bestLength
     if (not bFound):
         fDist = 0
     if (self.img):
         cv.cvLine(self.img, cv.cvPoint(int(p1.x), int(p1.y)), bestPoint,
                   cv.cvScalar(255, 255, 255, 0))
     return fDist
Example #7
def draw_weighted_Pose2D(display, max_weight, particles):
    for p in particles:
        if type(p) is types.TupleType:
            part, weight = p
            rpos = part.pos
        else:
            part = p
            rpos = p.pos

        x = mt.cos(part.angle) * .07
        y = mt.sin(part.angle) * .07

        dir  = rpos.copy()
        dir[0,0] = dir[0,0] + x
        dir[1,0] = dir[1,0] + y

        pos  = display.to_screen(rpos)
        dirp = display.to_screen(dir)

        if type(p) is types.TupleType:
            color = round(255.0 * (weight/max_weight))
            cv.cvCircle(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])), 
                        2, cv.cvScalar(255, 255-color, 255), cv.CV_FILLED, cv.CV_AA)
            cv.cvCircle(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])), 
                        2, cv.cvScalar(200, 200, 200), 8, cv.CV_AA)
        else:
            cv.cvCircle(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])), 
                        2, cv.cvScalar(150, 150, 150), cv.CV_FILLED, cv.CV_AA)

        cv.cvLine(display.buffer, cv.cvPoint((int) (pos[0,0]), (int) (pos[1,0])),
                                  cv.cvPoint((int) (dirp[0,0]), (int) (dirp[1,0])),
                                  cv.cvScalar(100,200,100), 1, cv.CV_AA, 0)
Example #8
def detect(image, cascade_file='haarcascade_data/haarcascade_frontalface_alt.xml'):
    image_size = cv.cvGetSize(image)

    # create grayscale version
    grayscale = cv.cvCreateImage(image_size, 8, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)

    # create storage
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)

    # equalize histogram
    cv.cvEqualizeHist(grayscale, grayscale)

    # detect objects
    cascade = cv.cvLoadHaarClassifierCascade(cascade_file, cv.cvSize(1,1))
    faces = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, cv.cvSize(50, 50))

    positions = []
    if faces:
        for i in faces:
            positions.append({'x': i.x, 'y': i.y,
                              'width': i.width, 'height': i.height})
            cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
                         cv.cvPoint(int(i.x + i.width), int(i.y + i.height)),
                         cv.CV_RGB(0, 255, 0), 3, 8, 0)
    return positions
Example #9
 def __GetCrossDist(self, p1, dx, dy, iPointIndex):
     bFound = 0
     fDist = 0
     bestPoint = cv.cvPoint(0, 0)
     bestLength = 1e10
     bigLength = -1
     nPoints = len(self.keypoints)
     for k in range(nPoints):
         if (k == iPointIndex or k == iPointIndex + 1):
             continue
         q1 = self.keypoints[(k - 1 + nPoints) % nPoints]
         q2 = self.keypoints[k]
         du = q2.x - q1.x
         dv = q2.y - q1.y
         dd = (dy * du - dx * dv)
         if (dd == 0):
             continue
         t =  (dy * (p1.x - q1.x) - dx * (p1.y - q1.y)) / dd
         if (t >= -0.0001 and t <= 1.0001): # found it
             ptt =  cv.cvPoint(int(q1.x + t * du), int(q1.y + t * dv))
             l = math.sqrt((ptt.x - p1.x ) * (ptt.x - p1.x ) + (ptt.y - p1.y ) * (ptt.y - p1.y))
             l2 = ((dv * q1.x - du * q1.y) - (dv * p1.x - du * p1.y)) / ( dv * dx - du * dy)
             bFound = 1
             if (l <= bestLength and l2 > 0):
                 bestPoint = ptt
                 bestLength = l
     fDist = bestLength
     if (not bFound):
         fDist = 0
     if (self.img):
         cv.cvLine(self.img, cv.cvPoint(int(p1.x), int(p1.y)), bestPoint, cv.cvScalar(255, 255, 255, 0))
     return fDist
Example #10
def draw_bounding_boxes(cascade_list, img, r, g, b, width):
    if cascade_list:
        for rect in cascade_list:
            opencv.cvRectangle(
                img, opencv.cvPoint(int(rect.x), int(rect.y)),
                opencv.cvPoint(int(rect.x + rect.width),
                               int(rect.y + rect.height)),
                opencv.CV_RGB(r, g, b), width)
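
A hypothetical call, reusing the Haar-detection pattern seen in the other examples on this page; 'faces' (a cvHaarDetectObjects result) and 'frame' are illustrative names, not from the original project:

draw_bounding_boxes(faces, frame, 0, 255, 0, 2)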
Example #11
 def face_points(self, face):
     """ Get the points information from the face data """
     x_1 = int(face.x * self.image_scale)
     y_1 = int(face.y * self.image_scale)
     x_2 = int((face.x + face.width) * self.image_scale)
     y_2 = int((face.y + face.height) * self.image_scale)
     pt1 = cv.cvPoint(x_1, y_1)
     pt2 = cv.cvPoint(x_2, y_2)
     return pt1, pt2
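
A sketch of how the returned points are typically consumed, assuming a detector instance exposing face_points, a face rectangle from cvHaarDetectObjects, and the source image (all names are illustrative):

pt1, pt2 = detector.face_points(face)
cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)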
Example #12
	def face_points(self, face):
		""" Get the points information from the face data """
		x_1 = int(face.x*self.image_scale)
		y_1 = int(face.y*self.image_scale)
		x_2 = int((face.x+face.width)*self.image_scale)
		y_2 = int((face.y+face.height)*self.image_scale)
		pt1 = cv.cvPoint(x_1, y_1)
		pt2 = cv.cvPoint(x_2, y_2)
		return pt1, pt2
Example #13
 def circ(n):
     pt = cv.cvPoint(int(round(n[0, 0])), int(round(n[1, 0])))
     cv.cvCircle(self.disp.buffer, pt, size, color_scalar,
                 cv.CV_FILLED, cv.CV_AA)
     pt2 = cv.cvPoint(pt.x + 2, pt.y + 2)
     cv.cvPutText(self.disp.buffer, text, pt, self.disp.font,
                  cv.cvScalar(255, 255, 255))
     cv.cvPutText(self.disp.buffer, text, pt2, self.disp.font,
                  cv.cvScalar(50, 50, 50))
Example #14
def draw_weighted_Pose2D(display, max_weight, particles):
    for p in particles:
        if type(p) is types.TupleType:
            part, weight = p
            rpos = part.pos
        else:
            part = p
            rpos = p.pos

        x = mt.cos(part.angle) * 0.07
        y = mt.sin(part.angle) * 0.07

        dir = rpos.copy()
        dir[0, 0] = dir[0, 0] + x
        dir[1, 0] = dir[1, 0] + y

        pos = display.to_screen(rpos)
        dirp = display.to_screen(dir)

        if type(p) is types.TupleType:
            color = round(255.0 * (weight / max_weight))
            cv.cvCircle(
                display.buffer,
                cv.cvPoint((int)(pos[0, 0]), (int)(pos[1, 0])),
                2,
                cv.cvScalar(255, 255 - color, 255),
                cv.CV_FILLED,
                cv.CV_AA,
            )
            cv.cvCircle(
                display.buffer,
                cv.cvPoint((int)(pos[0, 0]), (int)(pos[1, 0])),
                2,
                cv.cvScalar(200, 200, 200),
                8,
                cv.CV_AA,
            )
        else:
            cv.cvCircle(
                display.buffer,
                cv.cvPoint((int)(pos[0, 0]), (int)(pos[1, 0])),
                2,
                cv.cvScalar(150, 150, 150),
                cv.CV_FILLED,
                cv.CV_AA,
            )

        cv.cvLine(
            display.buffer,
            cv.cvPoint((int)(pos[0, 0]), (int)(pos[1, 0])),
            cv.cvPoint((int)(dirp[0, 0]), (int)(dirp[1, 0])),
            cv.cvScalar(100, 200, 100),
            1,
            cv.CV_AA,
            0,
        )
Example #15
 def DrawKeyPoints(self):
     ic = 0
     myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
     for ic, c in enumerate(self.mss.seqs):
         for k in c.points:
             if self.bDrawNumber:
                 cv.cvPutText(
                     self.drawimg, str(ic), cv.cvPoint(int(k.x), int(k.y)), myfont, cv.cvScalar(255, 255, 0, 0)
                 )
             cv.cvDrawCircle(self.drawimg, cv.cvPoint(int(k.x), int(k.y)), 4, cv.cvScalar(255, 0, 255, 0))
Example #16
def illuminate_faces(image):
    changed_image = threshold_image(image)
    faces = face_detector.detectObject(image)
    for face in faces:
        print( "Object found at (x,y) = (%i,%i)" % (face.x*face_detector.image_scale,face.y*face_detector.image_scale) )
        pt1 = cvPoint(  int(face.x*face_detector.image_scale), int(face.y*face_detector.image_scale) )
        pt2 = cvPoint(  int((face.x*face_detector.image_scale + face.width*face_detector.image_scale)), 
                        int((face.y*face_detector.image_scale + face.height*face_detector.image_scale)) )
        cvRectangle( changed_image, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 )
    return changed_image
Example #17
    def show_rectangles(self, rectangles):
        """
        Show the rectangles added.

        Arguments:
        - self: The main object pointer.
        """
        #debug.debug("Camera", "Showing existing rectangles -> %d" % len(rectangles))

        for rect in rectangles:
            cv.cvRectangle( self.__image, cv.cvPoint(rect.x, rect.y), cv.cvPoint(rect.size[0], rect.size[1]), cv.CV_RGB(255,0,0), 3, 8, 0 )
Example #18
 def DrawKeyPoints(self):
     ic = 0
     myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
     for ic, c in enumerate(self.mss.seqs):
       cnt = 0
       for k in c.points:
         cnt += 1
         if (int(cnt/2) * 2 != cnt): continue
         cv.cvDrawCircle(self.drawimg, cv.cvPoint(int(k.x), int(k.y)), 4, cv.cvScalar(255,255,255,0))
         if (self.bDrawNumber and (cnt > self.start) and cnt < self.start + 8*4 and len(c.points) > 30):
             #cv.cvPutText(self.drawimg, str(cnt), cv.cvPoint(int(k.x) + 5, int(k.y)), myfont, cv.cvScalar(255, 255, 0,0))
             cv.cvDrawCircle(self.drawimg, cv.cvPoint(int(k.x), int(k.y)), 4, cv.cvScalar(255,0, 255,0))
Example #19
	def read(self):

		raw_thresh = self.thresh.read()

		cvt_red = cv.cvCreateImage(cv.cvSize(raw_thresh.width,raw_thresh.height),raw_thresh.depth,1)
		cv.cvSplit(raw_thresh,cvt_red,None,None,None)
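		# cvMinMaxLoc fills cvpt_max with the coordinates of the brightest pixel in the extracted plane.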
		cvpt_min = cv.cvPoint(0,0)
		cvpt_max = cv.cvPoint(0,0)
		t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)

		if cvpt_max.x == 0 and cvpt_max.y == 0 :
			return []
		return [(cvpt_max.x,cvpt_max.y)]
Example #20
	def read(self):
		src = self.camera.read()
		thresh = self.thresh2pg.read()
		red = self.red2pg.read()
		raw_thresh = self.thresh.read()

		cvt_red = cv.cvCreateImage(cv.cvSize(raw_thresh.width,raw_thresh.height),raw_thresh.depth,1)
		cv.cvSplit(raw_thresh,cvt_red,None,None,None)
		cvpt_min = cv.cvPoint(0,0)
		cvpt_max = cv.cvPoint(0,0)
		t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)

		return src,thresh,red,(cvpt_max.x,cvpt_max.y)
Example #21
def showRegionsForResultId(resultId, color):
	painting = getPaintingInResultId(resultId)
	title = Result.select(Result.q.id==resultId)[0].painting.title
	image = painting.getImage()
	regions = getRegionsForResultId(resultId)

	for region in regions:
		rect = region.getBoundingBox()
		p1 = cv.cvPoint(rect.x, rect.y)
		p2 = cv.cvPoint(rect.x + rect.width, rect.y + rect.height)
		cv.cvRectangle(image, p1, p2, color, 2)

	g.showImage(image, title)
Example #22
def savePictureForResultId(resultId, color):
	painting = getPaintingInResultId(resultId)
	image = painting.getImage()
	regions = getRegionsForResultId(resultId)
	name = str(painting.id) + str(painting.title) + ".png"

	for region in regions:
		rect = region.getBoundingBox()
		p1 = cv.cvPoint(rect.x, rect.y)
		p2 = cv.cvPoint(rect.x + rect.width, rect.y + rect.height)
		cv.cvRectangle(image, p1, p2, color, 2)

	cv.highgui.cvSaveImage(name, image)
Example #23
File: chroma.py Project: bmiro/vpc
def getBackground(frameWidth, frameHeight):
    cvNamedWindow("Background")
    
    text = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    background = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    font = cvInitFont(CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0, 0.0, 2)
    pt1 = cvPoint(50, 100)
    pt2 = cvPoint(50, 150)
    center = cvPoint(frameWidth/2, frameHeight/2)
    cvPutText(text, "Press enter, run away and wait", pt1, font, CV_RGB(150, 100, 150))
    cvPutText(text, str(delayS) + " seconds to capture background", pt2, font, CV_RGB(150, 100, 150))
    cvShowImage("Background", text)
        
    key = -1
    while key == -1:
        key = cvWaitKey(10)    
        
    like = False
    while not like:
        for i in range(delayS):
            cvZero(text)
            cvPutText(text, str(delayS-i), center, font, CV_RGB(150, 100, 150))
            cvShowImage("Background", text)
            cvWaitKey(1000)
    
        csut = camStartUpTime
        while (csut): # Start capturing frames in order to give the cam time to auto-adjust colors
            if not cvGrabFrame(CAM):
                print "Could not grab a frame"
                exit()
            cvWaitKey(10)
            csut -= 1
        frame = cvQueryFrame(CAM)
        cvCopy(frame, background)
        
        cvCopy(frame, text)
        cvPutText(text, "Is correct? [y/n]", center, font, CV_RGB(150, 100, 150))

        cvShowImage("Background", text)
        
        key = -1
        while key != 'n' and key != 'y':
            key = cvWaitKey(10)
            if key == 'y': 
                like = True
                
    cvDestroyWindow("Background")
    return background
Example #24
def main():
	"""
	Just the test
	This method is a good resource on how to handle the results.
	Save images in this method if you have to.
	"""

	filename = sys.argv[1]
	image = highgui.cvLoadImage (filename)

	cutRatios = [lib.PHI]
	#cutRatios = [0.618]
	settings = Settings(cutRatios)
	image = highgui.cvLoadImage (filename)
	thickness = 4
	settings.setMarginPercentage(0.025)
	settings.setMethod(sys.argv[3])
	cut = int(sys.argv[2])
	winname = sys.argv[1]
	#settings.setThresholds(100,150)
	# Set the color for the boxes
	#color = lib.COL_BLACK
	#color = lib.COL_WHITE
	#color = lib.COL_RED
	color = lib.COL_GREEN
	#color = lib.COL_BLUE

	blobImg = blobResult(image, settings, cut)
	boxxImg = boundingBoxResult(image, settings, cut, thickness, color)
	cutt = lib.findMeans(cv.cvGetSize(image), settings.cutRatios[0])[cut]
	# the cut value, though this should be generalized a bit
	oriantesen = cutt.getPoints()[0].x == cutt.getPoints()[1].x
	if oriantesen:
		cutPixel = cutt.getPoints()[1].x
	else:
		cutPixel = cutt.getPoints()[1].y
	
	if oriantesen:
	#	print 'hej'
		cv.cvLine(boxxImg, cv.cvPoint(cutPixel, cutt.getPoints()[0].y), cv.cvPoint(cutPixel, cutt.getPoints()[1].y), lib.COL_RED)
	else:
		cv.cvLine(boxxImg, cv.cvPoint(cutt.getPoints()[0].x, cutPixel), cv.cvPoint(cutt.getPoints()[1].x, cutPixel), lib.COL_RED)
	# Save images
	highgui.cvSaveImage('flood_cut_%s.png' % cut, boxxImg)
	highgui.cvSaveImage('blobs_cut_%s.png' % cut, blobImg)

	# Show images
	compareImages(blobImg, boxxImg, "blob", winname)
Example #25
def gridIt(image, component_dictionary, step=1):
	#print len(component_dictionary), 'len'
	gridcordinates = []
	for entry in component_dictionary:
		tmpgridcondinates = []
		color = component_dictionary[entry][0]
		#print color, 'color'
		component = component_dictionary[entry][1]
		points = 0

		rect = component.rect
		lower_x = rect.x
		lower_y = rect.y
		upper_x = lower_x + rect.width
		upper_y = lower_y + rect.height
		#print lower_x, 'lower_x'
		#print upper_x, 'upper_x'
		#print step, 'step'
		#print lower_y, 'lower_y'
		#print upper_y, 'upper_y'
		for i in range(lower_x, upper_x, step):
			for j in range(lower_y, upper_y, step):
				if lib.isSameColor(color, image[j][i]):
					points = points + 1
					tmpgridcondinates.append(cv.cvPoint(i, j))
		gridcordinates.append(tmpgridcondinates)
		#print points, 'points'
	return gridcordinates
Example #26
def on_mouse( event, x, y, flags, param = [] ):
    global mouse_selection
    global mouse_origin
    global mouse_select_object
    if event == highgui.CV_EVENT_LBUTTONDOWN:
        print("Mouse down at (%i, %i)" % (x,y))
        mouse_origin = cv.cvPoint(x,y)

        mouse_selection = cv.cvRect(x,y,0,0)
        mouse_select_object = True
        return
    if event == highgui.CV_EVENT_LBUTTONUP:
        print("Mouse up at (%i,%i)" % (x,y))
        mouse_select_object = False
        if( mouse_selection.width > 0 and mouse_selection.height > 0 ):
            global track_object
            track_object = -1
        return
    if mouse_select_object:
        mouse_selection.x = min(x,mouse_origin.x)
        mouse_selection.y = min(y,mouse_origin.y)
        mouse_selection.width = mouse_selection.x + cv.CV_IABS(x - mouse_origin.x)
        mouse_selection.height = mouse_selection.y + cv.CV_IABS(y - mouse_origin.y)
        mouse_selection.x = max( mouse_selection.x, 0 )
        mouse_selection.y = max( mouse_selection.y, 0 )
        mouse_selection.width = min( mouse_selection.width, frame.width )
        mouse_selection.height = min( mouse_selection.height, frame.height )
        mouse_selection.width -= mouse_selection.x
        mouse_selection.height -= mouse_selection.y
Example #27
def on_trackbar(position):

    # create the image in which to draw the found contours
    contours_image = cv.cvCreateImage(cv.cvSize(_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours

    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next.h_next.h_next

    # first, clear the image where we will draw contours
    cv.cvSetZero(contours_image)

    # draw contours in red and green
    cv.cvDrawContours(contours_image, _contours, _red, _green, levels, 3,
                      cv.CV_AA, cv.cvPoint(0, 0))

    # finally, show the image
    highgui.cvShowImage("contours", contours_image)
Example #28
def on_trackbar (position):

    # create the image in which to draw the found contours
    contours_image = cv.cvCreateImage (cv.cvSize (_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours
    
    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next.h_next.h_next
        
    # first, clear the image where we will draw contours
    cv.cvSetZero (contours_image)
    
    # draw contours in red and green
    cv.cvDrawContours (contours_image, _contours,
                       _red, _green,
                       levels, 3, cv.CV_AA,
                       cv.cvPoint (0, 0))

    # finally, show the image
    highgui.cvShowImage ("contours", contours_image)
Example #29
 def __findContour(self, filename): #find the contour of images, and save all points in self.vKeyPoints
     self.img = highgui.cvLoadImage (filename)
     self.grayimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,1)
     self.drawimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8,3)
     cv.cvCvtColor (self.img, self.grayimg, cv.CV_BGR2GRAY)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvSmooth(self.grayimg, self.grayimg, cv.CV_BLUR, 9)
     cv.cvThreshold( self.grayimg, self.grayimg, self.threshold, self.threshold +100, cv.CV_THRESH_BINARY )
     cv.cvZero(self.drawimg)
     storage = cv.cvCreateMemStorage(0)
     nb_contours, cont = cv.cvFindContours (self.grayimg,
         storage,
         cv.sizeof_CvContour,
         cv.CV_RETR_LIST,
         cv.CV_CHAIN_APPROX_NONE,
         cv.cvPoint (0,0))
         
     cv.cvDrawContours (self.drawimg, cont, cv.cvScalar(255,255,255,0), cv.cvScalar(255,255,255,0), 1, 1, cv.CV_AA, cv.cvPoint (0, 0))
     self.allcurve = []
     idx = 0
     for c in cont.hrange():
         PointArray = cv.cvCreateMat(1, c.total  , cv.CV_32SC2)
         PointArray2D32f= cv.cvCreateMat( 1, c.total  , cv.CV_32FC2)
         cv.cvCvtSeqToArray(c, PointArray, cv.cvSlice(0, cv.CV_WHOLE_SEQ_END_INDEX))
         fpoints = []
         for i in range(c.total):
             kp = myPoint()
             kp.x = cv.cvGet2D(PointArray,0, i)[0]
             kp.y = cv.cvGet2D(PointArray,0, i)[1]
             kp.index = idx
             idx += 1
             fpoints.append(kp)
         self.allcurve.append(fpoints)
     self.curvelength = idx
Example #30
def on_mouse(event, x, y, flags, param):

    global select_object, selection, image, origin, track_object

    if image is None:
        return

    if image.origin:
        y = image.height - y

    if select_object:
        selection.x = min(x,origin.x)
        selection.y = min(y,origin.y)
        selection.width = selection.x + cv.CV_IABS(x - origin.x)
        selection.height = selection.y + cv.CV_IABS(y - origin.y)
        
        selection.x = max( selection.x, 0 )
        selection.y = max( selection.y, 0 )
        selection.width = min( selection.width, image.width )
        selection.height = min( selection.height, image.height )
        selection.width -= selection.x
        selection.height -= selection.y

    if event == highgui.CV_EVENT_LBUTTONDOWN:
        origin = cv.cvPoint(x,y)
        selection = cv.cvRect(x,y,0,0)
        select_object = 1
    elif event == highgui.CV_EVENT_LBUTTONUP:
        select_object = 0
        if( selection.width > 0 and selection.height > 0 ):
            track_object = -1
Example #31
def detectObject(image):
  grayscale = cv.cvCreateImage(size, 8, 1)
  cv.cvFlip(image, None, 1)
  cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
  storage = cv.cvCreateMemStorage(0)
  cv.cvClearMemStorage(storage)
  cv.cvEqualizeHist(grayscale, grayscale)
  cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1,1))
  objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, 
                                   cv.CV_HAAR_DO_CANNY_PRUNING,
                                   cv.cvSize(100,100))

  # Draw dots where hands are
  if objects:
    for i in objects:
      #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
      #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
      #               cv.CV_RGB(0,255,0), 3, 8, 0)
      center = cv.cvPoint(int(i.x+i.width/2), int(i.y+i.height/2))
      cv.cvCircle(image, center, 10, cv.CV_RGB(0,0,0), 5,8, 0)
      # Left side check
      if center.x > box_forward_left[0].x and center.x < box_backwards_left[1].x and center.y > box_forward_left[0].y and center.y < box_backwards_left[1].y:
        set_speed('left', center)
      # Right side check
      if center.x > box_forward_right[0].x and center.x < box_backwards_right[1].x and center.y > box_forward_right[0].y and center.y < box_backwards_right[1].y:
        set_speed('right', center)
Example #32
def get_sub_image( image, coord, sizex=10, sizey=10, save=True ):

        high_x = coord.x + int(round(sizex/2.0))
        low_x  = coord.x - int(sizex/2.0)
        high_y = coord.y + int(round(sizey/2.0))
        low_y  = coord.y - int(sizey/2.0)

        if high_x > image.width - 1 :
            high_x = image.width - 1
            low_x  = image.width - 1 - sizex
        elif low_x < 0 :
            low_x  = 0
            high_x = sizex - 1
        
        if high_y > image.height - 1 :
            high_y = image.height - 1
            low_y  = image.height - 1 - sizey
        elif low_y < 0 :
            low_y  = 0
            high_y = sizey - 1

        b = image[low_y:high_y,low_x:high_x]

        if save:
            curtime = time.localtime()
            curtime_raw = time.time()
            i = float( 100*(curtime_raw - int(curtime_raw)))
            date_name = time.strftime('%Y%m%d%I%M%S_' + str(i), curtime)
            highgui.cvSaveImage( date_name+'dot.png' , b )

        return {'sub_img':b,'sub_img_top_left': cv.cvPoint(low_x,low_y) }
Example #33
def getData():
	frame = highgui.cvQueryFrame(capture)
	if frame is None:
		return None

	cv.cvSplit(frame, b_img, g_img, r_img, None)
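	# Keep only pixels with a strong red component and weak green/blue, then AND the masks together.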
	cv.cvInRangeS(r_img, 150, 255, r_img)
	cv.cvInRangeS(g_img, 0, 100, g_img)
	cv.cvInRangeS(b_img, 0, 100, b_img)

	cv.cvAnd(r_img, g_img, laser_img)
	cv.cvAnd(laser_img, b_img, laser_img)
	cv.cvErode(laser_img,laser_img) #,0,2)
	cv.cvDilate(laser_img,laser_img)
		
	c_count, contours = cv.cvFindContours (laser_img, 
											storage,
											cv.sizeof_CvContour,
											cv.CV_RETR_LIST,
											cv.CV_CHAIN_APPROX_NONE,
											cv.cvPoint (0,0))
	if c_count:
		return returnEllipses(contours)
	else:
		return None
Example #34
def naiveBWLineScanner(edgeImageBW, line):
	"""B/W version of the naiveLineScanner"""
	# Get the points from the line
	(p1, p2) = line.getPoints()

	# We check the direction of the line then
	# get the appropriate row or column.
	# XXX: Any smarter way around this problem??
	if p1.y == p2.y:
		# Get the row, defined by y
		dx = 1
		dy = 0
		slice = edgeImageBW[p1.y]
	elif p1.x == p2.x:
		# Get the column, defined by x
		dx = 0
		dy = 1
		slice = edgeImageBW[:,p1.x]
	else:
		raise lib.OrientationException("The orientation is f****d up")

	p = p1
	points = []
	# Now we can traverse every point in the row/column
	for point in slice:
		if not int(point) == 0:
			# Save this point
			points.append(p)
		# Update the coordinate
		p = (cv.cvPoint(p.x + dx, p.y + dy))

	return points
Example #35
def on_mouse(event, x, y, flags, param=[]):
    global mouse_selection
    global mouse_origin
    global mouse_select_object
    if event == highgui.CV_EVENT_LBUTTONDOWN:
        print("Mouse down at (%i, %i)" % (x, y))
        mouse_origin = cv.cvPoint(x, y)

        mouse_selection = cv.cvRect(x, y, 0, 0)
        mouse_select_object = True
        return
    if event == highgui.CV_EVENT_LBUTTONUP:
        print("Mouse up at (%i,%i)" % (x, y))
        mouse_select_object = False
        if (mouse_selection.width > 0 and mouse_selection.height > 0):
            global track_object
            track_object = -1
        return
    if mouse_select_object:
        mouse_selection.x = min(x, mouse_origin.x)
        mouse_selection.y = min(y, mouse_origin.y)
        mouse_selection.width = mouse_selection.x + cv.CV_IABS(x -
                                                               mouse_origin.x)
        mouse_selection.height = mouse_selection.y + cv.CV_IABS(y -
                                                                mouse_origin.y)
        mouse_selection.x = max(mouse_selection.x, 0)
        mouse_selection.y = max(mouse_selection.y, 0)
        mouse_selection.width = min(mouse_selection.width, frame.width)
        mouse_selection.height = min(mouse_selection.height, frame.height)
        mouse_selection.width -= mouse_selection.x
        mouse_selection.height -= mouse_selection.y
Example #36
def get_sub_image(image, coord, sizex=10, sizey=10, save=True):

    high_x = coord.x + int(round(sizex / 2.0))
    low_x = coord.x - int(sizex / 2.0)
    high_y = coord.y + int(round(sizey / 2.0))
    low_y = coord.y - int(sizey / 2.0)

    if high_x > image.width - 1:
        high_x = image.width - 1
        low_x = image.width - 1 - sizex
    elif low_x < 0:
        low_x = 0
        high_x = sizex - 1

    if high_y > image.height - 1:
        high_y = image.height - 1
        low_y = image.height - 1 - sizey
    elif low_y < 0:
        low_y = 0
        high_y = sizey - 1

    b = image[low_y:high_y, low_x:high_x]

    if save:
        curtime = time.localtime()
        curtime_raw = time.time()
        i = float(100 * (curtime_raw - int(curtime_raw)))
        date_name = time.strftime('%Y%m%d%I%M%S_' + str(i), curtime)
        highgui.cvSaveImage(date_name + 'dot.png', b)

    return {'sub_img': b, 'sub_img_top_left': cv.cvPoint(low_x, low_y)}
Example #37
def findContours(image, getPolygon):
    storage = cv.cvCreateMemStorage(0)
    polyContourArray = []
    polyStorage = cv.cvCreateMemStorage(0)
    nb_contours, contours = cv.cvFindContours(image, storage,
                                              cv.sizeof_CvContour,
                                              cv.CV_RETR_TREE,
                                              cv.CV_CHAIN_APPROX_SIMPLE,
                                              cv.cvPoint(0, 0))

    if contours == None:
        return None

    contoursList = list(contours.hrange())
    if not getPolygon:
        ret = contoursList
    else:
        for contour in contoursList:
            per = cvContourPerimeter(contour)
            polyContourArray.append(
                cv.cvApproxPoly(contour, cv.sizeof_CvContour, storage,
                                cv.CV_POLY_APPROX_DP, per / PER_TOLERANCE, 0))
        ret = polyContourArray

    return ret
Example #38
def on_mouse(event, x, y, flags, param):

    global select_object, selection, image, origin, track_object

    if image is None:
        return

    if image.origin:
        y = image.height - y

    if select_object:
        selection.x = min(x, origin.x)
        selection.y = min(y, origin.y)
        selection.width = selection.x + cv.CV_IABS(x - origin.x)
        selection.height = selection.y + cv.CV_IABS(y - origin.y)

        selection.x = max(selection.x, 0)
        selection.y = max(selection.y, 0)
        selection.width = min(selection.width, image.width)
        selection.height = min(selection.height, image.height)
        selection.width -= selection.x
        selection.height -= selection.y

    if event == highgui.CV_EVENT_LBUTTONDOWN:
        origin = cv.cvPoint(x, y)
        selection = cv.cvRect(x, y, 0, 0)
        select_object = 1
    elif event == highgui.CV_EVENT_LBUTTONUP:
        select_object = 0
        if (selection.width > 0 and selection.height > 0):
            track_object = -1
Example #39
def findContours(image,getPolygon):
    storage = cv.cvCreateMemStorage (0)
    polyContourArray=[]
    polyStorage=cv.cvCreateMemStorage (0)
    nb_contours, contours = cv.cvFindContours (image,
                                               storage,
                                               cv.sizeof_CvContour,
                                               cv.CV_RETR_TREE,
                                               cv.CV_CHAIN_APPROX_SIMPLE,
                                               cv.cvPoint (0,0))
    
    if contours==None:
        return None
    
    contoursList=list(contours.hrange())
    if not getPolygon:
        ret=contoursList
    else:
        for contour in contoursList:
            per=cvContourPerimeter(contour)
            polyContourArray.append(cv.cvApproxPoly (contour, cv.sizeof_CvContour,
                                    storage,
                                    cv.CV_POLY_APPROX_DP, per/PER_TOLERANCE, 0))
        ret=polyContourArray
    
    return ret
Example #40
def draw_ellipse(image, center, axes, angle,
				 start_angle=0.0, end_angle=360.0,
				 color=(255,0,0), thickness=1):
	center = cv.cvPoint(rnd(center[0]), rnd(center[1]))
	axes = cv.cvSize(rnd(axes[0]), rnd(axes[1]))
	color = cv.CV_RGB(color[0], color[1], color[2])
	cv.cvEllipse(image, center, axes, angle, start_angle, end_angle, color, thickness)  
Example #41
def illuminate_faces(image):
    changed_image = threshold_image(image)
    faces = face_detector.detectObject(image)
    for face in faces:
        print("Object found at (x,y) = (%i,%i)" %
              (face.x * face_detector.image_scale,
               face.y * face_detector.image_scale))
        pt1 = cvPoint(int(face.x * face_detector.image_scale),
                      int(face.y * face_detector.image_scale))
        pt2 = cvPoint(
            int((face.x * face_detector.image_scale +
                 face.width * face_detector.image_scale)),
            int((face.y * face_detector.image_scale +
                 face.height * face_detector.image_scale)))
        cvRectangle(changed_image, pt1, pt2, CV_RGB(255, 0, 0), 3, 8, 0)
    return changed_image
Example #42
def detectObject(image):
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)
    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING,
                                     cv.cvSize(100, 100))

    # Draw dots where hands are
    if objects:
        for i in objects:
            #cv.cvRectangle(image, cv.cvPoint( int(i.x), int(i.y)),
            #               cv.cvPoint(int(i.x+i.width), int(i.y+i.height)),
            #               cv.CV_RGB(0,255,0), 3, 8, 0)
            center = cv.cvPoint(int(i.x + i.width / 2),
                                int(i.y + i.height / 2))
            cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
            # Left side check
            if (box_forward_left[0].x < center.x < box_backwards_left[1].x
                    and box_forward_left[0].y < center.y < box_backwards_left[1].y):
                set_speed('left', center)
            # Right side check
            if (box_forward_right[0].x < center.x < box_backwards_right[1].x
                    and box_forward_right[0].y < center.y < box_backwards_right[1].y):
                set_speed('right', center)
Example #43
    def __init__(self, type_, label, coords, size=None, color=None, parent=None, follow=None):
        Graphic.__init__(self, type_, label, coords, size, color, parent, follow)

        self.__ocv = None
        self.last  = None
        self.diff  = None
        self.orig  = cv.cvPoint( self.x, self.y )
Example #44
def printContour(contour, position, color, imagen):
    #print " a contour " +  str(contour.total) + " area: " + str(cvContourArea(contour)) + " perimeter: " + str(cvContourPerimeter(contour))

    # compute the real level of display, given the current position
    levels = position - 3

    # draw contours in red and green
    cv.cvDrawContours(imagen, contour, color, _green, levels,
                      CONTOUR_THICKNESS, cv.CV_AA, cv.cvPoint(0, 0))
Example #45
 def LoadPoints(self, ptfile):
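     # Each line of ptfile holds an x and a y value separated by spaces; both are scaled by self.enlarge before being stored as cvPoints.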
     self.points = []
     for line in fileinput.input(ptfile):
         dr = line.strip("\n").strip("\r").split(" ")
         ds = [d.strip(" ") for d in dr]
         dt = [d for d in dr if d != ""]
         x = float(dt[0]) * self.enlarge
         y = float(dt[1]) * self.enlarge
         pt = cv.cvPoint(int(x), int(y))
         self.points.append(pt)
Example #46
def matchTemplate(self, template, image):
    '''matchTemplate(self, template, image):
        returns - correlation value of best match (between 0 and 1)
                  top-left coord of template for the best match (cvPoint)
        '''

    matchResultHeight = image.height - template.height + 1
    matchResultWidth = image.width - template.width + 1

    #print 'matchResultHeight: %d matchResultWidth %d'%(matchResultHeight, matchResultWidth)
    matchResult = cv.cvCreateMat(matchResultHeight, matchResultWidth,
                                 cv.CV_32FC1)
    cv.cvMatchTemplate(image, template, matchResult, cv.CV_TM_CCORR_NORMED)

    min_loc = cv.cvPoint(0, 0)
    max_loc = cv.cvPoint(0, 0)

    min_val, max_val = cv.cvMinMaxLoc(matchResult, min_loc, max_loc)

    return {'image': matchResult, 'max_val': max_val, 'max_loc': max_loc}
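
A minimal usage sketch, assuming the legacy SWIG-style OpenCV bindings used throughout these examples and an object 'matcher' that exposes the method above; the import style and file names are illustrative:

from opencv import highgui

patch = highgui.cvLoadImage("patch.png")
scene = highgui.cvLoadImage("scene.png")
res = matcher.matchTemplate(patch, scene)
# res['max_loc'] is the top-left corner of the best match inside 'scene'
print(res['max_val'], res['max_loc'].x, res['max_loc'].y)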
Example #47
    def DrawKeyPoints(self):
        if (not self.drawimg):
            self.drawimg = cv.cvCloneImage(self.img)

        myfont = cv.cvInitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
        ic = 0
        for c in self.points:
            cv.cvPutText(self.drawimg, str(ic), cv.cvPoint(int(c.x), int(c.y)),
                         myfont, cv.cvScalar(255, 255, 0, 0))
            ic += 1
            cv.cvDrawCircle(self.drawimg, c, 4, cv.cvScalar(255, 255, 0, 0))
Example #48
def pointeurPrecision(zone_active, framebuffer):
    info_size = []
    centre = []
    for i in range(0, len(zone_active)):
        conteneur = cv.CvConnectedComp()
        info = cv.CvBox2D()
        trouver = cv.cvCamShift(framebuffer, zone_active[i], critere,
                                conteneur, info)
        info_size.append(info.size.height * info.size.width)
        centre.append(cv.cvPoint(int(info.center.x), int(info.center.y)))
        #***** the angle could also be retrieved here *****#
    return [centre, info_size]
Example #49
 def __evenSample(self, npoint):
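     # Evenly subsample the points of all edges so that roughly npoint points are
     # kept in total, always preserving each edge's start and end points.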
     totalpoints = 0
     for e in self.edges:
         totalpoints += len(e.points)
     ndist = totalpoints / (0.001 + npoint)
     self.allselected = []
     for c in self.edges:
         c.selected = []
         c.selected.append(self.points[c.start])
         for pi, p in enumerate(c.points):
             if (int((pi + 1) / ndist) - int(pi / ndist)) == 1:
                 c.selected.append(cv.cvPoint(int(p.x), int(p.y)))
         c.selected.append(self.points[c.end])
Example #50
def draw_ellipse(image,
                 center,
                 axes,
                 angle,
                 start_angle=0.0,
                 end_angle=360.0,
                 color=(255, 0, 0),
                 thickness=1):
    center = cv.cvPoint(rnd(center[0]), rnd(center[1]))
    axes = cv.cvSize(rnd(axes[0]), rnd(axes[1]))
    color = cv.CV_RGB(color[0], color[1], color[2])
    cv.cvEllipse(image, center, axes, angle, start_angle, end_angle, color,
                 thickness)
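
A hypothetical call; 'img' is a loaded image and 'rnd' is assumed to be the module's rounding helper (for example int(round(...))), so the centre and axes may be given as floats:

draw_ellipse(img, (120.0, 80.0), (40.0, 25.0), angle=30.0,
             color=(0, 255, 0), thickness=2)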
Example #51
    def detect_face(self, img):
        """ Detect faces within an image, then draw around them.
            The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
            for accurate yet slow object detection. For a faster operation on real video
            images the settings are:
            scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
            min_size=<minimum possible face size>
        """
        min_size = cv.cvSize(20, 20)
        image_scale = 1.3
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
        small_img = cv.cvCreateImage(
            cv.cvSize(cv.cvRound(img.width / image_scale),
                      cv.cvRound(img.height / image_scale)), 8, 1)
        cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.cvEqualizeHist(small_img, small_img)
        cv.cvClearMemStorage(self.storage)

        if (self.cascade):
            t = cv.cvGetTickCount()
            faces = cv.cvHaarDetectObjects(small_img, self.cascade,
                                           self.storage, haar_scale,
                                           min_neighbors, haar_flags, min_size)
            t = cv.cvGetTickCount() - t
            #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
            if faces:
                for r in faces:
                    pt1 = cv.cvPoint(int(r.x * image_scale),
                                     int(r.y * image_scale))
                    pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                     int((r.y + r.height) * image_scale))
                    cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                                   0)
        return img
Example #52
def on_mouse(event, x, y, flags, param):

    # we will use the global pt and add_remove_pt
    global pt
    global add_remove_pt

    if image is None:
        # not initialized, so skip
        return

    if event == highgui.CV_EVENT_LBUTTONDOWN:
        # user has clicked, so memorize it
        pt = cv.cvPoint(x, y)
        add_remove_pt = True
Example #53
def filter_and_render_mixed(image, corners):
    """
    Takes a numpy array of corners and a cvMat image.
    
    """
    n = 15
    footprint = ones((n, n))
    mx = maximum_filter(corners, footprint=footprint)
    local_maxima = (corners == mx) * (corners != zeros(
        corners.shape))  # make sure to remove completely dark points

    points = nonzero(local_maxima)
    del local_maxima

    points = array([points[0], points[1]]).transpose()
    L = []

    for each in points:
        L.append((corners[each[0], each[1]], each[0], each[1], None))
        i = cv.cvPoint(int(each[0]), int(each[1]))
        cv.cvCircle(image, i, 2, cv.CV_RGB(0, 0, 200), 3)

    #cv.cvCvtColor(grayimage, image, cv.CV_GRAY2RGB)
    return image
Example #54
    def clear(self):
        cv.cvRectangle(self.buffer, cv.cvPoint(0, 0),
                       cv.cvPoint(self.buffer.width, self.buffer.height),
                       cv.cvScalar(255, 255, 255), cv.CV_FILLED)

        if self.draw_grid:
            line_color = 230
            lc = cv.cvScalar(line_color, line_color, line_color)
            for i in xrange(1, as_int(self.meters_disp) + 3):
                cv.cvCircle(
                    self.buffer,
                    cv.cvPoint(self.w / 2, self.h),
                    as_int(self.pixels_per_meter * (i - .5)),
                    #lc, 1)
                    lc,
                    1,
                    cv.CV_AA)
                cv.cvCircle(
                    self.buffer,
                    cv.cvPoint(self.w / 2, self.h),
                    as_int(self.pixels_per_meter * i),
                    #lc, 1)
                    lc,
                    1,
                    cv.CV_AA)

            for i in xrange(360 / 30):
                x = (self.w / 2) + math.cos(math.radians(
                    i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
                y = self.h + math.sin(math.radians(
                    i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
                cv.cvLine(self.buffer, cv.cvPoint(self.w / 2, self.h),
                          cv.cvPoint(as_int(x), as_int(y)), lc, 1, cv.CV_AA)

        if self.draw_center:
            cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h), 3,
                        cv.cvScalar(0, 0, 200), cv.CV_FILLED, cv.CV_AA)
Example #55
 def getCvPoint(self):
     return cv.cvPoint(int(self.x), int(self.y))
Example #56
    def detect_squares(self, img):
        """ Find squares within the video stream and draw them """
        N = 11
        thresh = 5
        sz = cv.cvSize(img.width & -2, img.height & -2)
        timg = cv.cvCloneImage(img)
        gray = cv.cvCreateImage(sz, 8, 1)
        pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
        # create empty sequence that will contain points -
        # 4 points per square (the square's vertices)
        squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint,
                                 self.storage)
        squares = cv.CvSeq_CvPoint.cast(squares)

        # select the maximum ROI in the image
        # with the width and height divisible by 2
        subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))

        # down-scale and upscale the image to filter out the noise
        cv.cvPyrDown(subimage, pyr, 7)
        cv.cvPyrUp(pyr, subimage, 7)
        tgray = cv.cvCreateImage(sz, 8, 1)
        # find squares in every color plane of the image
        for c in range(3):
            # extract the c-th color plane
            channels = [None, None, None]
            channels[c] = tgray
            cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
            for l in range(N):
                # hack: use Canny instead of zero threshold level.
                # Canny helps to catch squares with gradient shading
                if (l == 0):
                    # apply Canny. Take the upper threshold from slider
                    # and set the lower to 0 (which forces edges merging)
                    cv.cvCanny(tgray, gray, 0, thresh, 5)
                    # dilate canny output to remove potential
                    # holes between edge segments
                    cv.cvDilate(gray, gray, None, 1)
                else:
                    # apply threshold if l!=0:
                    #     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                    cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
                                   cv.CV_THRESH_BINARY)

                # find contours and store them all as a list
                count, contours = cv.cvFindContours(gray, self.storage,
                                                    cv.sizeof_CvContour,
                                                    cv.CV_RETR_LIST,
                                                    cv.CV_CHAIN_APPROX_SIMPLE,
                                                    cv.cvPoint(0, 0))

                if not contours:
                    continue

                # test each contour
                for contour in contours.hrange():
                    # approximate contour with accuracy proportional
                    # to the contour perimeter
                    result = cv.cvApproxPoly(
                        contour, cv.sizeof_CvContour, self.storage,
                        cv.CV_POLY_APPROX_DP,
                        cv.cvContourPerimeter(contour) * 0.02, 0)
                    # square contours should have 4 vertices after approximation
                    # relatively large area (to filter out noisy contours)
                    # and be convex.
                    # Note: absolute value of an area is used because
                    # area may be positive or negative - in accordance with the
                    # contour orientation
                    if (result.total == 4
                            and abs(cv.cvContourArea(result)) > 1000
                            and cv.cvCheckContourConvexity(result)):
                        s = 0
                        for i in range(5):
                            # find minimum angle between joint
                            # edges (maximum of cosine)
                            if (i >= 2):
                                t = abs(
                                    self.squares_angle(result[i],
                                                       result[i - 2],
                                                       result[i - 1]))
                                if s < t:
                                    s = t
                        # if cosines of all angles are small
                        # (all angles are ~90 degrees) then write quadrangle
                        # vertices to resultant sequence
                        if (s < 0.3):
                            for i in range(4):
                                squares.append(result[i])

        i = 0
        while i < squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])

            # draw the square as a closed polyline
            cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
            i += 4

        return img
Example #57
    def timerEvent(self, ev):
        # Fetch a frame from the video camera
        frame = highgui.cvQueryFrame(self.cap)
        img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
        if (frame.origin == cv.IPL_ORIGIN_TL):
            cv.cvCopy(frame, img_orig)
        else:
            cv.cvFlip(frame, img_orig, 0)

        # Create a grey frame to clarify data
        img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height),
                                    8, 1)
        cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
        # Detect objects within the frame
        self.faces_storage = cv.cvCreateMemStorage(0)
        faces = self.detect_faces(img_grey)
        self.circles_storage = cv.cvCreateMemStorage(0)
        circles = self.detect_circles(img_grey)
        self.squares_storage = cv.cvCreateMemStorage(0)
        squares = self.detect_squares(img_grey, img_orig)
        self.lines_storage = cv.cvCreateMemStorage(0)
        lines = self.detect_lines(img_grey, img_orig)

        # Draw faces
        if faces:
            for face in faces:
                pt1, pt2 = self.face_points(face)
                cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8,
                               0)

        # Draw lines
        if lines:
            for line in lines:
                cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0),
                          3, 8)
        # Draw circles
        if circles:
            for circle in circles:
                cv.cvCircle(
                    img_orig,
                    cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                    cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)

        # Draw squares
        if squares:
            i = 0
            while i < squares.total:
                pt = []
                # read 4 vertices
                pt.append(squares[i])
                pt.append(squares[i + 1])
                pt.append(squares[i + 2])
                pt.append(squares[i + 3])
                ## draw the square as a closed polyline
                cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                              cv.CV_AA, 0)
                i += 4

        # Resize the image to display properly within the window
        #	CV_INTER_NN - nearest-neighbor interpolation,
        #	CV_INTER_LINEAR - bilinear interpolation (used by default)
        #	CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
        #	CV_INTER_CUBIC - bicubic interpolation.
        img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()),
                                       8, 3)
        cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
        img_pil = adaptors.Ipl2PIL(img_display)
        s = StringIO()
        img_pil.save(s, "PNG")
        s.seek(0)
        q_img = QImage()
        q_img.loadFromData(s.read())
        bitBlt(self, 0, 0, q_img)
Example #58
        # compute the width for each bin to display
        bin_w = histimg.width / hdims

        for i in range(hdims):
            # for all the bins

            # get the value, and scale to the size of the hist image
            val = cv.cvRound(
                cv.cvGetReal1D(hist.bins, i) * histimg.height / 255)

            # compute the color
            color = hsv2rgb(i * 180. / hdims)

            # draw the rectangle in the wanted color
            cv.cvRectangle(histimg, cv.cvPoint(i * bin_w, histimg.height),
                           cv.cvPoint((i + 1) * bin_w, histimg.height - val),
                           color, -1, 8, 0)
        # Make the sweet negative selection box
        if mouse_select_object and mouse_selection.width > 0 and mouse_selection.height > 0:
            a = cv.cvGetSubRect(frame, mouse_selection)
            cv.cvXorS(a, cv.cvScalarAll(255),
                      a)  # Take the negative of the image..
            del a

        # Carry out the histogram tracking...
        if track_object != 0:
            cv.cvInRangeS(hsv, cv.cvScalar(0, smin, min(vmin, vmax), 0),
                          cv.cvScalar(180, 256, max(vmin, vmax), 0), mask)
            cv.cvSplit(hsv, hue, None, None, None)