Example 1
def on_trackbar (position):

    # create the image for drawing the found contours
    contours_image = cv.cvCreateImage (cv.cvSize (_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours
    
    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look funnier
        _contours = contours.h_next.h_next.h_next
        
    # first, clear the image where we will draw contours
    cv.cvSetZero (contours_image)
    
    # draw contours in red and green
    cv.cvDrawContours (contours_image, _contours,
                       _red, _green,
                       levels, 3, cv.CV_AA,
                       cv.cvPoint (0, 0))

    # finally, show the image
    highgui.cvShowImage ("contours", contours_image)
Example 2
def on_trackbar(position):

    # create the image for drawing the found contours
    contours_image = cv.cvCreateImage(cv.cvSize(_SIZE, _SIZE), 8, 3)

    # compute the real level of display, given the current position
    levels = position - 3

    # initialisation
    _contours = contours

    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look funnier
        _contours = contours.h_next.h_next.h_next

    # first, clear the image where we will draw contours
    cv.cvSetZero(contours_image)

    # draw contours in red and green
    cv.cvDrawContours(contours_image, _contours, _red, _green, levels, 3,
                      cv.CV_AA, cv.cvPoint(0, 0))

    # finally, show the image
    highgui.cvShowImage("contours", contours_image)
Example 3
def findEdges(original, out, threshold1 = 100, threshold2 = None):
	"""Return a new edge detected image with a specified threshold"""
	warnings.warn("Use findBWEdges instead unless you really need colored edges.", DeprecationWarning)

	#Define threshold2
	if threshold2 is None:
		threshold2 = threshold1 * 3

	# Create two pictures with only one channel for a b/w copy
	# and one for storing the edges found in the b/w picture
	gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
	edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

	# Create the b/w copy of the original
	cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)

	# Blur the b/w copy, but put the result into edge pic
	cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)

	# Negate the b/w copy of original with newly blurred
	# b/w copy. This will make edges stand out
	cv.cvNot(gray, edge)

	# Run an edge-finding algorithm called 'Canny'
	# It will analyse the first argument and store the
	# resulting picture in the second argument
	cv.cvCanny(gray, edge, threshold1, threshold2)

	# We initialize our out-image to black
	cv.cvSetZero(out)

	# Finally, we use the found edges, which are b/w, as
	# a mask for copying the colored edges from the original
	# to the out-image
	cv.cvCopy(original, out, edge)
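For reference, a minimal sketch of the same edge-masked copy written against the current cv2 API; the function name and defaults here are illustrative, not from the original project:

import numpy as np
import cv2

def find_edges_cv2(original, threshold1=100, threshold2=None):
    if threshold2 is None:
        threshold2 = threshold1 * 3
    gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, threshold1, threshold2)
    # start from a black image and copy only the pixels under the edge mask
    out = np.zeros_like(original)
    out[edges != 0] = original[edges != 0]
    return out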
Example 4
File: chroma.py Project: bmiro/vpc
def getFilter(frameWidth, frameHeight):
    cvNamedWindow("Filtred")
    
    cvCreateTrackbar("hmax", "Filtred", getHlsFilter('hmax'), 180, trackBarChangeHmax)
    cvCreateTrackbar("hmin", "Filtred", getHlsFilter('hmin'), 180, trackBarChangeHmin)
    #cvCreateTrackbar("lmax", "Filtred", hlsFilter['lmax'], 255, trackBarChangeLmax)
    #cvCreateTrackbar("lmin", "Filtred", hlsFilter['lmin'], 255, trackBarChangeLmin)
    cvCreateTrackbar("smax", "Filtred", getHlsFilter('smax'), 255, trackBarChangeSmax)
    cvCreateTrackbar("smin", "Filtred", getHlsFilter('smin'), 255, trackBarChangeSmin)

    cvSetMouseCallback("Filtred", mouseClick, None)
    
    frame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    hlsFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)
    filtredFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 3)

    mask = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)

    hFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    lFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    sFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    ThHFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThLFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    ThSFrame = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1)
    
    key = -1
    while key == -1: 
        if not cvGrabFrame(CAM):
            print "Could not grab a frame"
            exit()
        frame = cvQueryFrame(CAM)
        
        cvCvtColor(frame, hlsFrame, CV_BGR2HLS)
    
        cvSplit(hlsFrame, hFrame, lFrame, sFrame, None)
        
        pixelInRange(hFrame, getHlsFilter('hmin'), getHlsFilter('hmax'), 0, 180, ThHFrame) 
        #pixelInRange(lFrame, getHlsFilter('lmin'), getHlsFilter('lmax'), 0, 255, ThLFrame)
        pixelInRange(sFrame, getHlsFilter('smin'), getHlsFilter('smax'), 0, 255, ThSFrame)
        
        cvSetZero(mask)        
        cvAnd(ThHFrame, ThSFrame, mask)
        
        cvSetZero(filtredFrame)
        
        cvCopy(frame, filtredFrame, mask)
        
        cvShowImage("Filtred", filtredFrame)

        key = cvWaitKey(10)
        if key == 'r':
            key = -1
            resetHlsFilter()
            
    cvDestroyWindow("Filtred")    
Example 5
def main(): # ctrl+c to end
    global h,s,v,h2,v2,s2,d,e
    highgui.cvNamedWindow("Camera 1", 1)
    highgui.cvNamedWindow("Orig", 1)
    highgui.cvCreateTrackbar("H", "Camera 1", h, 256, tb_h)
    highgui.cvCreateTrackbar("S", "Camera 1", s, 256, tb_s)
    highgui.cvCreateTrackbar("V", "Camera 1", v, 256, tb_v)
    highgui.cvCreateTrackbar("H2", "Camera 1", h2, 256, tb_h2)
    highgui.cvCreateTrackbar("S2", "Camera 1", s2, 256, tb_s2)
    highgui.cvCreateTrackbar("V2", "Camera 1", v2, 256, tb_v2)
    highgui.cvCreateTrackbar("Dilate", "Camera 1", d, 30, tb_d)
    highgui.cvCreateTrackbar("Erode", "Camera 1", e, 30, tb_e)
    
    cap = highgui.cvCreateCameraCapture(1)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
    highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)
    c = 0
    t1 = tdraw = time.clock()
    t = 1
    font = cv.cvInitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
    while c != 0x27:
        image = highgui.cvQueryFrame(cap)
        if not image:
            print "capture failed"
            break
            
        thresh = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,1)
        cv.cvSetZero(thresh)
        cv.cvCvtColor(image,image,cv.CV_RGB2HSV)
        cv.cvInRangeS(image, (h,s,v,0), (h2,s2,v2,0), thresh)
        result = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,3)
        cv.cvSetZero(result)
        
        cv.cvOr(image,image,result,thresh)
        for i in range(1,e):
            cv.cvErode(result,result)
        for i in range(1,d):
            cv.cvDilate(result,result)
            
        # floodfill objects back in, allowing threshold differences outwards
        
        t2 = time.clock()
        if t2 > tdraw+0.3:
            t = t2-t1
            tdraw=t2
        cv.cvPutText(result, "FPS: " + str(1 / (t)), (0,25), font, (255,255,255))
        t1 = t2
        highgui.cvShowImage("Orig", image)
        highgui.cvShowImage("Camera 1", result)
        c = highgui.cvWaitKey(10)
Example 6
def on_trackbar (position):

    cv.cvSmooth (gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot (gray, edge)

    # run the edge detector on the grayscale image
    cv.cvCanny (gray, edge, position, position * 3, 3)

    # reset
    cv.cvSetZero (col_edge)

    # copy edge points
    cv.cvCopy (image, col_edge, edge)
    
    # show the image
    highgui.cvShowImage (win_name, col_edge)
Example 7
def on_trackbar(position):

    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, edge)

    # run the edge detector on the grayscale image
    cv.cvCanny(gray, edge, position, position * 3, 3)

    # reset
    cv.cvSetZero(col_edge)

    # copy edge points
    cv.cvCopy(image, col_edge, edge)

    # show the image
    highgui.cvShowImage(win_name, col_edge)
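The same trackbar-driven Canny preview can be sketched with cv2. This version assumes the colour frame and its grayscale copy are passed in rather than kept as globals; the function name is illustrative:

import numpy as np
import cv2

def edge_preview(image, gray, position, win_name="edge"):
    # blur, then run Canny with the usual 1:3 threshold ratio
    blurred = cv2.blur(gray, (3, 3))
    edge = cv2.Canny(blurred, position, position * 3, apertureSize=3)
    # black canvas, then copy the original colours only where edges were found
    col_edge = np.zeros_like(image)
    col_edge[edge != 0] = image[edge != 0]
    cv2.imshow(win_name, col_edge)

It could be wired to a slider with cv2.createTrackbar("threshold", "edge", 0, 255, lambda p: edge_preview(image, gray, p)).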
Example 8
def on_trackbar (position):
    # the following two lines probably serve no real purpose here
    cv.cvSmooth (gray, edge, cv.CV_BLUR, 3, 3, 0)    # smooth the image
    cv.cvNot (gray, edge)     # bitwise inversion of the array elements

    # run the edge detector on the grayscale image
    cv.cvCanny (gray, edge, position, position * 3, 3)   # Canny edge detection

    # reset
    cv.cvSetZero (col_edge)   # zero the array

    # copy edge points
    cv.cvCopy (image, col_edge, edge)   # the edge argument acts as the copy mask

    # show the image
    highgui.cvShowImage (win_name, col_edge)
Example 9
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff current frame with the last saved frame.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here
            # could do averaging here.
            pass
        return image

    original = n[4]
    differenceImage = cv.cvCloneMat(image)
    cv.cvAbsDiff(image, original, differenceImage)
    """The threshold value determines the amount of "Change" required 
    before something will show up"""
    thresholdValue = 50  # 32
    cv.cvThreshold(differenceImage, differenceImage, thresholdValue, 255,
                   cv.CV_THRESH_BINARY)

    # Convert to one channel
    gray = cv.cvCreateImage(cv.cvGetSize(differenceImage), 8, 1)
    cv.cvCvtColor(differenceImage, gray, cv.CV_BGR2GRAY)

    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)

    # Dilate the threshold image.
    # It adds a border to the object.
    #cv.cvDilate(gray,gray, None, 9)

    # Add a bit of Blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)

    result = cv.cvCloneMat(image)
    cv.cvSetZero(result)

    cv.cvAnd(image, image, result, gray)
    return result
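A condensed cv2 sketch of the same frame-differencing mask, assuming the background frame is passed in explicitly rather than cached in the mutable default argument:

import numpy as np
import cv2

def threshold_image_cv2(image, background, threshold_value=50):
    diff = cv2.absdiff(image, background)
    _, diff = cv2.threshold(diff, threshold_value, 255, cv2.THRESH_BINARY)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    # median blur removes salt-and-pepper noise, Gaussian blur softens the mask edges
    gray = cv2.medianBlur(gray, 15)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    # black result, then copy the original pixels wherever the mask is non-zero
    result = np.zeros_like(image)
    result[gray != 0] = image[gray != 0]
    return result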
Example 10
def threshold_image(image, n=[]):
    """Record the first 5 images to get a background, then diff current frame with the last saved frame.
    """
    if len(n) < 5:
        # n[4] will be our background
        # First capture a few images
        n.append(cv.cvCloneMat(image))
        if len(n) == 5:
            # last time here 
            # could do averaging here.
            pass
        return image

    original = n[4]
    differenceImage = cv.cvCloneMat(image)
    cv.cvAbsDiff(image, original, differenceImage)

    """The threshold value determines the amount of "Change" required
    before something will show up"""
    thresholdValue = 50  # 32
    cv.cvThreshold(differenceImage, differenceImage, thresholdValue, 255,
                   cv.CV_THRESH_BINARY)

    # Convert to one channel
    gray = cv.cvCreateImage(cv.cvGetSize(differenceImage), 8, 1)
    cv.cvCvtColor(differenceImage, gray, cv.CV_BGR2GRAY)

    # Use median filter to remove salt and pepper noise.
    cv.cvSmooth(gray, gray, cv.CV_MEDIAN, 15)

    # Dilate the threshold image.
    # It adds a border to the object.
    #cv.cvDilate(gray,gray, None, 9)

    # Add a bit of Blur to the threshold mask
    cv.cvSmooth(gray, gray, cv.CV_GAUSSIAN, 5)

    result = cv.cvCloneMat(image)
    cv.cvSetZero(result)

    cv.cvAnd(image, image, result, gray)
    return result
Example 11
                                      highgui.CV_CAP_PROP_FRAME_HEIGHT,240)
    else:
        # we have an argument on the command line,
        # we can assume this is a file name, so open it
        capture = highgui.cvCreateFileCapture (sys.argv [1])            

    # check that capture device is OK
    if not capture:
        print "Error opening capture device"
        sys.exit (1)
        
    # create an image to draw the histogram in
    histimg = cv.cvCreateImage (cv.cvSize (320,240), 8, 3)

    # init the image of the histogram to black
    cv.cvSetZero (histimg)

    # capture the 1st frame to get some properties from it
    frame = highgui.cvQueryFrame (capture)

    # get some properties of the frame
    frame_size = cv.cvGetSize (frame)

    # compute which selection of the frame we want to monitor
    selection = cv.cvRect (0, 0, frame.width, frame.height)

    # create some images useful later
    hue = cv.cvCreateImage (frame_size, 8, 1)
    mask = cv.cvCreateImage (frame_size, 8, 1)
    hsv = cv.cvCreateImage (frame_size, 8, 3 )
Example 12
                                     1200)
    else:
        # we have an argument on the command line,
        # we can assume this is a file name, so open it
        capture = highgui.cvCreateFileCapture(sys.argv[1])

    # check that capture device is OK
    if not capture:
        print "Error opening capture device"
        sys.exit(1)

    # create an image to draw the histogram in
    histimg = cv.cvCreateImage(cv.cvSize(320, 240), 8, 3)

    # init the image of the histogram to black
    cv.cvSetZero(histimg)

    # capture the 1st frame to get some properties from it
    frame = highgui.cvQueryFrame(capture)

    # get some properties of the frame
    frame_size = cv.cvGetSize(frame)

    # compute which selection of the frame we want to monitor
    selection = cv.cvRect(0, 0, frame.width, frame.height)

    # create some images useful later
    hue = cv.cvCreateImage(frame_size, 8, 1)
    mask = cv.cvCreateImage(frame_size, 8, 1)
    hsv = cv.cvCreateImage(frame_size, 8, 3)
    backproject = cv.cvCreateImage(frame_size, 8, 1)
Example 13
grayimage = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
cannyedges = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)


#convert to grayscale
cv.cvCvtColor(image, grayimage, cv.CV_BGR2GRAY)
#Canny
#Canny(image, edges, threshold1, threshold2, aperture_size=3) = None
#Implements the Canny algorithm for edge detection.
cv.cvCanny(grayimage, cannyedges, 150, 450 , 3)


#This is the line that throws the error
storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)

cv.cvSetZero(storage)

#circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 2, grayimage.height/4, 150, 40, long(sys.argv[2]), long(sys.argv[3]))
#circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.height, 200, 40, long(sys.argv[2]), long(sys.argv[3]))
circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.width, 150, 40, long(sys.argv[2]), grayimage.width)

print storage
for i in storage:
    print i[0], i[1], i[2]
    center = cv.cvRound(i[0]), cv.cvRound(i[1])
    radius = cv.cvRound(i[2])
    cv.cvCircle(image, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 ) 
    cv.cvCircle(image, (center), 10, cv.CV_RGB(255, 0, 0), -1, cv.CV_AA, 0 ) 
    cv.cvCircle(cannyedges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 ) 
 #   for c in range(0,3):
        #v = cv.cvRound( cv.cvmGet(storage, i, c) )
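The same circle search is much shorter with cv2.HoughCircles; the input file name and parameter values below are placeholders mirroring the snippet above, not the original command-line arguments:

import numpy as np
import cv2

image = cv2.imread("coins.png")                 # placeholder input file
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=gray.shape[1],
                           param1=150, param2=40, minRadius=0, maxRadius=gray.shape[1])
if circles is not None:
    for x, y, r in np.round(circles[0]).astype(int):
        cv2.circle(image, (x, y), r, (0, 0, 255), 1, cv2.LINE_AA)    # circle outline
        cv2.circle(image, (x, y), 10, (0, 0, 255), -1, cv2.LINE_AA)  # filled centre dot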
Example 14
def main():

    print "OpenCV version: %s (%d, %d, %d)" % (cv.CV_VERSION,
                                               cv.CV_MAJOR_VERSION,
                                               cv.CV_MINOR_VERSION,
                                               cv.CV_SUBMINOR_VERSION)

    # create windows 
    create_and_position_window('Thresholded_HSV_Image', 10, 10)
    create_and_position_window('RGB_VideoFrame', 10+cam_width, 10)

    create_and_position_window('Hue', 10, 10+cam_height)
    create_and_position_window('Saturation', 210, 10+cam_height)
    create_and_position_window('Value', 410, 10+cam_height)
    create_and_position_window('LaserPointer', 0,0)

    capture = setup_camera_capture()

    # create images for the different channels
    h_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    s_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    v_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    laser_img = cv.cvCreateImage (cv.cvSize (cam_width,cam_height), 8, 1)
    cv.cvSetZero(h_img)
    cv.cvSetZero(s_img)
    cv.cvSetZero(v_img)
    cv.cvSetZero(laser_img)

    while True: 
        # 1. capture the current image
        frame = highgui.cvQueryFrame (capture)
        if frame is None:
            # no image captured... end the processing
            break

        hsv_image = cv.cvCloneImage(frame) # temporary copy of the frame
        cv.cvCvtColor(frame, hsv_image, cv.CV_BGR2HSV) # convert to HSV

        # split the video frame into color channels
        cv.cvSplit(hsv_image, h_img, s_img, v_img, None)

        # Threshold ranges of HSV components.
        cv.cvInRangeS(h_img, hmin, hmax, h_img)
        cv.cvInRangeS(s_img, smin, smax, s_img)
        cv.cvInRangeS(v_img, vmin, vmax, v_img)

        # Perform an AND on HSV components to identify the laser!
        cv.cvAnd(h_img, v_img, laser_img)
        # This actually Worked OK for me without using Saturation.
        #cv.cvAnd(laser_img, s_img,laser_img) 

        # Merge the HSV components back together.
        cv.cvMerge(h_img, s_img, v_img, None, hsv_image)

        #-----------------------------------------------------
        # NOTE: default color space in OpenCV is BGR!!
        # we can now display the images 
        highgui.cvShowImage ('Thresholded_HSV_Image', hsv_image)
        highgui.cvShowImage ('RGB_VideoFrame', frame)
        highgui.cvShowImage ('Hue', h_img)
        highgui.cvShowImage ('Saturation', s_img)
        highgui.cvShowImage ('Value', v_img)
        highgui.cvShowImage('LaserPointer', laser_img)

        # handle events
        k = highgui.cvWaitKey (10)

        if k == '\x1b' or k == 'q':
            # user pressed ESC or 'q', so exit
            break
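For comparison, a minimal cv2 sketch of the same hue/value thresholding used to isolate the laser spot; the bound values are illustrative defaults, not the script's tuned hmin/hmax/vmin/vmax:

import cv2

def laser_mask(frame, hmin=20, hmax=160, vmin=230, vmax=255):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # the spot is where both the hue and the value tests pass
    # (saturation is ignored, as in the original note above)
    return cv2.bitwise_and(cv2.inRange(h, hmin, hmax), cv2.inRange(v, vmin, vmax))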
Example 15
        tx = x
        ty = y
        print x, y
        showoriginal = False
    elif event == highgui.CV_EVENT_LBUTTONDOWN:
        showoriginal = True
        print "Orig"
highgui.cvSetMouseCallback("Blob", mousecb, 0)

while True:
    if not fromfile:
        img = highgui.cvQueryFrame(cap0)
        cv.cvCvtColor(img,img,cv.CV_RGB2HSV)
    (h,s,v) = (img[ty][tx][0],img[ty][tx][1],img[ty][tx][2])
    thresh = cv.cvCreateImage(cv.cvSize(IMGW,IMGH),8,1)
    cv.cvSetZero(thresh)
    
    # dynamic floodfill
    import Queue
    q = Queue.Queue(0)
    q.put({"x": tx, "y": ty})
    thresh[ty][tx] = 255
    while True:
        # move left and right until boundary is hit
        n = q.get()
        l = n["x"] - 1
        r = n["x"] + 1
        y = n["y"]
        while True:
            if h-20 < img[y][l][0] < h+20:
                thresh[y][l] = 255
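Instead of the hand-rolled queue, cv2 ships a flood fill that can grow a mask from the clicked pixel with a hue tolerance; the +/-20 tolerance mirrors the snippet above, and the rest of this sketch is an assumption:

import numpy as np
import cv2

def hue_floodfill_mask(hsv_img, seed, tol=20):
    h, w = hsv_img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)   # floodFill needs a 1-pixel border on each side
    # FLOODFILL_MASK_ONLY leaves hsv_img untouched and writes 255 into the mask
    cv2.floodFill(hsv_img, mask, seed, (255, 255, 255),
                  loDiff=(tol, 255, 255), upDiff=(tol, 255, 255),
                  flags=4 | cv2.FLOODFILL_MASK_ONLY | (255 << 8))
    return mask[1:-1, 1:-1]                     # 255 where the fill reached, 0 elsewhere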
Example 16
            prev_grey = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            prev_pyramid = cv.cvCreateImage (cv.cvGetSize (frame), 8, 1)
            eig = cv.cvCreateImage (cv.cvGetSize (frame), cv.IPL_DEPTH_32F, 1)
            temp = cv.cvCreateImage (cv.cvGetSize (frame), cv.IPL_DEPTH_32F, 1)
            points = [[], []]

        # copy the frame, so we can draw on it
        cv.cvCopy (frame, image)

        # create a grey version of the image
        cv.cvCvtColor (image, grey, cv.CV_BGR2GRAY)

        if night_mode:
            # night mode: only display the points
            cv.cvSetZero (image)

        if need_to_init:
            # we want to search all the good points
            # create the wanted images
            
            # search the good points
            points [1] = cv.cvGoodFeaturesToTrack (
                grey, eig, temp,
                MAX_COUNT,
                quality, min_distance, None, 3, 0, 0.04)
            
            # refine the corner locations
            cv.cvFindCornerSubPix (
                grey,
                points [1],
Example 17
def on_trackbar1(position):
	global pos1 
	global pos2
	global pos3
	global pos4
	global pos5
	global pos6
	global pos7
	global img
	global gray
	global edges
	print
	print position, pos2, pos3, pos4, pos5, pos6, pos7

	temp = cv.cvCloneImage(img)
	gray = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)	
	edges = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
	dst =  cv.cvCreateImage( cv.cvSize(256,256), 8, 3 )
	

	src = cv.cvCloneImage(img)
	src2 = cv.cvCreateImage( cv.cvGetSize(src), 8, 3 );
	cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)

	cv.cvCanny(gray, edges, position, pos2, 3)
	cv.cvSmooth(edges, edges, cv.CV_GAUSSIAN, 9, 9)

	storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
	cv.cvSetZero(storage)
	try:
		circles = cv.cvHoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT, 1, float(pos3), float(pos2), float(pos4), long(pos5),long(pos6) )
		#print storage
		for i in storage:
			print "Center: ", i[0], i[1], "  Radius: ", i[2]
			center = cv.cvRound(i[0]), cv.cvRound(i[1])
			radius = cv.cvRound(i[2])
			cv.cvCircle(temp, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 ) 
			cv.cvCircle(edges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 ) 
			if radius > 200:
				print "Circle found over 200 Radius"
				center_crop_topleft = (center[0]-(radius - pos7)), (center[1]-(radius - pos7))
				center_crop_bottomright = (center[0]+(radius - pos7)), (center[1]+(radius - pos7))
				print "crop top left:     ", center_crop_topleft
				print "crop bottom right: ", center_crop_bottomright
				center_crop = cv.cvGetSubRect(src, (center_crop_topleft[0], center_crop_topleft[1] , (center_crop_bottomright[0] - center_crop_topleft[0]), (center_crop_bottomright[1] - center_crop_topleft[1])  ))
				#center_crop = cv.cvGetSubRect(src, (50, 50, radius/2, radius/2))
				cvShowImage( "center_crop", center_crop )
				print "center_crop created"
				

				#mark found circle's center with blue point and blue circle of pos 7 radius
				cv.cvCircle(temp ,(center), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 ) 	
				cv.cvCircle(temp ,(center), (radius - pos7), cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 ) 
				#cvLogPolar(src, dst, (center), 48, CV_INTER_LINEAR	+CV_WARP_FILL_OUTLIERS )
				#this will draw a smaller cirle outlining the center circle				
				#pos7 = int(pos7 /2.5)
				#cv.cvCircle(dst  ,(img_size.width-pos7, 0), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
				#cv.cvLine(dst, (img_size.width-pos7-1, 0), (img_size.width-pos7-1, img_size.height), cv.CV_RGB(0, 0, 255),1,8,0)
				#cvShowImage( "log-polar", dst )
				
				
				#print radius, (radius-pos7)
				
				#cropped = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
				#cropped2 = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
				
				#coin_edge_img = cv.cvGetSubRect(dst, (img_size.width-pos7, 0, pos7 ,img_size.height ))

				#to create the center cropped part of coin
				#img_size = cvGetSize(scr)

				#cvCopy(coin_edge_img, cropped)
				#cvSaveImage("temp.png", cropped)
				#im = Image.open("temp.png").rotate(90)
				#print "pil image size = ", im.size[0], im.size[1]
				#im = im.resize((im.size[0]*2, im.size[1]*2))
				#print "pil image size = ", im.size
				#im.show()
				#im.save("temp2.png")
				cropped2 = highgui.cvLoadImage("temp2.png")
                                #cvShowImage( "cropped", cropped2)

	except:
		print "Exception:", sys.exc_info()[0] 
		print position, pos2, pos3, pos4, pos5, pos6, pos7
		pass

	highgui.cvShowImage("edges", edges)
	#cvShowImage( "log-polar", dst )
	cvShowImage(wname, temp)
Example 18
            image.origin = frame.origin
            grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            prev_grey = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            prev_pyramid = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1)
            points = [[], []]

        # copy the frame, so we can draw on it
        cv.cvCopy(frame, image)

        # create a grey version of the image
        cv.cvCvtColor(image, grey, cv.CV_BGR2GRAY)

        if night_mode:
            # night mode: only display the points
            cv.cvSetZero(image)

        if need_to_init:
            # we want to search all the good points

            # create the wanted images
            eig = cv.cvCreateImage(cv.cvGetSize(grey), 32, 1)
            temp = cv.cvCreateImage(cv.cvGetSize(grey), 32, 1)

            # the default parameters
            quality = 0.01
            min_distance = 10

            # search the good points
            points[1] = cv.cvGoodFeaturesToTrack(grey, eig, temp, MAX_COUNT,
                                                 quality, min_distance, None,