Example #1
def depthmatch(x,y,leftimage,rightimage,roi=80,buf=50,baseline=2.7,focal_length=80):
    """depthmatch function
    x,y : (int) pixel position of target in left image
    leftimage, rightimage : (IplImage) stereo images
    roi: (int) region of interest around x,y to use in matching
    buf: (int) buffer outside of a straight horizontal search for a match
    baseline, focal_length: stereo geometry parameters passed through to plane2point
    """
    #print "Match",x,y
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height
    centerx = width/2
    centery = height/2
    
    (y1,x1,y2,x2) = (y-roi,x-roi,y+roi,x+roi)
    if y1<0: y1 = 0
    if x1<0: x1 = 0
    if y2>height: y2 = height
    if x2>width: x2 = width
    # copy subregion roi x roi

    template_rect = cv.cvRect(x1,y1,(x2-x1),(y2-y1))
    template = cv.cvGetSubRect(leftimage, template_rect)
    
    #(y3,x3,y4,x4) = (y-roi-buf,x-roi-buf,y+roi+buf,width) # +/- 20 pixels in vertical direction, -20 to the right edge

    (y3,x3,y4,x4) = (y-roi-buf,0,y+roi+buf,x+roi+buf) # search y +/- (roi+buf) pixels vertically, from the left edge out to x+roi+buf
    if x3<0: x3 = 0
    if y3<0: y3 = 0
    if x4>=width: x4 = width-1
    if y4>height: y4 = height
    #cv.cvSetImageROI(rightimage, (y3,x3,y4,x4))

    rightsub_rect = cv.cvRect(x3,y3,(x4-x3),(y4-y3))
    rightsub = cv.cvGetSubRect(rightimage, rightsub_rect)
    # result matrix should be (W - w + 1) x (H - h + 1) where WxH are template dimensions, wxh are rightsub dimensions
    W = x4-x3
    H = y4-y3
    w = x2-x1
    h = y2-y1

    resy = (y4-y3)-(y2-y1)+1
    resx = (x4-x3)-(x2-x1)+1

    resultmat = cv.cvCreateImage((resx, resy), 32, 1)
    cv.cvZero(resultmat)
    # match template image in a subportion of rightimage
    cv.cvMatchTemplate(rightsub, template, resultmat, cv.CV_TM_SQDIFF)
    min_val, max_val, min_point, max_point = cv.cvMinMaxLoc(resultmat)
    cv.cvNormalize(resultmat, resultmat, 1, 0, cv.CV_MINMAX)
    depth = plane2point(x-centerx, y-centery, x3+min_point.x+roi-centerx, y3+min_point.y+roi-centery, baseline, focal_length)
    #print "Found match at", min_point.x+x3, min_point.y+y3
    return (depth, (x,y), (x3+min_point.x+roi, y3+min_point.y+roi))
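A usage sketch for depthmatch above (not part of the original): the stereo image file names are hypothetical, and plane2point must be provided by the original module.

left = highgui.cvLoadImage("left.png")        # hypothetical rectified left image
right = highgui.cvLoadImage("right.png")      # hypothetical rectified right image
depth, target, match = depthmatch(320, 240, left, right, roi=80, buf=50)
print "target", target, "matched at", match, "depth", depth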
Example #2
            break

        # mirror the captured image
        cv.cvFlip (frame, None, 1)

        # compute the hsv version of the image 
        cv.cvCvtColor (frame, hsv, cv.CV_BGR2HSV)

        # compute which pixels are in the wanted range
        cv.cvInRangeS (hsv, hsv_min, hsv_max, mask)

        # extract the hue from the hsv array
        cv.cvSplit (hsv, hue, None, None, None)

        # select the rectangle of interest in the hue/mask arrays
        hue_roi = cv.cvGetSubRect (hue, selection)
        mask_roi = cv.cvGetSubRect (mask, selection)

        # it's time to compute the histogram
        cv.cvCalcHist (hue_roi, hist, 0, mask_roi)

        # extract the min and max value of the histogram
        min_val, max_val = cv.cvGetMinMaxHistValue (hist, None, None)

        # compute the scale factor
        if max_val > 0:
            scale = 255. / max_val
        else:
            scale = 0.

        # scale the histograms
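The snippet above is cut off at the scaling step; one common continuation, following Examples #8 and #11, rescales the histogram bins with the factor just computed (a sketch, not the original code):

        # scale the histogram bins so the tallest bin maps to 255
        cv.cvConvertScale(hist.bins, hist.bins, scale, 0)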
Example #3
	def detect_squares(self, img_grey, img_orig):
		""" Find squares within the video stream and draw them """
		cv.cvClearMemStorage(self.faces_storage)
		N										= 11
		thresh									= 5
		sz										= cv.cvSize(img_grey.width & -2, img_grey.height & -2)
		timg									= cv.cvCloneImage(img_orig)
		pyr										= cv.cvCreateImage(cv.cvSize(sz.width/2, sz.height/2), 8, 3)
		# create empty sequence that will contain points -
		# 4 points per square (the square's vertices)
		squares									= cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.squares_storage)
		squares									= cv.CvSeq_CvPoint.cast(squares)

		# select the maximum ROI in the image
		# with the width and height divisible by 2
		subimage								= cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))

		# note: timg must stay alive while subimage (a view into it) is used below; it is released at the end

		# down-scale and upscale the image to filter out the noise
		cv.cvPyrDown(subimage, pyr, 7)
		cv.cvPyrUp(pyr, subimage, 7)
		cv.cvReleaseImage(pyr)
		tgrey									= cv.cvCreateImage(sz, 8, 1)
		# find squares in every color plane of the image
		for c in range(3):
			# extract the c-th color plane
			channels							= [None, None, None]
			channels[c]							= tgrey
			cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
			for l in range(N):
				# hack: use Canny instead of zero threshold level.
				# Canny helps to catch squares with gradient shading
				if(l == 0):
					# apply Canny. Take the upper threshold from slider
					# and set the lower to 0 (which forces edges merging)
					cv.cvCanny(tgrey, img_grey, 0, thresh, 5)
					# dilate canny output to remove potential
					# holes between edge segments
					cv.cvDilate(img_grey, img_grey, None, 1)
				else:
					# apply threshold if l!=0:
					#     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
					cv.cvThreshold(tgrey, img_grey, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)

				# find contours and store them all as a list
				count, contours					= cv.cvFindContours(img_grey,
																	self.squares_storage,
																	cv.sizeof_CvContour,
																	cv.CV_RETR_LIST,
																	cv.CV_CHAIN_APPROX_SIMPLE,
																	cv.cvPoint(0,0))

				if not contours:
					continue

				# test each contour
				for contour in contours.hrange():
					# approximate contour with accuracy proportional
					# to the contour perimeter
					result						= cv.cvApproxPoly(contour,
																	cv.sizeof_CvContour,
																	self.squares_storage,
																	cv.CV_POLY_APPROX_DP,
																	cv.cvContourPerimeter(contours)*0.02, 0)
					# square contours should have 4 vertices after approximation
					# relatively large area (to filter out noisy contours)
					# and be convex.
					# Note: absolute value of an area is used because
					# area may be positive or negative - in accordance with the
					# contour orientation
					if(result.total == 4 and abs(cv.cvContourArea(result)) > 1000 and cv.cvCheckContourConvexity(result)):
						s						= 0
						for i in range(5):
							# find minimum angle between joint
							# edges (maximum of cosine)
							if(i >= 2):
								t				= abs(self.squares_angle(result[i], result[i-2], result[i-1]))
								if s<t:
									s			= t
						# if cosines of all angles are small
						# (all angles are ~90 degrees) then write the quadrangle
						# vertices to resultant sequence
						if(s < 0.3):
							for i in range(4):
								squares.append(result[i])

		cv.cvReleaseImage(tgrey)
		cv.cvReleaseImage(timg)
		return squares
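The squares_angle helper used above is not shown; in OpenCV's classic squares sample the equivalent angle() function returns the cosine of the angle between the edges meeting at pt0, so a hedged reconstruction (assuming math is imported) looks like this:

	def squares_angle(self, pt1, pt2, pt0):
		# cosine of the angle between vectors pt0->pt1 and pt0->pt2
		dx1 = pt1.x - pt0.x
		dy1 = pt1.y - pt0.y
		dx2 = pt2.x - pt0.x
		dy2 = pt2.y - pt0.y
		return (dx1*dx2 + dy1*dy2) / math.sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10)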
Example #4
            break

        # mirror the captured image
        #cv.cvFlip (frame, None, 1)

        # compute the hsv version of the image
        cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)

        # compute which pixels are in the wanted range
        cv.cvInRangeS(hsv, hsv_min, hsv_max, mask)

        # extract the hue from the hsv array
        cv.cvSplit(hsv, hue, None, None, None)

        # select the rectangle of interest in the hue/mask arrays
        hue_roi = cv.cvGetSubRect(hue, selection)
        mask_roi = cv.cvGetSubRect(mask, selection)

        # it's time to compute the histogram
        cv.cvCalcHist(hue_roi, hist, 0, mask_roi)

        # extract the min and max value of the histogram
        min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue(hist)

        # compute the scale factor
        if max_val > 0:
            scale = 255. / max_val
        else:
            scale = 0.

        # scale the histograms
Example #5
    def detect_squares(self, img):
        """ Find squares within the video stream and draw them """
        N = 11
        thresh = 5
        sz = cv.cvSize(img.width & -2, img.height & -2)
        timg = cv.cvCloneImage(img)
        gray = cv.cvCreateImage(sz, 8, 1)
        pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)
        # create empty sequence that will contain points -
        # 4 points per square (the square's vertices)
        squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint,
                                 self.storage)
        squares = cv.CvSeq_CvPoint.cast(squares)

        # select the maximum ROI in the image
        # with the width and height divisible by 2
        subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))

        # down-scale and upscale the image to filter out the noise
        cv.cvPyrDown(subimage, pyr, 7)
        cv.cvPyrUp(pyr, subimage, 7)
        tgray = cv.cvCreateImage(sz, 8, 1)
        # find squares in every color plane of the image
        for c in range(3):
            # extract the c-th color plane
            channels = [None, None, None]
            channels[c] = tgray
            cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)
            for l in range(N):
                # hack: use Canny instead of zero threshold level.
                # Canny helps to catch squares with gradient shading
                if (l == 0):
                    # apply Canny. Take the upper threshold from slider
                    # and set the lower to 0 (which forces edges merging)
                    cv.cvCanny(tgray, gray, 0, thresh, 5)
                    # dilate canny output to remove potential
                    # holes between edge segments
                    cv.cvDilate(gray, gray, None, 1)
                else:
                    # apply threshold if l!=0:
                    #     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                    cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
                                   cv.CV_THRESH_BINARY)

                # find contours and store them all as a list
                count, contours = cv.cvFindContours(gray, self.storage,
                                                    cv.sizeof_CvContour,
                                                    cv.CV_RETR_LIST,
                                                    cv.CV_CHAIN_APPROX_SIMPLE,
                                                    cv.cvPoint(0, 0))

                if not contours:
                    continue

                # test each contour
                for contour in contours.hrange():
                    # approximate contour with accuracy proportional
                    # to the contour perimeter
                    result = cv.cvApproxPoly(
                        contour, cv.sizeof_CvContour, self.storage,
                        cv.CV_POLY_APPROX_DP,
                        cv.cvContourPerimeter(contours) * 0.02, 0)
                    # square contours should have 4 vertices after approximation
                    # relatively large area (to filter out noisy contours)
                    # and be convex.
                    # Note: absolute value of an area is used because
                    # area may be positive or negative - in accordance with the
                    # contour orientation
                    if (result.total == 4
                            and abs(cv.cvContourArea(result)) > 1000
                            and cv.cvCheckContourConvexity(result)):
                        s = 0
                        for i in range(5):
                            # find minimum angle between joint
                            # edges (maximum of cosine)
                            if (i >= 2):
                                t = abs(
                                    self.squares_angle(result[i],
                                                       result[i - 2],
                                                       result[i - 1]))
                                if s < t:
                                    s = t
                        # if cosines of all angles are small
                        # (all angles are ~90 degrees) then write the quadrangle
                        # vertices to resultant sequence
                        if (s < 0.3):
                            for i in range(4):
                                squares.append(result[i])

        i = 0
        while i < squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])

            # draw the square as a closed polyline
            cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
            i += 4

        return img
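A usage sketch for the method above (not part of the original): the surrounding class is not shown, so SquareDetector, its storage setup and the input file name are assumptions; squares_angle is the cosine helper sketched after Example #3.

class SquareDetector:
    def __init__(self):
        # contours and polygon approximations are allocated from this storage
        self.storage = cv.cvCreateMemStorage(0)
    # detect_squares and squares_angle would be defined here, as shown above

detector = SquareDetector()
img = highgui.cvLoadImage("squares.png")       # hypothetical input image
highgui.cvShowImage("squares", detector.detect_squares(img))
highgui.cvWaitKey(0)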
Example #6
            break

        # mirror the captured image
        cv.cvFlip (frame, None, 1)

        # compute the hsv version of the image 
        cv.cvCvtColor (frame, hsv, cv.CV_BGR2HSV)

        # compute which pixels are in the wanted range
        cv.cvInRangeS (hsv, hsv_min, hsv_max, mask)

        # extract the hue from the hsv array
        cv.cvSplit (hsv, hue, None, None, None)

        # select the rectangle of interest in the hue/mask arrays
        hue_roi = cv.cvGetSubRect (hue, selection)
        mask_roi = cv.cvGetSubRect (mask, selection)

        # it's time to compute the histogram
        cv.cvCalcHist (hue_roi, hist, 0, mask_roi)

        # extract the min and max value of the histogram
        #min_val, max_val = cv.cvGetMinMaxHistValue (hist, None, None)
        min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue (hist)

        # compute the scale factor
        if max_val > 0:
            scale = 255. / max_val
        else:
            scale = 0.
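The fragments above (and in Examples #2, #4 and #9) assume the HSV working buffers and the hue histogram were allocated earlier; a minimal allocation sketch, where the bin count and the cvCreateHist call form follow the old SWIG bindings' samples and are assumptions:

frame_size = cv.cvGetSize(frame)
hsv = cv.cvCreateImage(frame_size, 8, 3)      # 3-channel HSV copy of the frame
hue = cv.cvCreateImage(frame_size, 8, 1)      # hue plane extracted by cvSplit
mask = cv.cvCreateImage(frame_size, 8, 1)     # pixels inside the wanted HSV range
hist = cv.cvCreateHist([16], cv.CV_HIST_ARRAY, [[0, 180]], 1)   # 16 hue bins (assumed)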
Example #7
def depthmatch(x,y,leftimage,rightimage,roi=20,buf=10,debug=False):
    __doc__ = """depthmatch function
    x,y : (int) pixel position of target in left image
    leftimage, rightimage : (IplImage) stereo images
    roi: (int) region of interest around x,y to use in matching
    buf: (int) buffer outside of a straight horizontal search for a match
    """
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height

    (y1,x1,y2,x2) = (y-roi,x-roi,y+roi,x+roi)
    #template = cv.cvCreateImage((roi*2,roi*2), 8, 3)
    if y1<0: y1 = 0
    if x1<0: x1 = 0
    if y2>height: y2 = height
    if x2>width: x2 = width
    #cv.cvSetZero(template)
    # copy subregion roi x roi

    template_rect = cv.cvRect(x1,y1,(x2-x1),(y2-y1))
    template = cv.cvGetSubRect(leftimage, template_rect)
    (y3,x3,y4,x4) = (y-roi-buf,x-roi-buf,y+roi+buf,width) # search y +/- (roi+buf) pixels vertically, from x-roi-buf out to the right edge
    if x3<0: x3 = 0
    if y3<0: y3 = 0
    if x4>=width: x4 = width-1
    if y4>height: y4 = height
    #cv.cvSetImageROI(rightimage, (y3,x3,y4,x4))

    rightsub_rect = cv.cvRect(x3,y3,(x4-x3),(y4-y3))
    rightsub = cv.cvGetSubRect(rightimage, rightsub_rect)
    # result matrix should be (W - w + 1) x (H - h + 1) where WxH are template dimensions, wxh are rightsub dimensions
    W = x4-x3
    H = y4-y3
    w = x2-x1
    h = y2-y1

    resy = (y4-y3)-(y2-y1)+1
    resx = (x4-x3)-(x2-x1)+1

    resultmat = cv.cvCreateImage((resx, resy), 32, 1)
    cv.cvZero(resultmat)
    # match template image in a subportion of rightimage
    cv.cvMatchTemplate(rightsub, template, resultmat, cv.CV_TM_SQDIFF)
    min_val, max_val, min_point, max_point = cv.cvMinMaxLoc(resultmat)
    cv.cvNormalize(resultmat, resultmat, 1, 0, cv.CV_MINMAX)
    depth = stereo.depth(x, x3+min_point.x, max_pixels=width/2)
    
    if debug:
        print "Input image: %ix%i, target: (%i,%i)" % (width,height,x,y)
        print "Template box: (%i,%i) to (%i,%i)" % (x1, y1, x2, y2)
        print "Search area: (%i,%i) to (%i,%i)" % (x3, y3, x4, y4)
        print "%ix%i, %ix%i" % (W,H,w,h)
        print "Result matrix %ix%i" % (resx, resy)
        print "stereo.depth(%i,%i,max_pixels=%i)" % (x, min_point.x+x3,width/2)
        if depth[0]:
            print "Depth: ", depth[0], "(cm)"
        #cv.cvRectangle(rightimage, cv.cvPoint(x1,y1), cv.cvPoint(x2,y2), (255,0,0))
        cv.cvRectangle(rightimage, cv.cvPoint(min_point.x+x3,min_point.y+y3), cv.cvPoint(min_point.x+x3+roi*2,min_point.y+y3+roi*2), (0,255,0))
        cv.cvRectangle(rightimage, cv.cvPoint(x3,y3), cv.cvPoint(x4,y4), (0,0,255))
        cv.cvRectangle(leftimage, cv.cvPoint(x1,y1), cv.cvPoint(x2,y2), (255,0,0))
        #cv.cvRectangle(leftimage, cv.cvPoint(min_point.x+x3,min_point.y+y3), cv.cvPoint(min_point.x+x3+roi*2,min_point.y+y3+roi*2), (0,255,0))
        cv.cvRectangle(leftimage, cv.cvPoint(x3,y3), cv.cvPoint(x4,y4), (0,0,255))
        if depth[0]:
            cv.cvPutText(leftimage, "%5f(cm)" % depth[0], (x1,y1), font, (255,255,255))
        highgui.cvShowImage("depthmatch - template", template)
        highgui.cvShowImage("depthmatch - match", resultmat)
        highgui.cvShowImage("depthmatch - right", rightimage)
        highgui.cvShowImage("depthmatch - left", leftimage)
Example #8
                  cv.CV_RGB(0,255,0), 2, 8, 0 )
        
        if track_object:
            _vmin = vmin
            _vmax = vmax

            cv.cvInRangeS( hsv,
                           cv.cvScalar(  0, smin,min(_vmin,_vmax),0),
                           cv.cvScalar(180, 256, max(_vmin,_vmax),0),
                           mask );

            cv.cvSplit( hsv, hue, None, None, None)

            if track_object < 0:
                max_val = 0.0                
                subhue = cv.cvGetSubRect(hue, selection)
                submask = cv.cvGetSubRect(mask, selection)
                cv.cvCalcHist( subhue, hist, 0, submask )
                
                # extract the min and max value of the histogram
                min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue (hist)
                
                if (max_val):
                    cv.cvConvertScale( hist.bins, hist.bins, 255.0 / max_val, 0)
                else:
                    cv.cvConvertScale( hist.bins, hist.bins, 0.0, 0 )

                track_window = selection
                track_object = 1

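The fragment above relies on selection and track_object being set elsewhere, typically from a mouse callback; a minimal sketch of that wiring, with the window name, selection size and constant usage all assumed rather than taken from the original:

def on_mouse(event, x, y, flags, param):
    global selection, track_object
    if event == highgui.CV_EVENT_LBUTTONUP:
        # a fixed-size selection around the click, purely for illustration
        selection = cv.cvRect(x - 20, y - 20, 40, 40)
        track_object = -1   # < 0 triggers the histogram (re)computation above

highgui.cvSetMouseCallback("camshift", on_mouse)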
Example #9
            break

        # mirror the captured image
        #cv.cvFlip (frame, None, 1)

        # compute the hsv version of the image 
        cv.cvCvtColor (frame, hsv, cv.CV_BGR2HSV)

        # compute which pixels are in the wanted range
        cv.cvInRangeS (hsv, hsv_min, hsv_max, mask)

        # extract the hue from the hsv array
        cv.cvSplit (hsv, hue, None, None, None)

        # select the rectangle of interest in the hue/mask arrays
        hue_roi = cv.cvGetSubRect (hue, selection)
        mask_roi = cv.cvGetSubRect (mask, selection)

        # it's time to compute the histogram
        cv.cvCalcHist (hue_roi, hist, 0, mask_roi)

        # extract the min and max value of the histogram
        min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue (hist)

        # compute the scale factor
        if max_val > 0:
            scale = 255. / max_val
        else:
            scale = 0.

        # scale the histograms
Example #10
def compute_saliency(image):
    global thresh
    global scale
    saliency_scale = int(math.pow(2,scale));
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U,1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale,saliency_scale), cv.IPL_DEPTH_8U,1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    realInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    imaginaryInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1);
    complexInput = cv.cvCreateImage( cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2);

    cv.cvScale(bw_im, realInput, 1.0, 0.0);
    cv.cvZero(imaginaryInput);
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput);

    dft_M = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.height - 1 );
    dft_N = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.width - 1 );

    dft_A = cv.cvCreateMat( dft_M, dft_N, cv.CV_32FC2 );
    image_Re = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Im = cv.cvCreateImage( cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);

    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( complexInput, tmp, None );
    if(dft_A.width > bw_im.width):
        tmp = cv.cvGetSubRect( dft_A, cv.cvRect(bw_im.width,0, dft_N - bw_im.width, bw_im.height));
        cv.cvZero( tmp );
    
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height );
    cv.cvSplit( dft_A, image_Re, image_Im, None, None );
    
    # Compute the phase angle 
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    

    #compute the phase of the spectrum
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)

    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    cv.cvLog(image_Mag, log_mag)
    #Box filter the magnitude, then take the difference
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1);
    filt = cv.cvCreateMat(3,3, cv.CV_32FC1);
    cv.cvSet(filt,cv.cvScalarAll(-1.0/9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1,-1))

    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    cv.cvExp(log_mag, log_mag)
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im,0);

    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT( dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)
            
    tmp = cv.cvGetSubRect( dft_A, cv.cvRect(0,0, bw_im.width, bw_im.height));
    cv.cvCopy( tmp, complexInput, None );
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    min_val, max_val, min_loc, max_loc = cv.cvMinMaxLoc(realInput)
    #cv.cvScale(realInput, realInput, 1.0/(max_val-min_val), 1.0*(-min_val)/(max_val-min_val));
    cv.cvSmooth(realInput, realInput);
    threshold = thresh/100.0*cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1),cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput,tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255,0)
    return bw_im1
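A usage sketch for compute_saliency above (not part of the original): thresh and scale are module-level globals read inside the function, and the file and window names are hypothetical.

thresh = 50       # threshold as a percentage of the mean response (see thresh/100.0 above)
scale = 6         # the saliency map is computed at 2**scale x 2**scale pixels
img = highgui.cvLoadImage("scene.png")            # hypothetical input
highgui.cvNamedWindow("BW", 1)                    # shown from inside compute_saliency
highgui.cvNamedWindow("Saliency", 1)
highgui.cvShowImage("Saliency", compute_saliency(img))
highgui.cvWaitKey(0)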
Example #11
        cv.cvLine(image, cv.cvPoint(image.width / 2, 0),
                  cv.cvPoint(image.width / 2, image.height),
                  cv.CV_RGB(0, 255, 0), 2, 8, 0)

        if track_object:
            _vmin = vmin
            _vmax = vmax

            cv.cvInRangeS(hsv, cv.cvScalar(0, smin, min(_vmin, _vmax), 0),
                          cv.cvScalar(180, 256, max(_vmin, _vmax), 0), mask)

            cv.cvSplit(hsv, hue, None, None, None)

            if track_object < 0:
                max_val = 0.0
                subhue = cv.cvGetSubRect(hue, selection)
                submask = cv.cvGetSubRect(mask, selection)
                cv.cvCalcHist(subhue, hist, 0, submask)

                # extract the min and max value of the histogram
                min_val, max_val, min_idx, max_idx = cv.cvGetMinMaxHistValue(
                    hist)

                if (max_val):
                    cv.cvConvertScale(hist.bins, hist.bins, 255.0 / max_val, 0)
                else:
                    cv.cvConvertScale(hist.bins, hist.bins, 0.0, 0)

                track_window = selection
                track_object = 1
Example #12
def on_trackbar1(position):
	global pos1 
	global pos2
	global pos3
	global pos4
	global pos5
	global pos6
	global pos7
	global img
	global gray
	global edges
	print
	print position, pos2, pos3, pos4, pos5, pos6, pos7

	temp = cv.cvCloneImage(img)
	gray = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)	
	edges = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
	dst =  cv.cvCreateImage( cv.cvSize(256,256), 8, 3 )
	

	src = cv.cvCloneImage(img)
	src2 = cv.cvCreateImage( cv.cvGetSize(src), 8, 3 );
	cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)

	cv.cvCanny(gray, edges, position, pos2, 3)
	cv.cvSmooth(edges, edges, cv.CV_GAUSSIAN, 9, 9)

	storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
	cv.cvSetZero(storage)
	try:
		circles = cv.cvHoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT, 1, float(pos3), float(pos2), float(pos4), long(pos5),long(pos6) )
		#print storage
		for i in storage:
			print "Center: ", i[0], i[1], "  Radius: ", i[2]
			center = cv.cvRound(i[0]), cv.cvRound(i[1])
			radius = cv.cvRound(i[2])
			cv.cvCircle(temp, (center), radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0 ) 
			cv.cvCircle(edges, (center), radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0 ) 
			if radius > 200:
				print "Circle found over 200 Radius"
				center_crop_topleft = (center[0]-(radius - pos7)), (center[1]-(radius - pos7))
				center_crop_bottomright = (center[0]+(radius - pos7)), (center[1]+(radius - pos7))
				print "crop top left:     ", center_crop_topleft
				print "crop bottom right: ", center_crop_bottomright
				center_crop = cv.cvGetSubRect(src, (center_crop_topleft[0], center_crop_topleft[1] , (center_crop_bottomright[0] - center_crop_topleft[0]), (center_crop_bottomright[1] - center_crop_topleft[1])  ))
				#center_crop = cv.cvGetSubRect(src, (50, 50, radius/2, radius/2))
				cvShowImage( "center_crop", center_crop )
				print "center_crop created"
				

				#mark found circle's center with blue point and blue circle of pos 7 radius
				cv.cvCircle(temp ,(center), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 ) 	
				cv.cvCircle(temp ,(center), (radius - pos7), cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 ) 
				#cvLogPolar(src, dst, (center), 48, CV_INTER_LINEAR	+CV_WARP_FILL_OUTLIERS )
				#this will draw a smaller cirle outlining the center circle				
				#pos7 = int(pos7 /2.5)
				#cv.cvCircle(dst  ,(img_size.width-pos7, 0), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0 )
				#cv.cvLine(dst, (img_size.width-pos7-1, 0), (img_size.width-pos7-1, img_size.height), cv.CV_RGB(0, 0, 255),1,8,0)
				#cvShowImage( "log-polar", dst )
				
				
				#print radius, (radius-pos7)
				
				#cropped = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
				#cropped2 = cv.cvCreateImage( (pos7, img_size.height), 8, 3)
				
				#coin_edge_img = cv.cvGetSubRect(dst, (img_size.width-pos7, 0, pos7 ,img_size.height ))

				#to create the center cropped part of coin
				#img_size = cvGetSize(scr)

				#cvCopy(coin_edge_img, cropped)
				#cvSaveImage("temp.png", cropped)
				#im = Image.open("temp.png").rotate(90)
				#print "pil image size = ", im.size[0], im.size[1]
				#im = im.resize((im.size[0]*2, im.size[1]*2))
				#print "pil image size = ", im.size
				#im.show()
				#im.save("temp2.png")
				cropped2 = highgui.cvLoadImage("temp2.png")
                                #cvShowImage( "cropped", cropped2)

	except:
		print "Exception:", sys.exc_info()[0] 
		print position, pos2, pos3, pos4, pos5, pos6, pos7
		pass

	highgui.cvShowImage("edges", edges)
	#cvShowImage( "log-polar", dst )
	highgui.cvShowImage(wname, temp)
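A wiring sketch for the callback above (not part of the original): on_trackbar1 reads img, wname and pos2..pos7 as globals, so the window names, file name, trackbar label and initial values below are all assumptions.

wname = "circles"
highgui.cvNamedWindow(wname, 1)
highgui.cvNamedWindow("edges", 1)
img = highgui.cvLoadImage("coins.png")                 # hypothetical input image
pos2, pos3, pos4, pos5, pos6, pos7 = 100, 30, 100, 50, 300, 20   # assumed defaults
highgui.cvCreateTrackbar("canny low", wname, 50, 255, on_trackbar1)
on_trackbar1(50)                                       # draw once before the UI loop
highgui.cvWaitKey(0)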