Example #1
def depthmatch(x,y,leftimage,rightimage,roi=80,buf=50,baseline=2.7,focal_length=80):
    """depthmatch function
    x,y : (int) pixel position of target in left image
    leftimage, rightimage : (IplImage) stereo images
    roi: (int) region of interest around x,y to use in matching
    buf: (int) buffer outside of a straight horizontal search for a match
    """
    #print "Match",x,y
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height
    centerx = width/2
    centery = height/2
    
    (y1,x1,y2,x2) = (y-roi,x-roi,y+roi,x+roi)
    if y1<0: y1 = 0
    if x1<0: x1 = 0
    if y2>height: y2 = height
    if x2>width: x2 = width
    # copy subregion roi x roi

    template_rect = cv.cvRect(x1,y1,(x2-x1),(y2-y1))
    template = cv.cvGetSubRect(leftimage, template_rect)
    
    #(y3,x3,y4,x4) = (y-roi-buf,x-roi-buf,y+roi+buf,width) # +/- 20 pixels in vertical direction, -20 to the right edge

    (y3,x3,y4,x4) = (y-roi-buf,0,y+roi+buf,x+roi+buf) # +/- (roi+buf) pixels in the vertical direction, from the left edge to x+roi+buf
    if x3<0: x3 = 0
    if y3<0: y3 = 0
    if x4>=width: x4 = width-1
    if y4>height: y4 = height
    #cv.cvSetImageROI(rightimage, (y3,x3,y4,x4))

    rightsub_rect = cv.cvRect(x3,y3,(x4-x3),(y4-y3))
    rightsub = cv.cvGetSubRect(rightimage, rightsub_rect)
    # result matrix should be (W - w + 1) x (H - h + 1) where WxH are the rightsub (search region) dimensions and wxh are the template dimensions
    W = x4-x3
    H = y4-y3
    w = x2-x1
    h = y2-y1

    resy = (y4-y3)-(y2-y1)+1
    resx = (x4-x3)-(x2-x1)+1

    resultmat = cv.cvCreateImage((resx, resy), 32, 1)
    cv.cvZero(resultmat)
    # match template image in a subportion of rightimage
    cv.cvMatchTemplate(rightsub, template, resultmat, cv.CV_TM_SQDIFF)
    min_val, max_val, min_point, max_point = cv.cvMinMaxLoc(resultmat)
    cv.cvNormalize(resultmat, resultmat, 1, 0, cv.CV_MINMAX)
    depth = plane2point(x-centerx, y-centery, x3+min_point.x+roi-centerx, y3+min_point.y+roi-centery, baseline, focal_length)
    #print "Found match at", min_point.x+x3, min_point.y+y3
    return (depth, (x,y), (x3+min_point.x+roi, y3+min_point.y+roi))
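
A minimal usage sketch for the depthmatch function above, assuming the legacy OpenCV SWIG bindings (cv, highgui) used throughout these examples, that plane2point is available in the same module, and hypothetical image file names:

from opencv import cv, highgui  # assumption: legacy OpenCV 1.x SWIG bindings

left = highgui.cvLoadImage("left.png")    # hypothetical stereo pair
right = highgui.cvLoadImage("right.png")

# Query the depth of the pixel at the centre of the left image.
size = cv.cvGetSize(left)
depth, target, match = depthmatch(size.width/2, size.height/2, left, right)
print "depth:", depth, "match in right image:", match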
Example #2
	def read(self):

		raw_thresh = self.thresh.read()

		cvt_red = cv.cvCreateImage(cv.cvSize(raw_thresh.width,raw_thresh.height),raw_thresh.depth,1)
		cv.cvSplit(raw_thresh,cvt_red,None,None,None)
		cvpt_min = cv.cvPoint(0,0)
		cvpt_max = cv.cvPoint(0,0)
		t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)

		if cvpt_max.x == 0 and cvpt_max.y == 0 :
			return []
		return [(cvpt_max.x,cvpt_max.y)]
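
For reference, a self-contained sketch of the channel-split plus cvMinMaxLoc pattern that Examples #2, #3 and #6 build on; the file name is hypothetical and the legacy SWIG bindings are assumed:

from opencv import cv, highgui  # assumption: legacy OpenCV 1.x SWIG bindings

frame = highgui.cvLoadImage("frame.png")  # hypothetical BGR input image
channel = cv.cvCreateImage(cv.cvGetSize(frame), frame.depth, 1)
cv.cvSplit(frame, channel, None, None, None)  # pull out a single colour plane

pt_min = cv.cvPoint(0, 0)
pt_max = cv.cvPoint(0, 0)
cv.cvMinMaxLoc(channel, pt_min, pt_max)  # fills pt_min/pt_max with the extrema locations
print "brightest pixel of that plane at (%d, %d)" % (pt_max.x, pt_max.y)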
Example #3
	def read(self):
		src = self.camera.read()
		thresh = self.thresh2pg.read()
		red = self.red2pg.read()
		raw_thresh = self.thresh.read()

		cvt_red = cv.cvCreateImage(cv.cvSize(raw_thresh.width,raw_thresh.height),raw_thresh.depth,1)
		cv.cvSplit(raw_thresh,cvt_red,None,None,None)
		cvpt_min = cv.cvPoint(0,0)
		cvpt_max = cv.cvPoint(0,0)
		t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)

		return src,thresh,red,(cvpt_max.x,cvpt_max.y)
Example #4
def matchTemplate(self, template, image):
        '''matchTemplate(self, template, image):
        returns - correlation value of best match (between 0 and 1)
                  top-left coord of template for the best match (cvPoint)
        '''

        matchResultHeight = image.height-template.height+1
        matchResultWidth = image.width-template.width+1

        #print 'matchResultHeight: %d matchResultWidth %d'%(matchResultHeight, matchResultWidth)
        matchResult = cv.cvCreateMat(matchResultHeight, matchResultWidth, cv.CV_32FC1)
        cv.cvMatchTemplate(image, template, matchResult, cv.CV_TM_CCORR_NORMED)

        min_loc = cv.cvPoint(0,0)
        max_loc = cv.cvPoint(0,0)

        min_val, max_val = cv.cvMinMaxLoc(matchResult, min_loc, max_loc)

        return {'image': matchResult , 'max_val':max_val, 'max_loc':max_loc}
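
A hedged sketch of how the returned dictionary might be consumed; tracker, template_img and search_img are invented names, and the acceptance threshold is arbitrary:

# `tracker` stands in for an instance of whichever class defines the method above.
result = tracker.matchTemplate(template_img, search_img)
score = result['max_val']   # CV_TM_CCORR_NORMED score, roughly between 0 and 1
loc = result['max_loc']     # cvPoint: top-left corner of the best match
if score > 0.9:             # arbitrary acceptance threshold
    print "match at (%d, %d), score %.3f" % (loc.x, loc.y, score)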
Example #5
def matchTemplate(self, template, image):
    '''matchTemplate(self, template, image):
    returns - correlation value of best match (between 0 and 1)
              top-left coord of template for the best match (cvPoint)
    '''

    matchResultHeight = image.height - template.height + 1
    matchResultWidth = image.width - template.width + 1

    #print 'matchResultHeight: %d matchResultWidth %d'%(matchResultHeight, matchResultWidth)
    matchResult = cv.cvCreateMat(matchResultHeight, matchResultWidth,
                                 cv.CV_32FC1)
    cv.cvMatchTemplate(image, template, matchResult, cv.CV_TM_CCORR_NORMED)

    min_loc = cv.cvPoint(0, 0)
    max_loc = cv.cvPoint(0, 0)

    min_val, max_val = cv.cvMinMaxLoc(matchResult, min_loc, max_loc)

    return {'image': matchResult, 'max_val': max_val, 'max_loc': max_loc}
Example #6
	def read(self):

		frame = self.input.read()
		if self.debug :
			raw_frame = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,frame.nChannels)
			cv.cvCopy(frame,raw_frame,None)
			self.raw_frame_surface=pygame.image.frombuffer(raw_frame.imageData,(frame.width,frame.height),'RGB')

		if self.enabled :

			cvt_red = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),frame.depth,1)
			cv.cvSplit(frame,None,None,cvt_red,None)

			if self.debug :
				red_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
				cv.cvMerge(cvt_red,None,None,None,red_frame)
				self.red_frame_surface = pygame.image.frombuffer(red_frame.imageData,(cvt_red.width,cvt_red.height),'RGB')

			# I think these functions are too specialized for transforms
			cv.cvSmooth(cvt_red,cvt_red,cv.CV_GAUSSIAN,3, 0, 0, 0 )
			cv.cvErode(cvt_red, cvt_red, None, 1)
			cv.cvDilate(cvt_red, cvt_red, None, 1)

			if self.debug :
				thresh_frame = cv.cvCreateImage(cv.cvSize(cvt_red.width,cvt_red.height),cvt_red.depth,3)
				cv.cvMerge(cvt_red,None,None,None,thresh_frame)
				self.thresh_frame_surface = pygame.image.frombuffer(thresh_frame.imageData,(cvt_red.width,cvt_red.height),'RGB')

			cvpt_min = cv.cvPoint(0,0)
			cvpt_max = cv.cvPoint(0,0)
			t = cv.cvMinMaxLoc(cvt_red,cvpt_min,cvpt_max)

			print t
			if cvpt_max.x == 0 and cvpt_max.y == 0 :
				return []
			return [(cvpt_max.x,cvpt_max.y)]
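
A rough sketch of a caller for the read method above when debug is enabled; transform and the window size are invented, while the surface attribute names come from the code:

import pygame  # assumption: the same pygame already used by the class above

pygame.init()
screen = pygame.display.set_mode((640, 480))  # hypothetical window size

# `transform` stands in for an already-constructed instance of the class above.
points = transform.read()
if transform.debug:
    screen.blit(transform.raw_frame_surface, (0, 0))
    pygame.display.flip()
if points:
    print "brightest red pixel at (%d, %d)" % points[0]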
Example #7
def depthmatch(x,y,leftimage,rightimage,roi=20,buf=10,debug=False):
    __doc__ = """depthmatch function
    x,y : (int) pixel position of target in left image
    leftimage, rightimage : (IplImage) stereo images
    roi: (int) region of interest around x,y to use in matching
    buf: (int) buffer outside of a straight horizontal search for a match
    """
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height

    (y1,x1,y2,x2) = (y-roi,x-roi,y+roi,x+roi)
    #template = cv.cvCreateImage((roi*2,roi*2), 8, 3)
    if y1<0: y1 = 0
    if x1<0: x1 = 0
    if y2>height: y2 = height
    if x2>width: x2 = width
    #cv.cvSetZero(template)
    # copy subregion roi x roi

    template_rect = cv.cvRect(x1,y1,(x2-x1),(y2-y1))
    template = cv.cvGetSubRect(leftimage, template_rect)
    (y3,x3,y4,x4) = (y-roi-buf,x-roi-buf,y+roi+buf,width) # +/- (roi+buf) pixels in the vertical direction, from x-roi-buf out to the right edge
    if x3<0: x3 = 0
    if y3<0: y3 = 0
    if x4>=width: x4 = width-1
    if y4>height: y4 = height
    #cv.cvSetImageROI(rightimage, (y3,x3,y4,x4))

    rightsub_rect = cv.cvRect(x3,y3,(x4-x3),(y4-y3))
    rightsub = cv.cvGetSubRect(rightimage, rightsub_rect)
    # result matrix should be (W - w + 1) x (H - h + 1) where WxH are the rightsub (search region) dimensions and wxh are the template dimensions
    W = x4-x3
    H = y4-y3
    w = x2-x1
    h = y2-y1

    resy = (y4-y3)-(y2-y1)+1
    resx = (x4-x3)-(x2-x1)+1

    resultmat = cv.cvCreateImage((resx, resy), 32, 1)
    cv.cvZero(resultmat)
    # match template image in a subportion of rightimage
    cv.cvMatchTemplate(rightsub, template, resultmat, cv.CV_TM_SQDIFF)
    min_val, max_val, min_point, max_point = cv.cvMinMaxLoc(resultmat)
    cv.cvNormalize(resultmat, resultmat, 1, 0, cv.CV_MINMAX)
    depth = stereo.depth(x, x3+min_point.x, max_pixels=width/2)
    
    if debug:
        print "Input image: %ix%i, target: (%i,%i)" % (width,height,x,y)
        print "Template box: (%i,%i) to (%i,%i)" % (x1, y1, x2, y2)
        print "Search area: (%i,%i) to (%i,%i)" % (x3, y3, x4, y4)
        print "%ix%i, %ix%i" % (W,H,w,h)
        print "Result matrix %ix%i" % (resx, resy)
        print "stereo.depth(%i,%i,max_pixels=%i)" % (x, min_point.x+x3,width/2)
        if depth[0]:
            print "Depth: ", depth[0], "(cm)"
        #cv.cvRectangle(rightimage, cv.cvPoint(x1,y1), cv.cvPoint(x2,y2), (255,0,0))
        cv.cvRectangle(rightimage, cv.cvPoint(min_point.x+x3,min_point.y+y3), cv.cvPoint(min_point.x+x3+roi*2,min_point.y+y3+roi*2), (0,255,0))
        cv.cvRectangle(rightimage, cv.cvPoint(x3,y3), cv.cvPoint(x4,y4), (0,0,255))
        cv.cvRectangle(leftimage, cv.cvPoint(x1,y1), cv.cvPoint(x2,y2), (255,0,0))
        #cv.cvRectangle(leftimage, cv.cvPoint(min_point.x+x3,min_point.y+y3), cv.cvPoint(min_point.x+x3+roi*2,min_point.y+y3+roi*2), (0,255,0))
        cv.cvRectangle(leftimage, cv.cvPoint(x3,y3), cv.cvPoint(x4,y4), (0,0,255))
        if depth[0]:
            cv.cvPutText(leftimage, "%5f(cm)" % depth[0], (x1,y1), font, (255,255,255))
        highgui.cvShowImage("depthmatch - template", template)
        highgui.cvShowImage("depthmatch - match", resultmat)
        highgui.cvShowImage("depthmatch - right", rightimage)
        highgui.cvShowImage("depthmatch - left", leftimage)
Example #8
def compute_saliency(image):
    global thresh
    global scale
    saliency_scale = int(math.pow(2, scale))
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U,1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale,saliency_scale), cv.IPL_DEPTH_8U,1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    realInput = cv.cvCreateImage(cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1)
    imaginaryInput = cv.cvCreateImage(cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1)
    complexInput = cv.cvCreateImage(cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2)

    cv.cvScale(bw_im, realInput, 1.0, 0.0)
    cv.cvZero(imaginaryInput)
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput)

    dft_M = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.height - 1 );
    dft_N = saliency_scale #cv.cvGetOptimalDFTSize( bw_im.width - 1 );

    dft_A = cv.cvCreateMat(dft_M, dft_N, cv.CV_32FC2)
    image_Re = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    image_Im = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)

    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect(dft_A, cv.cvRect(0, 0, bw_im.width, bw_im.height))
    cv.cvCopy(complexInput, tmp, None)
    if dft_A.width > bw_im.width:
        tmp = cv.cvGetSubRect(dft_A, cv.cvRect(bw_im.width, 0, dft_N - bw_im.width, bw_im.height))
        cv.cvZero(tmp)

    cv.cvDFT(dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height)
    cv.cvSplit(dft_A, image_Re, image_Im, None, None)
    
    # Compute the phase angle 
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    

    #compute the phase of the spectrum
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)

    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    cv.cvLog(image_Mag, log_mag)
    # Box filter the magnitude, then take the difference
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    filt = cv.cvCreateMat(3, 3, cv.CV_32FC1)
    cv.cvSet(filt, cv.cvScalarAll(-1.0/9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1,-1))

    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    cv.cvExp(log_mag, log_mag)
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im, 0)

    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT(dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)

    tmp = cv.cvGetSubRect(dft_A, cv.cvRect(0, 0, bw_im.width, bw_im.height))
    cv.cvCopy(tmp, complexInput, None)
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    min_val, max_val = cv.cvMinMaxLoc(realInput)
    #cv.cvScale(realInput, realInput, 1.0/(max_val-min_val), 1.0*(-min_val)/(max_val-min_val))
    cv.cvSmooth(realInput, realInput)
    threshold = thresh/100.0*cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1), cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput, tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255, 0)
    return bw_im1
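
A hedged driver sketch for compute_saliency, assuming it sits in the same module so the thresh and scale globals it reads can be set here; the values and the input file name are invented, and the "BW" window matches the one the function itself writes to:

from opencv import cv, highgui  # assumption: legacy OpenCV 1.x SWIG bindings
import math                     # needed by compute_saliency itself

thresh = 50  # hypothetical: threshold as a percentage of the mean response
scale = 6    # hypothetical: the frame is resized to 2**scale x 2**scale pixels

highgui.cvNamedWindow("BW", highgui.CV_WINDOW_AUTOSIZE)        # used inside compute_saliency
highgui.cvNamedWindow("Saliency", highgui.CV_WINDOW_AUTOSIZE)

img = highgui.cvLoadImage("frame.png")  # hypothetical input frame
saliency_map = compute_saliency(img)
highgui.cvShowImage("Saliency", saliency_map)
highgui.cvWaitKey(0)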
Example #9
	def draw(self,screen) :

		if not self.is_calibrated :
			screen.fill(THECOLORS["white"])
			checkerboard = pygame.image.load('checkerboard.png')
			checkerboard = pygame.transform.scale(checkerboard,self.dims)
			screen.blit(checkerboard,(0,0))
			pygame.display.flip()
			corners = self.lazer._calibrate_camera()
			self.is_calibrated = True
		else :
			#frame = self.lazer.get_curr_frame()
			#screen.blit(frame,(0,0))
			pass

			"""
			pt = self.input.get_com()
			if pt is None : pt = (0,0)
			if self.blink_phase == 0 :
				self.pointer_sprite.update(pt)
			else :
				self.blink_phase -= 1
				self.pointer_sprite.update(None)

			if self.pointer_sprite.on :
				self.curr_line.append(pt)
			else :
				self.curr_line = []
				self.lines.append(self.curr_line)

			if self.pointer_sprite.clicked :
				self.button.click()

			for l in self.lines :
				if len(l) > 1 :
					pygame.draw.aalines(screen,THECOLORS["black"],False,l,1)
			"""

			# don't try this at home, kids
			frame = self.lazer._get_cv_frame()
			thresh = 248
			red = self.lazer._get_color_frame('r',frame)
			cvpt_min = cv.cvPoint(0,0)
			cvpt_max = cv.cvPoint(0,0)
			t = cv.cvMinMaxLoc(red,cvpt_min,cvpt_max)
			#print t,cvpt_min,cvpt_max

			#red = self.lazer._get_scaled_frame(red)
			#red = self.lazer._get_threshold_frame(frame=red,thresh=thresh,type=CV_THRESH_BINARY)

			blue = self.lazer._get_color_frame('b',frame)
			#blue = self.lazer._get_threshold_frame(frame=blue,thresh=thresh,type=CV_THRESH_BINARY)
			blue = self.lazer._cv_to_pygame(blue,channel=2)
			blue = pygame.transform.scale(blue,(self.dims[0]/2,self.dims[1]/2))

			green = self.lazer._get_color_frame('g',frame)
			#green = self.lazer._get_threshold_frame(frame=green,thresh=thresh,type=CV_THRESH_BINARY)
			green = self.lazer._cv_to_pygame(green,channel=1)
			green = pygame.transform.scale(green,(self.dims[0]/2,self.dims[1]/2))

			value = self.lazer._get_hsv_frame(frame)
			value = self.lazer._get_frame_channel(value,2)
			value = self.lazer._get_shifted_frame(-215,value)
			#value = self.lazer._get_threshold_frame(frame=value,thresh=thresh,type=CV_THRESH_BINARY)

			comb = self.lazer._multiply_frames(red,value)
			comb = self.lazer._cv_to_pygame(comb,channel=5)
			comb = pygame.transform.scale(comb,(self.dims[0]/2,self.dims[1]/2))

			red = self.lazer._cv_to_pygame(red,channel=0)
			red = pygame.transform.scale(red,(self.dims[0]/2,self.dims[1]/2))

			value = self.lazer._cv_to_pygame(value,channel=-1)
			value = pygame.transform.scale(value,(self.dims[0]/2,self.dims[1]/2))

			#frame = self.lazer.get_curr_frame()
			frame = self.lazer._cv_to_pygame(frame)
			frame = pygame.transform.scale(frame,(self.dims[0]/2,self.dims[1]/2))

			screen.blit(red,(0,0))
			screen.blit(blue,(self.dims[0]/2,0))
			screen.blit(green,(0,self.dims[1]/2))
			screen.blit(frame,(self.dims[0]/2,self.dims[1]/2))

			screen.fill((100,100,100,255))

			if t[1] >= 0 :
				pygame.draw.circle(screen,THECOLORS["red"],(cvpt_max.x,cvpt_max.y),5,1)
				pygame.draw.circle(screen,THECOLORS["white"],(cvpt_max.x,cvpt_max.y),3,0)
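
Finally, a rough sketch of the pygame loop this draw method appears to expect; the class name, window size and frame rate are all invented:

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))  # hypothetical window size
clock = pygame.time.Clock()
view = LaserBoard()  # hypothetical: an instance of the class defining draw() above

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    view.draw(screen)
    pygame.display.flip()
    clock.tick(30)  # hypothetical frame rate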