Пример #1
0
    def getStandardizedRects(self):
        '''
        @return: the boxes centered on the target center of mass +- n_sigma*std
        @note: You must call detect() before getStandardizedRects() to see updated results.
        '''
        #create a list of the top-level contours found in the contours (cv.Seq) structure
        rects = []
        if len(self._contours) < 1: return rects
        seq = self._contours
        # walk the singly linked list of top-level contours via h_next()
        while seq is not None:
            if cv.ContourArea(seq) > self._minArea:  # and  self._filter(rect)
                # NOTE: the original also computed cv.BoundingRect(seq) and a
                # pv.Rect from it, but both were immediately overwritten by the
                # moment-based CenteredRect below — dead code, removed.
                moments = cv.Moments(seq)
                m_0_0 = cv.GetSpatialMoment(moments, 0, 0)  # blob area
                m_0_1 = cv.GetSpatialMoment(moments, 0, 1)
                m_1_0 = cv.GetSpatialMoment(moments, 1, 0)
                mu_2_0 = cv.GetCentralMoment(moments, 2, 0)
                mu_0_2 = cv.GetCentralMoment(moments, 0, 2)

                # center of mass, and box dimensions of +- rect_sigma std devs
                cx = m_1_0 / m_0_0
                cy = m_0_1 / m_0_0
                w = 2.0 * self._rect_sigma * np.sqrt(mu_2_0 / m_0_0)
                h = 2.0 * self._rect_sigma * np.sqrt(mu_0_2 / m_0_0)

                rects.append(pv.CenteredRect(cx, cy, w, h))
            seq = seq.h_next()

        if self._filter is not None:
            rects = self._filter(rects)

        return rects
Пример #2
0
def get_angle(pts):
    """Return the orientation angle of a point set, derived from its
    second-order central moments: 0.5 * arctan(2*mu11 / (mu20 - mu02))."""
    m = cv.Moments(pts, 0)
    mu20 = cv.GetCentralMoment(m, 2, 0)
    mu02 = cv.GetCentralMoment(m, 0, 2)
    mu11 = cv.GetCentralMoment(m, 1, 1)
    print("Got moments")
    return 1 / 2.0 * arctan((2 * mu11 / float(mu20 - mu02)))
Пример #3
0
def yellow(img):
    """Track two colored markers in *img*: update module globals (yx, yy) with
    the yellow marker's centroid and (bx, by) with the second marker's, and
    draw a dot on each detected marker."""
    global yx
    global yy
    global bx
    global by

    #blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)

    #convert the image to hsv(Hue, Saturation, Value) so its
    #easier to determine the color to track(hue)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

    #limit all pixels that don't match our criteria; adjust the first value in
    #both tuples to change the hue range.  OpenCV uses 0-180 as
    #a hue range for the HSV color model
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

    cv.InRangeS(hsv_img, (20, 100, 100), (30, 255, 255),
                thresholded_img)  #yellow
    #determine the objects moments and check that the area is large
    #enough to be our object
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    #there can be noise in the video so ignore objects with small areas
    if (area > 100000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area

        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(x)
        y = int(y)
        yx = x
        yy = y
        cv.Circle(img, (x, y), 5, (0, 0, 0), -1)

    cv.InRangeS(hsv_img, (100, 80, 80), (120, 255, 255),
                thresholded_img)  #labeled pink, but hue 100-120 is the blue
                                  #band in OpenCV HSV — TODO confirm

    #determine the objects moments and check that the area is large
    #enough to be our object
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    #there can be noise in the video so ignore objects with small areas
    if (area > 100):
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(x)
        y = int(y)
        bx = x
        by = y
        cv.Circle(img, (x, y), 5, (0, 0, 255), -1)
Пример #4
0
def center_of_mass(moments):
    """Return the (x, y) center of mass of a blob from its OpenCV *moments*,
    or None when OpenCV cannot be imported.

    BUG FIX: the original divided first-order *central* moments by the area.
    Central moments mu10 and mu01 are identically zero by definition, so the
    function always returned (0.0, 0.0).  The centroid is m10/m00, m01/m00
    computed from the *spatial* (raw) moments.
    """
    try:
        import cv
    except ImportError:
        print('Module %s:' % sys.modules[__name__])
        print('OpenCV is not available, the peak characterization functions will not work.')
        return None
    m00 = cv.GetSpatialMoment(moments, 0, 0)
    x = cv.GetSpatialMoment(moments, 1, 0) / m00
    y = cv.GetSpatialMoment(moments, 0, 1) / m00
    return x, y
    def angle(self, img):
	"""Return the angle (in degrees) at the red marker between the yellow
	and blue markers: each marker's centroid is taken from its color
	threshold's moments, the three pairwise distances are computed, and
	the law of cosines gives the angle.  Also shows the three threshold
	images in debug windows."""
	# extract position of red blue yellow markers
	# find distance between pairs
	# return angle from inverse cosine
	
	imgHSV = cv.CreateImage(cv.GetSize(img), 8, 3)
	cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)
	cv.NamedWindow("red", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("red", 800, 0)
	cv.NamedWindow("blue", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("blue", 800, 100)
	cv.NamedWindow("yellow", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("yellow", 800, 200)
	
	dot_coords = []
	# use the corresponding thresholds for each color of marker #
	for h_low, h_high, col in [self.red_hues, self.yellow_hues, self.blue_hues]:
	    imgThresh = cv.CreateImage(cv.GetSize(img), 8, 1)
	    cv.InRangeS(imgHSV, cv.Scalar(h_low, 70, 70), cv.Scalar(h_high, 255, 255), imgThresh)
 	    moments = cv.Moments(cv.GetMat(imgThresh))
	    x_mov = cv.GetSpatialMoment(moments, 1, 0)
	    y_mov = cv.GetSpatialMoment(moments, 0, 1)
	    area = cv.GetCentralMoment(moments, 0, 0)
            small_thresh = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 1)
	    cv.Resize(imgThresh, small_thresh)

	    if col == "r":
		cv.ShowImage("red", small_thresh)
	    elif col == "b":
		cv.ShowImage("blue", small_thresh)
	    elif col == "y":
		cv.ShowImage("yellow", small_thresh)
	    # centroid = spatial moments / area; (0, 0) when nothing matched
	    if area > 0:
		posX = float(x_mov)/float(area)
	    	posY = float(y_mov)/float(area)
	    else:
		posX = 0
		posY = 0
	    dot_coords.append([posX, posY])
	
	r = dot_coords[0]
	y = dot_coords[1]
	b = dot_coords[2]
	# get side lengths
	y_r = self.dist(r[0], r[1], y[0], y[1])
	r_b = self.dist(b[0], b[1], r[0], r[1])
	y_b = self.dist(b[0], b[1], y[0], y[1])
	# apply law of cosines
	angle_in_rads = math.pow(y_r, 2) + math.pow(r_b, 2) - math.pow(y_b, 2)
	denom = 2.0 * y_r * r_b
	if denom > 0:
	     angle_in_rads /= 2.0 * y_r * r_b
	else:
	     angle_in_rads = 0
	rads = math.acos(angle_in_rads)
	# convert to degrees
	degs = rads * float(180.0 / math.pi)
	if degs < 0 or degs > 360: 
	     degs = 0
	return degs
Пример #6
0
 def do1Image(self, image, prevpoints):
     """For each of the four configured ROIs, threshold *image*, locate the
     contour centroid and return the four [x, y] points (full-image
     coordinates).  Falls back to the previous point, or the ROI origin,
     when no blob is found in an ROI."""
     #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
     #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
     #http://opencv-users.1802565.n2.nabble.com/Python-cv-Moments-Need-Help-td6044177.html
     #http://stackoverflow.com/questions/5132874/change-elements-in-a-cvseq-in-python
     img = self.getThreshold(image)
     points = []
     for i in range(4):
         cv.SetImageROI(img, (int(
             self.RectanglePoints[i][0]), int(self.RectanglePoints[i][1]),
                              int(self.RectanglePoints[i][2]),
                              int(self.RectanglePoints[i][3])))
         storage = cv.CreateMemStorage(0)
         contours = cv.FindContours(img, storage)
         moments = cv.Moments(contours)
         moment10 = cv.GetSpatialMoment(moments, 1, 0)
         moment01 = cv.GetSpatialMoment(moments, 0, 1)
         area = cv.GetCentralMoment(moments, 0, 0)
         cv.ResetImageROI(img)
         if (area != 0):
             # centroid relative to the full image: ROI offset + local centroid
             x = self.RectanglePoints[i][0] + (moment10 / area)
             y = self.RectanglePoints[i][1] + (moment01 / area)
         else:
             # no blob found: reuse the previous point if there is one
             if (prevpoints[i][0] == 0):
                 x = self.RectanglePoints[i][0]
                 y = self.RectanglePoints[i][1]
             else:
                 x = prevpoints[i][0]
                 y = prevpoints[i][1]
         points.append([x, y])
     return points
Пример #7
0
def extract_circles(contours, rgb):
    """Convert blob contours into (x, y, radius*1.5) circles and steer toward
    qualifying blobs via adjust_carrot().

    @param contours: iterable of contours (numpy arrays; converted via cv.fromarray)
    @param rgb: passed by the caller; not used in this function body
    @return: list of (x, y, int(radius * 1.5)) for blobs above OBJECT_AREA
    """
    global current_pose_editor, side
    circles = []
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > OBJECT_AREA:
            # centroid from first-order spatial moments; radius assumes a disc
            x = int(cv.GetSpatialMoment(moments, 1, 0) / area)
            y = int(cv.GetSpatialMoment(moments, 0, 1) / area)
            radius = int(math.sqrt(area / math.pi))
            circles.append((x, y, int(radius * 1.5)))
            if (y > 100):
                # NOTE(review): adjust_carrot is called unconditionally here and
                # then possibly again in a branch below — looks like a duplicated
                # call; confirm intent before changing.
                adjust_carrot(x, y, area)
                if (side == 'left' and x < 420):
                    adjust_carrot(x, y, area)  #use just visual servo
                elif (side == 'right' and x > 420):
                    adjust_carrot(x, y, area)  #use just visual servo
            '''
            point = check_lidar((x,y),radius)               #use if you want to use lidar to confirm waypoint
            if (point[0]):
                print 'going to:',point[1]
                waypoint.send_goal_and_wait(current_pose_editor.relative(np.array([point[1][0], point[1][1] + .5, 0])).as_MoveToGoal(speed = .5))
                circles.append((x,y,int(radius*1.5)))
            '''
    return circles
Пример #8
0
def detect_and_draw(img):
    """Threshold *img* for near-white pixels in HSV space, compute the blob
    centroid from image moments, mark it on the frame and display it."""
    t1 = time.time()

    # allocate temporary images
    # NOTE(review): gray is never written before the Resize below, so
    # small_img/EqualizeHist operate on an uninitialized buffer — looks like
    # leftover scaffolding from a face-detection sample; confirm before removal.
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    #cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)

    # White
    sensitivity = 15
    cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255),
                thresholded_img)

    # Red
    #cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)

    # Blue
    #cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)

    # Green
    #cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    # ignore small noise blobs
    if (area > 5000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(round(x))
        y = int(round(y))

        #create an overlay to mark the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

        cv.Circle(overlay, (x, y), 2, (0, 0, 0), 20)
        cv.Add(img, overlay, img)
        #add the thresholded image back to the img so we can see what was
        #left after it was applied
        #cv.Merge(thresholded_img, None, None, None, img)
        t2 = time.time()
        message = "Color tracked!"
        print "detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y)

    cv.ShowImage("Color detection", img)
Пример #9
0
def get_contour_center(moments):
    """Return the (x, y) centroid of a contour, computed from its moments."""
    m10 = cv.GetSpatialMoment(moments, 1, 0)
    m01 = cv.GetSpatialMoment(moments, 0, 1)
    area = abs(cv.GetCentralMoment(moments, 0, 0))

    # Ensuring that there is no division by zero.
    # PLEASE DO NOT TOUCH THIS, DO NOT TRY TO AVOID 0 DIVISION BY ADDING
    # A VALUE TO AREA BELOW, BECAUSE IT WOULD FAIL IN SOME CASES
    if not area:
        area = 0.01
    return (m10 / area, m01 / area)
Пример #10
0
def main():
    """Poll the camera and, on every 20th frame, threshold it against the
    configured color range and print whether the object lies left, right or
    centered (Spanish labels) relative to config.min_x/max_x."""
    pr_window = "imagen"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(pr_window, 1)

    #    set the capture frame size |-| comment out when the window is not needed
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, config.ancho)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, config.alto)
    delay = 0
    while True:
        if (not (delay == 20)):
            # skip 19 of every 20 frames to keep the loop cheap
            delay += 1
            img = cv.QueryFrame(capture)
            #cv.ReleaseCapture( img )
        else:
            delay = 0
            frame = cv.QueryFrame(capture)
            maskN = cv.CreateImage(cv.GetSize(frame), 8, 1)
            hsvN = cv.CloneImage(frame)

            cv.Smooth(frame, frame, cv.CV_BLUR, 3)

            cv.CvtColor(frame, hsvN, cv.CV_BGR2HSV)
            cv.InRangeS(hsvN, config.min_range, config.max_range, maskN)

            # zeroth moment == pixel area of the binary mask
            moment = cv.Moments(cv.GetMat(maskN), 0)
            a = cv.GetCentralMoment(moment, 0, 0)

            if a > config.min_area:
                X = int(cv.GetSpatialMoment(moment, 1, 0) / a)
                print "X: " + str(X)
                print "min: " + str(config.min_x)
                print "max: " + str(config.max_x)
                #Y = int(cv.GetSpatialMoment (moment, 0, 1) / a)
                if X > config.max_x:
                    print "derecha"
                elif X < config.min_x:
                    print "izquierda"
                else:
                    print "centrado"
            else:
                print "objeto no detectado o muy pequeno"

            cv.ShowImage(pr_window, maskN)


#        uncomment for debugging
#        X = int(cv.GetSpatialMoment (moment, 1, 0) / a)
#        print 'x: ' + str (X)  + ' area: ' + str (a)

# This cuts the loop and exits
#        if cv.WaitKey (100) != -1:
#            break

    return
Пример #11
0
def main():
    """Track a colored blob from the camera, drawing a scribble line that
    follows the blob's centroid between frames, until any key is pressed."""
    color_tracker_window = "output"
    thresh_window = "thresh"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(color_tracker_window, 1)
    cv.NamedWindow(thresh_window, 1)
    imgScrible = None
    global posX
    global posY

    fido.init_servos()

    while True:
        frame = cv.QueryFrame(capture)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)

        # lazily create the scribble layer once the frame size is known
        if (imgScrible is None):
            imgScrible = cv.CreateImage(cv.GetSize(frame), 8, 3)

        imgThresh = GetThresholdedImage(frame)

        mat = cv.GetMat(imgThresh)
        #Calculating the moments
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)

        #lastX and lastY store the previous positions
        lastX = posX
        lastY = posY
        #Finding a big enough blob
        if (area > 100000):

            #Calculating the coordinate position of the centroid
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            print 'x: ' + str(posX) + ' y: ' + str(posY) + ' area: ' + str(
                area)
            #drawing lines to track the movement of the blob
            if (lastX > 0 and lastY > 0 and posX > 0 and posY > 0):
                cv.Line(imgScrible, (posX, posY), (lastX, lastY),
                        cv.Scalar(0, 255, 255), 5)
            #Adds the three layers and stores it in the frame
            #frame -> it has the camera stream
            #imgScrible -> it has the line tracking the movement of the blob
            cv.Add(frame, imgScrible, frame)

        cv.ShowImage(thresh_window, imgThresh)
        cv.ShowImage(color_tracker_window, frame)
        c = cv.WaitKey(10)
        if (c != -1):
            break
Пример #12
0
 def run(self):
     """Camera loop: threshold each frame for the configured hue band and
     mark the centroid of any sufficiently large blob, until ESC is pressed."""
     while True:
         img = cv.QueryFrame( self.capture )

         #blur the source image to reduce color noise
         cv.Smooth(img, img, cv.CV_BLUR, 3);

         #convert the image to hsv(Hue, Saturation, Value) so its
         #easier to determine the color to track(hue)
         hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
         cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

         #limit all pixels that don't match our criteria; adjust the first
         #value in both tuples to change the hue range (115,135 here).
         #OpenCV uses 0-180 as a hue range for the HSV color model
         thresholded_img =  cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
         cv.InRangeS(hsv_img, (115, 75, 75), (135, 255, 255), thresholded_img)

         #determine the objects moments and check that the area is large
         #enough to be our object
         thresholded_img2 = cv.GetMat(thresholded_img)
         moments = cv.Moments(thresholded_img2,0)
         area = cv.GetCentralMoment(moments, 0, 0)

         #there can be noise in the video so ignore objects with small areas
         if(area > 100000):
             #determine the x and y coordinates of the center of the object
             #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
             x = cv.GetSpatialMoment(moments, 1, 0)/area
             y = cv.GetSpatialMoment(moments, 0, 1)/area

             # print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area)


             x = int(x)
             y = int(y)

             #create an overlay to mark the center of the tracked object
             overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

             cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
             cv.Add(img, overlay, img)
             #add the thresholded image back to the img so we can see what was
             #left after it was applied
             cv.Merge(thresholded_img, None, None, None, img)

         #display the image
         cv.ShowImage(color_tracker_window, img)

         if cv.WaitKey(10) == 27:
             break
Пример #13
0
    def _get_pos_spatial(self, th_img):
        """Return the [x, y] centroid of the thresholded image *th_img*,
        or None when the blob area is too small to be meaningful."""
        print("Getting spacial position (?)")
        m = cv.Moments(cv.GetMat(th_img))
        m10 = cv.GetSpatialMoment(m, 1, 0)
        m01 = cv.GetSpatialMoment(m, 0, 1)
        area = cv.GetCentralMoment(m, 0, 0)

        # treat tiny blobs as noise
        if area <= 10:
            return None
        return [int(m10 / area), int(m01 / area)]
Пример #14
0
def getBinaryClean(frame):
    """Return a fresh binary image containing only the external contours of
    *frame* whose area exceeds 20, drawn filled — i.e. the input with small
    noise blobs removed."""
    cleaned = cv.CreateImage(cv.GetSize(frame), 8, 1)
    scratch = cv.CloneImage(frame)  # FindContours modifies its input
    contour = cv.FindContours(scratch,
                              cv.CreateMemStorage(0),
                              mode=cv.CV_RETR_EXTERNAL)
    while contour:
        blob_area = cv.GetCentralMoment(cv.Moments(contour), 0, 0)
        if blob_area > 20:
            cv.DrawContours(cleaned, contour, (255, 255, 255), (255, 255, 255),
                            2, cv.CV_FILLED)
        contour = contour.h_next()
    return cleaned
Пример #15
0
    def detectYellow(self, sub):
        """Threshold *sub* for yellow, segment the result and return the first
        segment whose size matches a 'T' as (entity, None); returns
        (None, None) when no segment matches."""
        img = self.threshold.yellowT(sub)
        yellow = self.segment(cv.CloneImage(img))
        # debug: report the thresholded blob area (zeroth moment)
        moments = cv.Moments(img, 1)
        asd = cv.GetCentralMoment(moments, 0, 0)
        print "STUFF:", asd
        #yellow = self.segment(self.threshold.yellowT(sub))
        img = None

        for Y in yellow:
            if self.sizeMatch(Y, 'T'):
                logging.info("found a yellow T of size %s at %s",
                             *entDimPos(Y))
                return Y, img
        return None, None
Пример #16
0
def main():
    """Track a red object from the camera and push its (X, Y) centroid to a
    Scratch instance as sensor updates, until ESC is pressed."""
    s = scratch.Scratch()
    capture = cv.CaptureFromCAM(0)
    cv.NamedWindow("Track", 1)

    while True:
        #capture frame, downscaled to 3/8 size for speed
        frame_o = cv.QueryFrame(capture)
        frame = cv.CreateImage((frame_o.width * 3 / 8, frame_o.height * 3 / 8),
                               frame_o.depth, frame_o.nChannels)
        cv.Resize(frame_o, frame)
        cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)

        #Convert to HSV
        imgHSV = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)

        #Threshold for low-hue (red) pixels
        imgThreshed = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.InRangeS(imgHSV, cv.Scalar(0, 124, 221), cv.Scalar(10, 255, 256),
                    imgThreshed)
        cv.Smooth(imgThreshed, imgThreshed, cv.CV_GAUSSIAN, 3, 3)

        mat = cv.GetMat(imgThreshed)
        moments = cv.Moments(mat)

        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0)

        #ignore small noise blobs
        if area > 1000:
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            if posX >= 0 and posY >= 0:
                print("X: " + str(posX) + ", Y: " + str(posY))
                cv.Rectangle(frame, (posX - 10, posY - 10),
                             (posX + 10, posY + 10), cv.RGB(0, 255, 0))
                s.sensorupdate({'X': posX})
                s.sensorupdate({'Y': posY})

        cv.ShowImage("Track", frame)
        k = cv.WaitKey(70)
        if k % 0x100 == 27:
            break
Пример #17
0
def extract_circles(contours, rgb):
    """Return (x, y, radius*1.5) circles for blobs above OBJECT_AREA that are
    confirmed by the lidar; appends a marker for each confirmed point and
    raises the global new_buoy flag."""
    circles = []
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > OBJECT_AREA:
            # centroid from first-order spatial moments; radius assumes a disc
            x = int(cv.GetSpatialMoment(moments, 1, 0) / area)
            y = int(cv.GetSpatialMoment(moments, 0, 1) / area)
            radius = int(math.sqrt(area / math.pi))

            point = check_lidar((x, y), radius)
            if (point[0]):
                circles.append((x, y, int(radius * 1.5)))
                append_marker(point[1], rgb)
                global new_buoy
                new_buoy = True
    return circles
    def calibrate_screen(self):
        """Interactive hue calibration: open two windows with start/end hue
        trackbars, threshold each camera frame with the selected band, record
        the resulting blob area in self.calibration_area, and display both
        views until q/ESC is pressed or self.end_record is set."""
        # in case something else is still open
        cv.DestroyAllWindows()
        capture = cv.CaptureFromCAM(self.camera_index)
        if not capture:
	    QMessageBox.information(self, "Camera Error", "Camera not found")
    	    return
	cv.NamedWindow("hold up object at preferred distance from camera", cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow("select for max visibility", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("hold up object at preferred distance from camera", 320, 0)
        cv.MoveWindow("select for max visibility", 800, 82)
        cv.CreateTrackbar("Start at color", "hold up object at preferred distance from camera", self.low_color, 179, self.update_low_color)
        cv.CreateTrackbar("End at color", "hold up object at preferred distance from camera", self.high_color, 179, self.update_high_color)
        camera_on = True
        while camera_on:
	    if (not self.busy_updating):
		frame = cv.QueryFrame(capture)
		if not frame:
	    		break
		# convert color to hue space for easier tracking
		imgHSV = cv.CreateImage(cv.GetSize(frame), 8, 3)
		cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)
		imgThresh = cv.CreateImage(cv.GetSize(frame), 8, 1)
		# interactive thresholding
		cv.InRangeS(imgHSV, cv.Scalar(self.low_color, self.MED_SV, self.MED_SV), cv.Scalar(self.high_color, self.MAX_SV, self.MAX_SV), imgThresh)

		moments = cv.Moments(cv.GetMat(imgThresh))
		self.calibration_area = cv.GetCentralMoment(moments, 0, 0)
		# shrink images for display
		small_thresh = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 1)
		cv.Resize(imgThresh, small_thresh)
		small_frame = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 3)
		cv.Resize(frame, small_frame)
		cv.ShowImage("hold up object at preferred distance from camera", small_frame)
		cv.ShowImage("select for max visibility", small_thresh)

		k = cv.WaitKey(1)
		# press q or escape to quit camera view
		if k == 27 or k == 113 or self.end_record:
		    camera_on = False
		    cv.DestroyAllWindows()
		    self.end_record = False
		    break
Пример #19
0
def eccentricity(moments):
    """Return the eccentricity measure
    ((mu20' - mu02')**2 - 4*mu11'**2) / (mu20' + mu02')**2
    of a blob from its normalized central moments, or None when OpenCV
    cannot be imported.

    Note: the original bound GetCentralMoment(m, 2, 0) to a variable named
    mu02p (and vice versa).  The names are corrected here; the returned value
    is unchanged because the formula is symmetric under that swap.
    """
    try:
        import cv
    except ImportError:
        print('Module %s:' % sys.modules[__name__])
        print('OpenCV is not available, the peak characterization functions will not work.')
        return None
    mu00 = cv.GetCentralMoment(moments, 0, 0)
    mu11p = cv.GetCentralMoment(moments, 1, 1) / mu00
    mu20p = cv.GetCentralMoment(moments, 2, 0) / mu00
    mu02p = cv.GetCentralMoment(moments, 0, 2) / mu00
    return ((mu20p - mu02p)**2 - 4 * mu11p**2) / (mu20p + mu02p)**2
Пример #20
0
def orientation(moments):
    """Return the orientation angle 0.5 * arctan(2*mu11' / (mu20' - mu02'))
    (radians) of a blob from its normalized central moments, or None when
    OpenCV cannot be imported.

    BUG FIX: the original assigned GetCentralMoment(m, 2, 0) (which is mu20)
    to mu02p and GetCentralMoment(m, 0, 2) to mu20p, negating the angle's
    denominator.  Compare the correct arctan2(2*mu11, mu20 - mu02) form in
    get_characteristics().
    """
    try:
        import cv
    except ImportError:
        print('Module %s:' % sys.modules[__name__])
        print('OpenCV is not available, the peak characterization functions will not work.')
        return None
    mu00 = cv.GetCentralMoment(moments, 0, 0)
    mu11p = cv.GetCentralMoment(moments, 1, 1) / mu00
    mu20p = cv.GetCentralMoment(moments, 2, 0) / mu00
    mu02p = cv.GetCentralMoment(moments, 0, 2) / mu00
    return 0.5 * np.arctan(2 * mu11p / (mu20p - mu02p))
Пример #21
0
 def getPupil(frame):
   """Threshold *frame* for dark (pupil-colored) pixels, take the first
   external contour with area > 50, store its centroid in the global
   `centroid`, and return a copy of the frame with that contour drawn
   filled in black."""
   pupilImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
   cv.InRangeS(frame, (30, 30, 30), (80, 80, 80), pupilImg)
   contours = cv.FindContours(pupilImg, cv.CreateMemStorage(0), mode=cv.CV_RETR_EXTERNAL)
   del pupilImg
   pupilImg = cv.CloneImage(frame)
   while contours:
       moments = cv.Moments(contours)
       area = cv.GetCentralMoment(moments, 0, 0)
       if (area > 50):
           # NOTE(review): pupilArea is assigned but never used
           pupilArea = area
           x = cv.GetSpatialMoment(moments, 1, 0) / area
           y = cv.GetSpatialMoment(moments, 0, 1) / area
           pupil = contours
           global centroid
           centroid = (int(x), int(y))
           cv.DrawContours(pupilImg, pupil, (0, 0, 0), (0, 0, 0), 2, cv.CV_FILLED)
           break
       contours = contours.h_next()
   return (pupilImg)
Пример #22
0
def get_characteristics(moments):
    """Return a 9-element array of blob statistics from OpenCV *moments*:
    [xCenter, yCenter, 0 (height placeholder), long_axis, short_axis,
    orientation (degrees), eccentricity, xSkew, ySkew].

    Returns None when OpenCV cannot be imported, and an all-zero array when
    the zeroth central moment is 0 (empty blob).
    """
    try:
        import cv
    except ImportError:
        try:
            import cv2.cv as cv
        except ImportError:
            print('Module %s:' % sys.modules[__name__])
            print('OpenCV is not available, the peak characterization functions will not work.')
            return None
    m = moments
    # these are all central moments!
    mu00 = cv.GetCentralMoment(m, 0, 0)
    mu11 = cv.GetCentralMoment(moments, 1, 1)
    mu02 = cv.GetCentralMoment(moments, 0, 2)
    mu20 = cv.GetCentralMoment(moments, 2, 0)
    mu03 = cv.GetCentralMoment(moments, 0, 3)
    mu30 = cv.GetCentralMoment(moments, 3, 0)

    # degenerate (empty) blob: nothing to characterize
    if mu00 == 0:
        return np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])

    # normalized second-order moments (variances)
    xxVar = mu20 / mu00
    yyVar = mu02 / mu00

    # these use raw moments!
    xCenter = m.m10 / m.m00
    yCenter = m.m01 / m.m00

    xyCenter = mu11 / mu00

    # principal-axis lengths from the eigenvalues of the covariance matrix
    axis_first_term = 0.5 * (xxVar + yyVar)
    axis_second_term = 0.5 * np.sqrt(4 * (xyCenter)**2 + (xxVar - yyVar)**2)
    # the lengths of the two principle components
    long_axis = axis_first_term + axis_second_term
    short_axis = abs(axis_first_term - axis_second_term)
    # how round the peak is.  0 means perfectly round; 1 means it's a line, not a circle.
    eccentricity = np.sqrt(abs(1.0 - short_axis / long_axis))
    # how much the peak is rotated.  0 means the long axis points upward.
    #    45 degrees looks like a backslash.
    orientation = 0.5 * np.arctan2((2.0 * mu11), (mu20 - mu02)) * 180 / np.pi
    xSkew = mu30 / (mu00 * (xxVar**(3.0 / 2)))
    ySkew = mu03 / (mu00 * (yyVar**(3.0 / 2)))
    # 0 is a placeholder for the height.
    return np.array([
        xCenter, yCenter, 0, long_axis, short_axis, orientation, eccentricity,
        xSkew, ySkew
    ])
Пример #23
0
def extract_circles(contours, rgb):
    """Find the single largest blob above OBJECT_AREA and, unless a shot has
    already been fired (global `shot`), return it as a one-element
    [(x, y, radius)] list and steer toward it; returns [] when nothing
    qualifies."""
    circles = []
    global shot
    max_ = OBJECT_AREA
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > max_:
            max_ = area
            best = [moments, area]
    # `best` is only bound when some blob beat OBJECT_AREA; the
    # UnboundLocalError handler below is the deliberate "nothing found" path.
    try:
        x = int(cv.GetSpatialMoment(best[0], 1, 0) / best[1])
        y = int(cv.GetSpatialMoment(best[0], 0, 1) / best[1])
        radius = int(math.sqrt(best[1] / math.pi))
        if (shot == False):
            circles.append((x, y, int(radius)))
            adjust_carrot(x, y)
    except UnboundLocalError:
        print "not found"
    return circles
Пример #24
0
    def track_blobs(self, frame):
        """Threshold *frame* by the configured HSV range, take the moments of
        the found contour sequence and update self.cx/self.cy with its
        centroid; return the merged threshold image, or the annotated frame
        when self.show_frame is True."""
        spare = cv.CloneImage(frame)
        size = cv.GetSize(frame)

        hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        out = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        thresh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

        print self.min_hue, self.value_dict['min_hue']

        cv.Smooth(spare, spare, cv.CV_BLUR, 22, 22)
        cv.CvtColor(spare, hsv, cv.CV_BGR2HSV)

        cv.InRangeS(hsv, cv.Scalar(self.min_hue, self.min_sat, self.min_val),
                    cv.Scalar(self.max_hue, self.max_sat, self.max_val),
                    thresh)

        cv.Merge(thresh, thresh, thresh, None, out)
        contours = cv.FindContours(thresh, self.storage, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        # NOTE(review): bare except — consider narrowing to the exception
        # cv.Moments actually raises on an empty contour sequence
        try:
            M = cv.Moments(contours)
        except:
            return out

        m0 = cv.GetCentralMoment(M, 0, 0)

        if m0 > 1.0:
            self.cx = cv.GetSpatialMoment(M, 1, 0) / m0
            self.cy = cv.GetSpatialMoment(M, 0, 1) / m0
            cv.Circle(frame, (int(self.cx), int(self.cy)), 2, (255, 0, 0), 20)
        if self.show_frame is not True:
            return out
        else:
            return frame

        # NOTE(review): unreachable — both branches above return
        pass
Пример #25
0
def main ():
    """Camera loop at 160x120: threshold each frame against the configured
    color range and print whether the detected object lies left, right or
    centered (Spanish labels), until a key is pressed."""
    capture = cv.CaptureFromCAM (-1)
    cv.SetCaptureProperty (capture, cv.CV_CAP_PROP_FRAME_WIDTH, 160)
    cv.SetCaptureProperty (capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 120)
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)

    while True:
        frame = cv.QueryFrame (capture)
        maskN = cv.CreateImage (cv.GetSize (frame), 8, 1)
        hsvN = cv.CloneImage (frame)

        cv.Smooth (frame, frame, cv.CV_BLUR, 3)

        cv.CvtColor (frame, hsvN, cv.CV_BGR2HSV)
        cv.InRangeS (hsvN, config.min_range, config.max_range, maskN)

        # zeroth moment == pixel area of the binary mask
        moment = cv.Moments (cv.GetMat (maskN), 0)
        a = cv.GetCentralMoment (moment, 0, 0)

        if a > config.min_area:
            X = int(cv.GetSpatialMoment (moment, 1, 0) / a)
            print "area: " + str (a)
            print "X: " + str (X)
            #Y = int(cv.GetSpatialMoment (moment, 0, 1) / a)
            if X > config.max_x:
                print "derecha"
            elif X < config.min_x:
                print "izquierda"
            else:
                print "centrado"
        else:
            print "objeto no detectado o muy pequeno"

        # This cuts the loop and exits
        if cv.WaitKey (int(1000/fps)) != -1:
            break

    return;
Пример #26
0
    def action(self):
        """Every 6th invocation, grab a camera frame, threshold it against the
        configured color range and publish the blob area ('Camara::area') and
        x-position ('Camara::lata_x') to the shared data store."""
        if (not (self.delay == 5)):
            self.delay += 1
        else:
            self.delay = 0
            frame = cv.QueryFrame(self.capture)
            maskN = cv.CreateImage(cv.GetSize(frame), 8, 1)
            hsvN = cv.CloneImage(frame)

            cv.Smooth(frame, frame, cv.CV_BLUR, 3)

            cv.CvtColor(frame, hsvN, cv.CV_BGR2HSV)
            cv.InRangeS(hsvN, config.min_range, config.max_range, maskN)

            moment = cv.Moments(cv.GetMat(maskN), 0)
            a = cv.GetCentralMoment(moment, 0, 0)
            # avoid division by zero below when the mask is empty
            if a == 0:
                a = 1

            self.data.write('Camara::area', a)
            self.data.write('Camara::lata_x',
                            int(cv.GetSpatialMoment(moment, 1, 0) / a))

            cv.WaitKey(10)
Пример #27
0
def camerainfo():
    """Track a coloured ball from the webcam, draw its trail, and average
    every 7 distinct positions into a smoothed coordinate.

    NOTE(review): relies on module-level names defined elsewhere in this
    file: MY_CAMERA and the thresholded_image() helper.
    """
    # initialize camera feed
    capture = cv.CaptureFromCAM(MY_CAMERA)
    if not capture:
        print "Could not initialize camera feed!"
        exit(1)

    # create display windows
    cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('threshed', 400,  0)

    # holds the tracked position of the ball
    image_scribble = None
    # the position of the ball
    pos_x = 0
    pos_y = 0
    last_x = 0
    last_y = 0
# my creation to find good ball position
    listofxpos = []
    listofypos = []
    ballcoordinates = [0,  0]
    count_of_measurements = 0
    # read from the camera
    print "Tracking ball... press any key to quit"
    while 1:    
        image = cv.QueryFrame(capture)
        if not image:
            return 'no image found'

        # if this is the first frame, we need to initialize it
        if not image_scribble:
            image_scribble = cv.CreateImage(cv.GetSize(image), image.depth, 3)

        # get the thresholded image
        image_threshed = thresholded_image(image)

        # finds the contours in our binary image
        contours = cv.FindContours(cv.CloneImage(image_threshed), cv.CreateMemStorage())
        # if there is a ball in the frame
        if len(contours) != 0:
            # calculate the moments to estimate the position of the ball
            moments = cv.Moments(contours, 1)
            moment10 = cv.GetSpatialMoment(moments, 1, 0)
            moment01 = cv.GetSpatialMoment(moments, 0, 1)
            # (0,0) central moment equals the blob area for a binary contour
            area = cv.GetCentralMoment(moments, 0, 0)

            # if we got a good enough blob
            if area>1:
                last_x = pos_x
                last_y = pos_y
                # centroid = first-order moments divided by the area
                pos_x = moment10/area
                pos_y = moment01/area

                print("pos=(%s,%s)"%(pos_x,pos_y))

                # draw the tracking line
                if last_x>0 and last_y>0 and pos_x>0 and pos_y>0:
                    pt1 = (int(last_x), int(last_y))
                    pt2 = (int(pos_x), int(pos_y))
                    cv.Line(image_scribble, pt1, pt2, (0, 255, 255), 5)

        # add the scribble to the original frame
        cv.Add(image, image_scribble, image)
        cv.ShowImage('threshed', image_threshed)
        cv.ShowImage('camera', image)

        # my creation to find good ball position:
        # collect up to 7 distinct positions, then average them into
        # ballcoordinates and reset the accumulators.
        if last_x != pos_x or last_y != pos_y:
            if count_of_measurements < 4:
                listofxpos.append(pos_x)
                listofypos.append(pos_y)
                count_of_measurements += 1
            elif count_of_measurements >= 4 and count_of_measurements < 7:# add coordinate selection/filter
                listofxpos.append(pos_x)
                listofypos.append(pos_y)
                count_of_measurements += 1
            elif count_of_measurements >= 7:
                ballcoordinates[0] = sum(listofxpos)/count_of_measurements
                ballcoordinates[1] = sum(listofypos)/count_of_measurements
#                if ballcoordinates[0]  > 200 or ballcoordinates[0] <150: 
#                    amount = (175 - ballcoordinates[0])/5
#                    if amount > 20:
#                        turn(20)
#                    else:
#                        turn(amount)
#                else:
#                    go(20) 
                count_of_measurements = 0
                listofxpos = []
                listofypos = []

            last_x = pos_x
            last_y = pos_y
            print count_of_measurements

# break from the loop if there is a key press
        c = cv.WaitKey(10)
        if not c == -1:
            break
    # NOTE(review): everything below looks like a fragment pasted from a
    # different example — hsv_frame, thresholded_frame and frame are never
    # defined in this function, so this code would raise NameError if the
    # loop above ever exits via the key press. Verify against the original
    # source and remove or repair.
    cv.InRangeS(hsv_frame, (0, 80, 0), (30, 255, 255), thresholded_frame)  # works fine during day
#    cv.InRangeS(hsv_frame, (10, 10, 0), (15, 255, 255), thresholded_frame)  # works fine evenings
    
    contours = cv.FindContours(cv.CloneImage(thresholded_frame), cv.CreateMemStorage())
    
    
    
    
    
    if len(contours)!=0:
        #determine the objects moments and check that the area is large  
        #enough to be our object 
        moments = cv.Moments(contours,1) 
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0) 
        
        #there can be noise in the video so ignore objects with small areas 
        if area > 5: 
            #determine the x and y coordinates of the center of the object 
            #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
            pos_x = moment10/area
            pos_y = moment01/area
        
#            print 'x: ' + str(pos_x) + '\ty: ' + str(pos_y) + '\tarea: ' + str(area) 
#            print '*'
            #create an overlay to mark the center of the tracked object 
            overlay = cv.CreateImage(cv.GetSize(frame), 8, 3) 
            
#            cv.Circle(overlay, (int(pos_x), int(pos_y)), 2, (100, 100, 255), 20) 
#            cv.Add(frame, overlay, frame) 
Пример #29
0
    def runColor(self):
        """Track a red/orange blob from camera 0 and stream its centroid to
        the Arduino-side coordinate file until ESC is pressed.

        Sentinel coordinates written while not tracking:
        (-2, -2) = blob just lost, still hanging around;
        (-1, -1) = nothing tracked.
        """
        self.tracking = False
        self.lasttrack = None
        self.hang_around_seconds = 5
        window_name = "Preston HackSpace 2013 BarCamp Project"
        cv.NamedWindow(window_name, 1)
        self.capture = cv.CaptureFromCAM(0)
        count = 0
        while True:
            frame = cv.QueryFrame(self.capture)
            raw_frame = cv.QueryFrame(self.capture)

            # Blur to suppress colour noise, then move to HSV where a hue
            # band selects the target colour.
            cv.Smooth(frame, frame, cv.CV_BLUR, 3)
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)

            # OpenCV hue runs 0-180: orange 0-22, yellow 22-38, green 38-75,
            # blue 75-130, violet 130-160, red 160-179. This band keeps
            # saturated, bright red/orange pixels.
            mask = cv.CreateImage(cv.GetSize(hsv), 8, 1)
            cv.InRangeS(hsv, (0, 120, 120), (15, 255, 255), mask)
            #cv.InRangeS(hsv, (120, 80, 80), (140, 255, 255), mask)

            # The (0,0) moment of the binary mask counts the matching pixels.
            moments = cv.Moments(cv.GetMat(mask), 0)
            area = cv.GetCentralMoment(moments, 0, 0)

            # Small areas are probably noise, so require a large blob.
            if area > 100000:
                self.tracking = True
                self.lasttrack = time.time()
                # Centroid = first-order moments divided by the area.
                cx = cv.GetSpatialMoment(moments, 1, 0) / area
                cy = cv.GetSpatialMoment(moments, 0, 1) / area

                # Hand the coordinates to the pyFirmata/Arduino controller.
                self.WriteXY(cx, cy)

                # Mark the centroid on the working frame and fold the mask
                # back in so the thresholded region is visible.
                overlay = cv.CreateImage(cv.GetSize(frame), 8, 3)
                cv.Circle(frame, (int(cx), int(cy)), 2, (255, 255, 255), 20)
                cv.Add(frame, overlay, frame)
                cv.Merge(mask, None, None, None, frame)
            elif self.tracking:
                # Blob just vanished: report the "lost" sentinel and keep
                # tracking alive for a grace period in case it returns.
                self.WriteXY(-2, -2)
                if time.time() >= self.lasttrack + self.hang_around_seconds:
                    self.tracking = False
                    self.WriteXY(-1, -1)
            else:
                self.WriteXY(-1, -1)

            # Show the untouched second grab.
            cv.ShowImage(window_name, raw_frame)

            if cv.WaitKey(10) == 27:
                break
Пример #30
0
def main():
    """Track a coloured blob with the webcam and steer fido's HEAD, NECK and
    JAW servos so the robot follows it; runs for at most 2000 frames or
    until a key is pressed.

    NOTE(review): depends on module-level names defined elsewhere in this
    file: fido, GetThresholdedImage, the posX/posY globals, and the
    constants NECK_DOWN/NECK_UP, HEAD_LEFT/HEAD_RIGHT, CENTER_X/CENTER_Y,
    MAX_X/MAX_Y.
    """
    color_tracker_window = "output"
    thresh_window = "thresh"
    capture = cv.CaptureFromCAM(-1)  # -1 = first available camera
    cv.NamedWindow(color_tracker_window, 1)
    cv.MoveWindow(color_tracker_window, 0, 0)
    cv.NamedWindow(thresh_window, 1)
    cv.MoveWindow(thresh_window, 700, 0)
    imgScrible = None
    storage = None
    # posX/posY are globals so the previous blob position persists.
    global posX
    global posY

    # Move to a known start pose and record where each servo sits.
    fido.init_servos()
    fido.set_servo(fido.NECK, NECK_DOWN)
    head_x = fido.get_servo_position(fido.HEAD)
    neck_y = fido.get_servo_position(fido.NECK)
    jaw_pos = fido.get_servo_position(fido.JAW)

    #frame = cv.QueryFrame(capture)
    #imgThresh = GetThresholdedImage(frame)

    for f in xrange(2000):
        frame = cv.QueryFrame(capture)
        #cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        # Gaussian blur reduces colour noise before thresholding.
        cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 9, 9)

        #imgScrible = cv.CreateImage(cv.GetSize(frame), 8, 3)

        imgThresh = GetThresholdedImage(frame)

        # pre-smoothing improves Hough detector

        #if storage is None:
        #  storage = cv.CreateMat(imgThresh.width, 1, cv.CV_32FC3)
        #try:
        #  cv.HoughCircles(imgThresh, storage, cv.CV_HOUGH_GRADIENT, 1, imgThresh.height/4, 50, 20, 10, 240)
        #  circles = np.asarray(storage)
        #except Error, e:
        #  print e
        #  circles = None

        # find largest circle
        #maxRadius = 0
        #x = 0
        #y = 0
        #found = False
        #if circles is not None:
        #  for i in range(len(circles)):
        #    circle = circles[i]
        #    if circle[2] > maxRadius:
        #      found = True
        #      maxRadius = circle[2]
        #      x = circle[0]
        #      y = circle[1]

        #cvShowImage( 'Camera', frame)
        #if found:
        #  posX = x
        #  posY = y
        #  print 'ball detected at position: ',x, ',', y, ' with radius: ', maxRadius
        #else:
        #  print 'no ball'

        mat = cv.GetMat(imgThresh)
        #Calculating the moments
        moments = cv.Moments(mat, 0)
        # (0,0) moment of the binary mask = blob area in pixels
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)

        #lastX and lastY stores the previous positions
        lastX = posX
        lastY = posY
        #Finding a big enough blob
        if area > 20000:

            #Calculating the coordinate postition of the centroid
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            print 'x: ' + str(posX) + ' y: ' + str(posY) + ' area: ' + str(
                area) + ' head_x: ' + str(head_x) + ' neck_y: ' + str(
                    neck_y) + ' jaw_pos: ' + str(jaw_pos)
            #drawing lines to track the movement of the blob
            if (lastX > 0 and lastY > 0 and posX > 0 and posY > 0):
                #cv.Circle( imgThresh, (posX, posY), maxRadius, cv.Scalar(0,0,255), 3, 8, 0 );
                #cv.Line(imgScrible, (posX, posY), (lastX, lastY), cv.Scalar(0, 0, 255), 5)
                # Proportional tracking: nudge the HEAD servo by a fraction
                # of the horizontal error (10-pixel dead band around centre).
                if posX < CENTER_X - 10:
                    error_x = (posX - CENTER_X) / MAX_X * (HEAD_RIGHT -
                                                           HEAD_LEFT)
                    desired_x = int(error_x) / 4 + head_x
                    head_x = desired_x
                    if head_x < HEAD_LEFT:
                        head_x = HEAD_LEFT
                    fido.set_servo(fido.HEAD, head_x)
                elif posX > CENTER_X + 10:
                    new_x = (posX - CENTER_X) / MAX_X * (HEAD_RIGHT -
                                                         HEAD_LEFT)
                    head_x = int(new_x) / 4 + head_x
                    if head_x > HEAD_RIGHT:
                        head_x = HEAD_RIGHT
                    fido.set_servo(fido.HEAD, head_x)

                # Same idea vertically for the NECK servo, with a smaller
                # gain (1/8 of the error).
                if posY < CENTER_Y - 10:
                    new_y = (posY - CENTER_Y) / MAX_Y * (NECK_UP - NECK_DOWN)
                    neck_y = neck_y - (int(new_y) / 8)
                    if neck_y > NECK_UP:
                        neck_y = NECK_UP
                    fido.set_servo(fido.NECK, neck_y)
                elif posY > CENTER_Y + 10:
                    new_y = (posY - CENTER_Y) / MAX_Y * (NECK_UP - NECK_DOWN)
                    neck_y = neck_y - (int(new_y) / 8)
                    if neck_y < NECK_DOWN:
                        neck_y = NECK_DOWN
                    fido.set_servo(fido.NECK, neck_y)

                # Open the jaw proportionally to the blob area (bigger /
                # closer blob -> wider jaw), clamped to the servo's range.
                jaw_pos = int((float(area) - 60000.0) / 1000000.0 *
                              (fido.JAW_OPEN - fido.JAW_CLOSED_EMPTY) +
                              fido.JAW_CLOSED_EMPTY)
                jaw_pos = max(min(jaw_pos, fido.JAW_OPEN),
                              fido.JAW_CLOSED_EMPTY)
                fido.set_servo(fido.JAW, jaw_pos)
            #Adds the three layers and stores it in the frame
            #frame -> it has the camera stream
            #imgScrible -> it has the line tracking the movement of the blob
            #cv.Add(frame, imgScrible, frame)

        cv.ShowImage(thresh_window, imgThresh)
        cv.ShowImage(color_tracker_window, frame)
        c = cv.WaitKey(10)
        if (c != -1):
            break
    print "max frames reached, exiting"