Example #1
    def getWatershedMask(self):
        '''
        Uses the watershed algorithm to refine the foreground mask.
        Currently, this doesn't work well on real video...maybe grabcut would be better.
        '''
        cvMarkerImg = cv.CreateImage(self._fgMask.size, cv.IPL_DEPTH_32S, 1)
        cv.SetZero(cvMarkerImg)

        #fill each contour with a different gray level to label connected components
        seq = self._contours
        c = 50
        while not (seq == None) and len(seq) != 0:
            if cv.ContourArea(seq) > self._minArea:
                c += 10
                moments = cv.Moments(seq)
                m00 = cv.GetSpatialMoment(moments, 0, 0)
                m01 = cv.GetSpatialMoment(moments, 0, 1)
                m10 = cv.GetSpatialMoment(moments, 1, 0)
                centroid = (int(m10 / m00), int(m01 / m00))
                cv.Circle(cvMarkerImg, centroid, 3, cv.RGB(c, c, c),
                          cv.CV_FILLED)
            seq = seq.h_next()

        if c > 50:  # run watershed only if at least one marker was drawn
            img = self._annotateImg.asOpenCV()
            cv.Watershed(img, cvMarkerImg)

        tmp = cv.CreateImage(cv.GetSize(cvMarkerImg), cv.IPL_DEPTH_8U, 1)
        cv.CvtScale(cvMarkerImg, tmp)
        return pv.Image(tmp)
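The docstring above suggests GrabCut as a possible alternative to the watershed pass. A rough, untested sketch of that idea, written against the newer cv2 API rather than the legacy cv module used throughout these examples (the function name, the BGR frame, the rough foreground mask, and the iteration count are all assumptions, not part of the original class):

import cv2
import numpy as np

def grabcut_refine(frame_bgr, fg_mask, iterations=3):
    # Seed GrabCut from the rough mask: nonzero pixels become "probable
    # foreground", everything else "probable background".
    mask = np.where(fg_mask > 0, cv2.GC_PR_FGD, cv2.GC_PR_BGD).astype(np.uint8)
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    cv2.grabCut(frame_bgr, mask, None, bgd_model, fgd_model,
                iterations, cv2.GC_INIT_WITH_MASK)
    # Keep pixels labelled definite or probable foreground.
    return np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD),
                    255, 0).astype(np.uint8)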
Example #2
 def do1Image(self, image, prevpoints):
     #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
     #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
     #http://opencv-users.1802565.n2.nabble.com/Python-cv-Moments-Need-Help-td6044177.html
     #http://stackoverflow.com/questions/5132874/change-elements-in-a-cvseq-in-python
     img = self.getThreshold(image)
     points = []
     for i in range(4):
         cv.SetImageROI(img, (int(self.RectanglePoints[i][0]),
                              int(self.RectanglePoints[i][1]),
                              int(self.RectanglePoints[i][2]),
                              int(self.RectanglePoints[i][3])))
         storage = cv.CreateMemStorage(0)
         contours = cv.FindContours(img, storage)
         moments = cv.Moments(contours)
         moment10 = cv.GetSpatialMoment(moments, 1, 0)
         moment01 = cv.GetSpatialMoment(moments, 0, 1)
         area = cv.GetCentralMoment(moments, 0, 0)
         cv.ResetImageROI(img)
         if (area != 0):
             x = self.RectanglePoints[i][0] + (moment10 / area)
             y = self.RectanglePoints[i][1] + (moment01 / area)
         else:
             if (prevpoints[i][0] == 0):
                 x = self.RectanglePoints[i][0]
                 y = self.RectanglePoints[i][1]
             else:
                 x = prevpoints[i][0]
                 y = prevpoints[i][1]
         points.append([x, y])
     return points
Example #3
def get_center(moments):
    m00 = cv.GetSpatialMoment(moments, 0, 0)
    m10 = cv.GetSpatialMoment(moments, 1, 0)
    m01 = cv.GetSpatialMoment(moments, 0, 1)
    x = float(m10) / m00
    y = float(m01) / m00
    return (x, y)
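get_center is the standard moments centroid, x = m10 / m00 and y = m01 / m00. A minimal usage sketch with the same legacy cv bindings, under assumed inputs (the mask file name is invented; m00 is checked first because it is zero for an empty mask):

import cv

mask = cv.LoadImage("blob_mask.png", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical binary mask
moments = cv.Moments(cv.GetMat(mask), binary=1)  # binary=1 treats every nonzero pixel as 1
if cv.GetSpatialMoment(moments, 0, 0) > 0:
    print get_center(moments)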
Example #4
 def __init__(
     self, BW
 ):  #Constructor. BW is a binary image in the form of a numpy array
     self.BW = BW
     cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                          cv.CreateMemStorage(),
                          mode=cv.CV_RETR_EXTERNAL)  #Finds the contours
     counter = 0
     """
     These are dynamic lists used to store variables
     """
     centroid = list()
     cHull = list()
     contours = list()
     cHullArea = list()
     contourArea = list()
     while cs:  #Iterate through the CvSeq, cs.
         if abs(cv.ContourArea(cs)) > 2000:  #Filter out contours smaller than 2000 pixels in area
             contourArea.append(cv.ContourArea(cs))  #Append the newest contour area
             m = cv.Moments(cs)  #Find all of the moments of the filtered contour
             try:
                 m10 = int(cv.GetSpatialMoment(m, 1, 0))  #Spatial moment m10
                 m00 = int(cv.GetSpatialMoment(m, 0, 0))  #Spatial moment m00
                 m01 = int(cv.GetSpatialMoment(m, 0, 1))  #Spatial moment m01
                 centroid.append((int(m10 / m00), int(m01 / m00)))  #Append the newest centroid coordinates of the contour
                 convexHull = cv.ConvexHull2(cs, cv.CreateMemStorage(), return_points=True)  #Find the convex hull of cs as a CvSeq
                 cHullArea.append(cv.ContourArea(convexHull))  #Add the area of the convex hull to the cHullArea list
                 cHull.append(list(convexHull))  #Add the list form of the convex hull to the cHull list
                 contours.append(list(cs))  #Add the list form of the contour to the contours list
                 counter += 1  #Count how many blobs there are
             except:
                 pass
         cs = cs.h_next()  #Go to the next contour in the cs CvSeq
     """
     Below, the variables are made into fields for later reference
     """
     self.centroid = centroid
     self.counter = counter
     self.cHull = cHull
     self.contours = contours
     self.cHullArea = cHullArea
     self.contourArea = contourArea
Example #5
def extract_circles(contours, rgb):
    global current_pose_editor, side
    circles = []
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > OBJECT_AREA:
            x = int(cv.GetSpatialMoment(moments, 1, 0) / area)
            y = int(cv.GetSpatialMoment(moments, 0, 1) / area)
            radius = int(math.sqrt(area / math.pi))
            circles.append((x, y, int(radius * 1.5)))
            if (y > 100):
                adjust_carrot(x, y, area)
                if (side == 'left' and x < 420):
                    adjust_carrot(x, y, area)  #use just visual servo
                elif (side == 'right' and x > 420):
                    adjust_carrot(x, y, area)  #use just visual servo
            '''
            point = check_lidar((x,y),radius)               #use if you want to use lidar to confirm waypoint
            if (point[0]):
                print 'going to:',point[1]
                waypoint.send_goal_and_wait(current_pose_editor.relative(np.array([point[1][0], point[1][1] + .5, 0])).as_MoveToGoal(speed = .5))
                circles.append((x,y,int(radius*1.5)))
            '''
    return circles
Example #6
    def getStandardizedRects(self):
        '''
        @return: the boxes centered on the target center of mass +- n_sigma*std
        @note: You must call detect() before getStandardizedRects() to see updated results.
        '''
        #create a list of the top-level contours found in the contours (cv.Seq) structure
        rects = []
        if len(self._contours) < 1: return (rects)
        seq = self._contours
        while not (seq == None):
            (x, y, w, h) = cv.BoundingRect(seq)
            if (cv.ContourArea(seq) >
                    self._minArea):  # and  self._filter(rect)
                r = pv.Rect(x, y, w, h)
                moments = cv.Moments(seq)
                m_0_0 = cv.GetSpatialMoment(moments, 0, 0)
                m_0_1 = cv.GetSpatialMoment(moments, 0, 1)
                m_1_0 = cv.GetSpatialMoment(moments, 1, 0)
                mu_2_0 = cv.GetCentralMoment(moments, 2, 0)
                mu_0_2 = cv.GetCentralMoment(moments, 0, 2)

                cx = m_1_0 / m_0_0
                cy = m_0_1 / m_0_0
                w = 2.0 * self._rect_sigma * np.sqrt(mu_2_0 / m_0_0)
                h = 2.0 * self._rect_sigma * np.sqrt(mu_0_2 / m_0_0)

                r = pv.CenteredRect(cx, cy, w, h)

                rects.append(r)
            seq = seq.h_next()

        if self._filter != None:
            rects = self._filter(rects)

        return rects
Example #7
    def angle(self, img):
	# extract position of red blue yellow markers
	# find distance between pairs
	# return angle from inverse cosine
	
	imgHSV = cv.CreateImage(cv.GetSize(img), 8, 3)
	cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)
	cv.NamedWindow("red", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("red", 800, 0)
	cv.NamedWindow("blue", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("blue", 800, 100)
	cv.NamedWindow("yellow", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("yellow", 800, 200)
	
	dot_coords = []
	# use the corresponding thresholds for each color of marker #
	for h_low, h_high, col in [self.red_hues, self.yellow_hues, self.blue_hues]:
	    imgThresh = cv.CreateImage(cv.GetSize(img), 8, 1)
	    cv.InRangeS(imgHSV, cv.Scalar(h_low, 70, 70), cv.Scalar(h_high, 255, 255), imgThresh)
 	    moments = cv.Moments(cv.GetMat(imgThresh))
	    x_mov = cv.GetSpatialMoment(moments, 1, 0)
	    y_mov = cv.GetSpatialMoment(moments, 0, 1)
	    area = cv.GetCentralMoment(moments, 0, 0)
            small_thresh = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 1)
	    cv.Resize(imgThresh, small_thresh)

	    if col == "r":
		cv.ShowImage("red", small_thresh)
	    elif col == "b":
		cv.ShowImage("blue", small_thresh)
	    elif col == "y":
		cv.ShowImage("yellow", small_thresh) 
	    if area > 0:
		posX = float(x_mov)/float(area)
	    	posY = float(y_mov)/float(area)
	    else:
		posX = 0
		posY = 0
	    dot_coords.append([posX, posY])	 
	
	r = dot_coords[0]
	y = dot_coords[1]
	b = dot_coords[2]
	# get side lengths
	y_r = self.dist(r[0], r[1], y[0], y[1])
	r_b = self.dist(b[0], b[1], r[0], r[1])
	y_b = self.dist(b[0], b[1], y[0], y[1])
	# apply law of cosines
	angle_in_rads = math.pow(y_r, 2) + math.pow(r_b, 2) - math.pow(y_b, 2)
	denom = 2.0 * y_r * r_b
	if denom > 0:
	     angle_in_rads /= 2.0 * y_r * r_b
	else:
	     angle_in_rads = 0
	rads = math.acos(angle_in_rads)
	# convert to degrees
	degs = rads * float(180.0 / math.pi)
	if degs < 0 or degs > 360: 
	     degs = 0
	return degs	
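For reference, the block above is the law of cosines solved for the angle at the red marker: with side lengths y_r, r_b and opposite side y_b,

    cos(theta) = (y_r^2 + r_b^2 - y_b^2) / (2 * y_r * r_b),   degrees = acos(cos(theta)) * 180 / pi

with the cosine forced to 0 (i.e. 90 degrees) when the denominator is zero, and the result zeroed if it falls outside 0-360.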
Example #8
def detect_and_draw(img):
    t1 = time.time()

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    #cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)

    # White
    sensitivity = 15
    cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255),
                thresholded_img)

    # Red
    #cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)

    # Blue
    #cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)

    # Green
    #cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    # scale input image for faster processing
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)  # fill the gray buffer; CreateImage leaves it uninitialized
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (area > 5000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(round(x))
        y = int(round(y))

        #create an overlay to mark the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

        cv.Circle(overlay, (x, y), 2, (0, 0, 0), 20)
        cv.Add(img, overlay, img)
        #add the thresholded image back to the img so we can see what was
        #left after it was applied
        #cv.Merge(thresholded_img, None, None, None, img)
        t2 = time.time()
        message = "Color tracked!"
        print "detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y)

    cv.ShowImage("Color detection", img)
Example #9
def yellow(img):
    global yx
    global yy
    global bx
    global by

    #blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)

    #convert the image to hsv(Hue, Saturation, Value) so its
    #easier to determine the color to track(hue)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

    #limit all pixels that don't match our criteria, in this case we are
    #looking for yellow; if you want you can adjust the first value in
    #both tuples, which is the hue range (20-30 here).  OpenCV uses 0-180 as
    #a hue range for the HSV color model
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

    cv.InRangeS(hsv_img, (20, 100, 100), (30, 255, 255),
                thresholded_img)  #yellow
    #determine the objects moments and check that the area is large
    #enough to be our object
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    #there can be noise in the video so ignore objects with small areas
    if (area > 100000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area

        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(x)
        y = int(y)
        yx = x
        yy = y
        cv.Circle(img, (x, y), 5, (0, 0, 0), -1)

    cv.InRangeS(hsv_img, (100, 80, 80), (120, 255, 255),
                thresholded_img)  #blue

    #determine the objects moments and check that the area is large
    #enough to be our object
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    #there can be noise in the video so ignore objects with small areas
    if (area > 100):
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(x)
        y = int(y)
        bx = x
        by = y
        cv.Circle(img, (x, y), 5, (0, 0, 255), -1)
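The hue bounds used above follow OpenCV's 0-180 hue scale (half of the usual 0-360 degrees). As a rough reference, not taken from any of these snippets, approximate InRangeS windows for common marker colours might look like this (saturation/value bounds are illustrative):

HUE_RANGES = {
    "red":    ((0,   100, 100), (10,  255, 255)),   # red also wraps around 170-180
    "yellow": ((20,  100, 100), (30,  255, 255)),
    "green":  ((40,  100, 100), (80,  255, 255)),
    "blue":   ((100, 100, 100), (130, 255, 255)),
}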
Example #10
 def __init__(
         self, BW
 ):  #Constructor. BW is a binary image in the form of a numpy array
     self.BW = BW
     cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                          cv.CreateMemStorage(),
                          mode=cv.CV_RETR_EXTERNAL)  #Finds the contours
     counter = 0
     """
     These are dynamic lists used to store variables
     """
     centroid = list()
     cHull = list()
     contours = list()
     cHullArea = list()
     contourArea = list()
     while cs:  #Iterate through the CvSeq, cs.
         if abs(
                 cv.ContourArea(cs)
         ) > 2000:  #Filters out contours smaller than 2000 pixels in area
             contourArea.append(cv.ContourArea(
                 cs))  #Appends contourArea with newest contour area
             m = cv.Moments(
                 cs)  #Finds all of the moments of the filtered contour
             try:
                 m10 = int(cv.GetSpatialMoment(m, 1,
                                               0))  #Spatial moment m10
                 m00 = int(cv.GetSpatialMoment(m, 0,
                                               0))  #Spatial moment m00
                 m01 = int(cv.GetSpatialMoment(m, 0,
                                               1))  #Spatial moment m01
                 centroid.append(
                     (int(m10 / m00), int(m01 / m00))
                 )  #Appends centroid list with newest coordinates of centroid of contour
                 convexHull = cv.ConvexHull2(
                     cs, cv.CreateMemStorage(), return_points=True
                 )  #Finds the convex hull of cs in type CvSeq
                 cHullArea.append(
                     cv.ContourArea(convexHull)
                 )  #Adds the area of the convex hull to cHullArea list
                 cHull.append(
                     list(convexHull)
                 )  #Adds the list form of the convex hull to cHull list
                 contours.append(
                     list(cs)
                 )  #Adds the list form of the contour to contours list
                 counter += 1  #Adds to the counter to see how many blobs are there
             except:
                 pass
         cs = cs.h_next()  #Goes to next contour in cs CvSeq
     """
     Below the variables are made into fields for referencing later
     """
     self.centroid = centroid
     self.counter = counter
     self.cHull = cHull
     self.contours = contours
     self.cHullArea = cHullArea
     self.contourArea = contourArea
Example #11
def center_of_mass(contour):
    moment = cv.Moments(contour)
    mass = cv.GetSpatialMoment(moment, 0, 0)
    mx = cv.GetSpatialMoment(moment, 1, 0)
    my = cv.GetSpatialMoment(moment, 0, 1)
    X = mx / mass
    Y = my / mass
    return X, Y
Example #12
    def computeMoment(self, contour):
        moments = cv.Moments(contour, 1)

        area = cv.GetSpatialMoment(moments, 0, 0)
        if area == 0: return
        x = cv.GetSpatialMoment(moments, 1, 0)
        y = cv.GetSpatialMoment(moments, 0, 1)
        return x, y, area  # raw first-order moments and the area; the centroid is (x / area, y / area)
Example #13
def get_contour_center(moments):
    spatial_moment10 = cv.GetSpatialMoment(moments, 1, 0)
    spatial_moment01 = cv.GetSpatialMoment(moments, 0, 1)
    area = abs(cv.GetCentralMoment(moments, 0, 0))

    # Ensuring that there is no division by zero.
    # PLEASE DO NOT TOUCH THIS, DO NOT TRY TO AVOID 0 DIVISION BY ADDING
    # A VALUE TO AREA BELOW, BECAUSE IT WOULD FAIL IN SOME CASES
    area = area or 0.01
    return (spatial_moment10 / area, spatial_moment01 / area)
Example #14
def contourCenter(thisContour, smoothness=4): 
    positions_x, positions_y = [0] * smoothness, [0] * smoothness 
    if cv.ContourArea(thisContour) > 2.0:
        moments = cv.Moments(thisContour, 1)
        positions_x.append(cv.GetSpatialMoment(moments, 1, 0) / cv.GetSpatialMoment(moments, 0, 0))
        positions_y.append(cv.GetSpatialMoment(moments, 0, 1) / cv.GetSpatialMoment(moments, 0, 0))
        positions_x, positions_y = positions_x[-smoothness:], positions_y[-smoothness:]
        pos_x = (sum(positions_x) / len(positions_x))
        pos_y = (sum(positions_y) / len(positions_y))
        return (int(pos_x * smoothness), int(pos_y * smoothness))
Example #15
def main():
    color_tracker_window = "output"
    thresh_window = "thresh"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(color_tracker_window, 1)
    cv.NamedWindow(thresh_window, 1)
    imgScrible = None
    global posX
    global posY

    fido.init_servos()

    while True:
        frame = cv.QueryFrame(capture)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)

        if (imgScrible is None):
            imgScrible = cv.CreateImage(cv.GetSize(frame), 8, 3)

        imgThresh = GetThresholdedImage(frame)

        mat = cv.GetMat(imgThresh)
        #Calculating the moments
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)

        #lastX and lastY stores the previous positions
        lastX = posX
        lastY = posY
        #Finding a big enough blob
        if (area > 100000):

            #Calculating the coordinate postition of the centroid
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            print 'x: ' + str(posX) + ' y: ' + str(posY) + ' area: ' + str(
                area)
            #drawing lines to track the movement of the blob
            if (lastX > 0 and lastY > 0 and posX > 0 and posY > 0):
                cv.Line(imgScrible, (posX, posY), (lastX, lastY),
                        cv.Scalar(0, 255, 255), 5)
            #Adds the three layers and stores it in the frame
            #frame -> it has the camera stream
            #imgScrible -> it has the line tracking the movement of the blob
            cv.Add(frame, imgScrible, frame)

        cv.ShowImage(thresh_window, imgThresh)
        cv.ShowImage(color_tracker_window, frame)
        c = cv.WaitKey(10)
        if (c != -1):
            break
Example #16
 def run(self): 
     while True: 
         img = cv.QueryFrame( self.capture ) 
                     
         #blur the source image to reduce color noise 
         cv.Smooth(img, img, cv.CV_BLUR, 3); 
         
         #convert the image to hsv(Hue, Saturation, Value) so its  
         #easier to determine the color to track(hue) 
         hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3) 
         cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV) 
         
         #limit all pixels that don't match our criteria, in this case we are
         #looking for purple; if you want you can adjust the first value in
         #both tuples, which is the hue range (115-135 here).  OpenCV uses 0-180 as
         #a hue range for the HSV color model
         thresholded_img =  cv.CreateImage(cv.GetSize(hsv_img), 8, 1) 
         cv.InRangeS(hsv_img, (115, 75, 75), (135, 255, 255), thresholded_img) 
         
         #determine the objects moments and check that the area is large  
         #enough to be our object 
         thresholded_img2 = cv.GetMat(thresholded_img)
         moments = cv.Moments(thresholded_img2,0) 
         area = cv.GetCentralMoment(moments, 0, 0) 
         
         #there can be noise in the video so ignore objects with small areas 
         if(area > 100000): 
             #determine the x and y coordinates of the center of the object 
             #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
             x = cv.GetSpatialMoment(moments, 1, 0)/area 
             y = cv.GetSpatialMoment(moments, 0, 1)/area 
         
             # print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area) 
             
             
             x = int(x)
             y = int(y)
             
             #create an overlay to mark the center of the tracked object 
             overlay = cv.CreateImage(cv.GetSize(img), 8, 3) 
             
             cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20) 
             cv.Add(img, overlay, img) 
             #add the thresholded image back to the img so we can see what was  
             #left after it was applied 
             cv.Merge(thresholded_img, None, None, None, img) 
          
         #display the image  
         cv.ShowImage(color_tracker_window, img) 
         
         if cv.WaitKey(10) == 27: 
             break 
Example #17
    def _get_pos_spatial(self, th_img):
	print "Getting spacial position (?)"
        moments = cv.Moments(cv.GetMat(th_img))
        mom10 = cv.GetSpatialMoment(moments, 1, 0)
        mom01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > 10:
            pos = [int(mom10/area), int(mom01/area)]
        else:
            pos = None

        return pos
Example #18
def find_orientation(mask, center_point):
    cv.Circle(mask, center_point, 19, cv.RGB(0, 0, 0), -1)

    moments = cv.Moments(mask, 1)
    M00 = cv.GetSpatialMoment(moments, 0, 0)
    M10 = cv.GetSpatialMoment(moments, 1, 0)
    M01 = cv.GetSpatialMoment(moments, 0, 1)

    if M00 == 0:
        M00 = 0.01

    center_of_mass = (round(M10 / M00), round(M01 / M00))

    return (int(calculate_bearing(center_of_mass, center_point)) - 180) % 360
Example #19
def main():
    s = scratch.Scratch()
    capture = cv.CaptureFromCAM(0)
    cv.NamedWindow("Track", 1)

    while True:
        #capture frame
        frame_o = cv.QueryFrame(capture)
        frame = cv.CreateImage((frame_o.width * 3 / 8, frame_o.height * 3 / 8),
                               frame_o.depth, frame_o.nChannels)
        cv.Resize(frame_o, frame)
        cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)

        #Convert to HSV
        imgHSV = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)

        #Thresh
        imgThreshed = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.InRangeS(imgHSV, cv.Scalar(0, 124, 221), cv.Scalar(10, 255, 256),
                    imgThreshed)
        cv.Smooth(imgThreshed, imgThreshed, cv.CV_GAUSSIAN, 3, 3)

        mat = cv.GetMat(imgThreshed)
        moments = cv.Moments(mat)

        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > 1000:
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            if posX >= 0 and posY >= 0:
                print("X: " + str(posX) + ", Y: " + str(posY))
                cv.Rectangle(frame, (posX - 10, posY - 10),
                             (posX + 10, posY + 10), cv.RGB(0, 255, 0))
                s.sensorupdate({'X': posX})
                s.sensorupdate({'Y': posY})

        cv.ShowImage("Track", frame)
        k = cv.WaitKey(70)
        if k % 0x100 == 27:
            break
Example #20
def extract_circles(contours, rgb):
    circles = []
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > OBJECT_AREA:
            x = int(cv.GetSpatialMoment(moments, 1, 0) / area)
            y = int(cv.GetSpatialMoment(moments, 0, 1) / area)
            radius = int(math.sqrt(area / math.pi))

            point = check_lidar((x, y), radius)
            if (point[0]):
                circles.append((x, y, int(radius * 1.5)))
                append_marker(point[1], rgb)
                global new_buoy
                new_buoy = True
    return circles
Example #21
    def __init__(
            self, BW
    ):  #Constructor. BW is a binary image in the form of a numpy array
        self.BW = BW
        cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                             cv.CreateMemStorage(),
                             mode=cv.CV_RETR_EXTERNAL)  #Finds the contours
        counter = 0

        centroid = list()
        cHull = list()
        contours = list()
        cHullArea = list()
        contourArea = list()
        while cs:  #Iterate through the CvSeq, cs.
            if abs(cv.ContourArea(cs)) > 2000:
                contourArea.append(cv.ContourArea(cs))
                m = cv.Moments(cs)
                try:
                    m10 = int(cv.GetSpatialMoment(m, 1,
                                                  0))  #Spatial moment m10
                    m00 = int(cv.GetSpatialMoment(m, 0,
                                                  0))  #Spatial moment m00
                    m01 = int(cv.GetSpatialMoment(m, 0,
                                                  1))  #Spatial moment m01
                    centroid.append((int(m10 / m00), int(m01 / m00)))
                    convexHull = cv.ConvexHull2(cs,
                                                cv.CreateMemStorage(),
                                                return_points=True)
                    cHullArea.append(cv.ContourArea(convexHull))
                    cHull.append(list(convexHull))
                    contours.append(list(cs))
                    counter += 1
                except:
                    pass
            cs = cs.h_next()

        self.centroid = centroid
        self.counter = counter
        self.cHull = cHull
        self.contours = contours
        self.cHullArea = cHullArea
        self.contourArea = contourArea
Example #22
def main():
    pr_window = "imagen"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(pr_window, 1)

    #    set the window size |-| comment this out when the window does not need to be shown
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, config.ancho)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, config.alto)
    delay = 0
    while True:
        if (not (delay == 20)):
            delay += 1
            img = cv.QueryFrame(capture)
            #cv.ReleaseCapture( img )
        else:
            delay = 0
            frame = cv.QueryFrame(capture)
            maskN = cv.CreateImage(cv.GetSize(frame), 8, 1)
            hsvN = cv.CloneImage(frame)

            cv.Smooth(frame, frame, cv.CV_BLUR, 3)

            cv.CvtColor(frame, hsvN, cv.CV_BGR2HSV)
            cv.InRangeS(hsvN, config.min_range, config.max_range, maskN)

            moment = cv.Moments(cv.GetMat(maskN), 0)
            a = cv.GetCentralMoment(moment, 0, 0)

            if a > config.min_area:
                X = int(cv.GetSpatialMoment(moment, 1, 0) / a)
                print "X: " + str(X)
                print "min: " + str(config.min_x)
                print "max: " + str(config.max_x)
                #Y = int(cv.GetSpatialMoment (moment, 0, 1) / a)
                if X > config.max_x:
                    print "derecha"
                elif X < config.min_x:
                    print "izquierda"
                else:
                    print "centrado"
            else:
                print "objeto no detectado o muy pequeno"

            cv.ShowImage(pr_window, maskN)


#        uncomment for debugging
#        X = int(cv.GetSpatialMoment (moment, 1, 0) / a)
#        print 'x: ' + str (X)  + ' area: ' + str (a)

# this is how we break out and exit
#        if cv.WaitKey (100) != -1:
#            break

    return
Example #23
 def getPupil(frame):
   pupilImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
   cv.InRangeS(frame, (30, 30, 30), (80, 80, 80), pupilImg)
   contours = cv.FindContours(pupilImg, cv.CreateMemStorage(0), mode=cv.CV_RETR_EXTERNAL)
   del pupilImg
   pupilImg = cv.CloneImage(frame)
   while contours:
       moments = cv.Moments(contours)
       area = cv.GetCentralMoment(moments, 0, 0)
       if (area > 50):
           pupilArea = area
           x = cv.GetSpatialMoment(moments, 1, 0) / area
           y = cv.GetSpatialMoment(moments, 0, 1) / area
           pupil = contours
           global centroid
           centroid = (int(x), int(y))
           cv.DrawContours(pupilImg, pupil, (0, 0, 0), (0, 0, 0), 2, cv.CV_FILLED)
           break
       contours = contours.h_next()
   return (pupilImg)
Example #24
def has_intersection(amap, apoly, maxwidth, maxheight):
    polymap = cv.CreateImage((maxwidth, maxheight), cv.IPL_DEPTH_8U, 1)
    cv.SetZero(polymap)  # CreateImage does not clear the buffer
    cv.FillPoly(polymap, [apoly], im.color.blue)
    intersection = cv.CreateImage((maxwidth, maxheight), cv.IPL_DEPTH_8U, 1)
    cv.And(polymap, amap, intersection)
    m = cv.Moments(cv.GetMat(intersection), True)
    return bool(cv.GetSpatialMoment(m, 0, 0))
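Because the (0, 0) spatial moment of the ANDed image is simply the count of overlapping nonzero pixels, the bool() cast acts as an "any overlap at all" test. A small usage sketch under invented inputs (the occupancy image and square polygon are made up; im.color.blue comes from the original snippet's own module):

import cv  # legacy OpenCV bindings, as in the snippet above

w, h = 640, 480
occupancy = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
cv.SetZero(occupancy)  # CreateImage does not clear the buffer
cv.Rectangle(occupancy, (100, 100), (200, 200), cv.Scalar(255), cv.CV_FILLED)  # one occupied region
square = [(150, 150), (250, 150), (250, 250), (150, 250)]  # polygon overlapping it
print has_intersection(occupancy, square, w, h)  # expected: True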
Example #25
def extract_circles(contours, rgb):
    circles = []
    global shot
    max_ = OBJECT_AREA
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > max_:
            max_ = area
            best = [moments, area]
    try:
        x = int(cv.GetSpatialMoment(best[0], 1, 0) / best[1])
        y = int(cv.GetSpatialMoment(best[0], 0, 1) / best[1])
        radius = int(math.sqrt(best[1] / math.pi))
        if (shot == False):
            circles.append((x, y, int(radius)))
            adjust_carrot(x, y)
    except UnboundLocalError:
        print "not found"
    return circles
Example #26
    def track_blobs(self, frame):
        spare = cv.CloneImage(frame)
        size = cv.GetSize(frame)

        hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        out = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        thresh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

        print self.min_hue, self.value_dict['min_hue']

        cv.Smooth(spare, spare, cv.CV_BLUR, 22, 22)
        cv.CvtColor(spare, hsv, cv.CV_BGR2HSV)

        cv.InRangeS(hsv, cv.Scalar(self.min_hue, self.min_sat, self.min_val),
                    cv.Scalar(self.max_hue, self.max_sat, self.max_val),
                    thresh)

        cv.Merge(thresh, thresh, thresh, None, out)
        contours = cv.FindContours(thresh, self.storage, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        try:
            M = cv.Moments(contours)
        except:
            return out

        m0 = cv.GetCentralMoment(M, 0, 0)

        if m0 > 1.0:
            self.cx = cv.GetSpatialMoment(M, 1, 0) / m0
            self.cy = cv.GetSpatialMoment(M, 0, 1) / m0
            cv.Circle(frame, (int(self.cx), int(self.cy)), 2, (255, 0, 0), 20)
        if self.show_frame is True:
            return frame
        return out
Example #27
def main ():
    capture = cv.CaptureFromCAM (-1)
    cv.SetCaptureProperty (capture, cv.CV_CAP_PROP_FRAME_WIDTH, 160)
    cv.SetCaptureProperty (capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 120)
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)

    while True:
        frame = cv.QueryFrame (capture)
        maskN = cv.CreateImage (cv.GetSize (frame), 8, 1)
        hsvN = cv.CloneImage (frame)

        cv.Smooth (frame, frame, cv.CV_BLUR, 3)

        cv.CvtColor (frame, hsvN, cv.CV_BGR2HSV)
        cv.InRangeS (hsvN, config.min_range, config.max_range, maskN)

        moment = cv.Moments (cv.GetMat (maskN), 0)
        a = cv.GetCentralMoment (moment, 0, 0)

        if a > config.min_area:
            X = int(cv.GetSpatialMoment (moment, 1, 0) / a)
            print "area: " + str (a)
            print "X: " + str (X)
            #Y = int(cv.GetSpatialMoment (moment, 0, 1) / a)
            if X > config.max_x:
                print "derecha"
            elif X < config.min_x:
                print "izquierda"
            else:
                print "centrado"
        else:
            print "objeto no detectado o muy pequeno"

        # this breaks out of the loop and exits
        if cv.WaitKey (int(1000/fps)) != -1:
            break

    return
Example #28
    def action(self):
        if (not (self.delay == 5)):
            self.delay += 1
        else:
            self.delay = 0
            frame = cv.QueryFrame(self.capture)
            maskN = cv.CreateImage(cv.GetSize(frame), 8, 1)
            hsvN = cv.CloneImage(frame)

            cv.Smooth(frame, frame, cv.CV_BLUR, 3)

            cv.CvtColor(frame, hsvN, cv.CV_BGR2HSV)
            cv.InRangeS(hsvN, config.min_range, config.max_range, maskN)

            moment = cv.Moments(cv.GetMat(maskN), 0)
            a = cv.GetCentralMoment(moment, 0, 0)
            if a == 0:
                a = 1

            self.data.write('Camara::area', a)
            self.data.write('Camara::lata_x',
                            int(cv.GetSpatialMoment(moment, 1, 0) / a))

            cv.WaitKey(10)
Example #29
def main():

    # Initialize capturing live feed from the camera
    capture = 0
    capture = cv.CaptureFromCAM(0)

    # Couldn't get a device? Throw an error and quit
    if (not capture):
        print "Could not initialize capturing...\n"
        return -1

    # The two windows we'll be using
    cv.NamedWindow("video")
    cv.NamedWindow("thresh")

    # This image holds the "scribble" data...
    imgScribble = 0

    # a flag which indicates a valid mouse click
    clicked = 0
    clicked1 = 0

    # to hold previous co-ordinate values.
    prevXred = 0
    prevYred = 0
    prevXyellow = 0
    prevYyellow = 0
    prevXblue = 0
    prevYblue = 0

    #initialising the fake pointer-motion helper (libhelper)
    import ctypes
    c = ctypes.CDLL("libhelper.so.1")
    libc = ctypes.CDLL("libc.so.6")
    c.helper_init()

    #to hold current co-ordinate values
    Xred = -1
    Yred = -1
    Xyellow = -1
    Yyellow = -1
    Xblue = -1
    Yblue = -1

    # An infinite loop
    while (True):
        #----------------------------------------------------------------------------------------

        # Will hold a frame captured from the camera
        frame = 0
        frame = cv.QueryFrame(capture)

        # If we couldn't grab a frame... quit
        if (not frame):
            break

        # If this is the first frame, we need to initialize it
        if (imgScribble == 0):
            imgScribble = cv.CreateImage(cv.GetSize(frame), 8, 3)

        # representative co-ordinates of RED & YELLOW finger-tips, initialized.
        dx = 0
        dy = 0
        dz = 0

        #----------------------------------------------------------------------------------------

        # Holds the RED thresholded image (red = white, rest = black)
        imgRedThresh = GetRedThresholded(frame)
        moments = 0
        # Calculate the moments to estimate the position of RED finger-tip
        moments = cv.Moments(imgRedThresh)

        # The actual moment values
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        area = cv.GetSpatialMoment(moments, 0, 0)

        if (area == 0):
            continue

        prevXred = Xred
        prevYred = Yred
        if area:
            Xred = moment10 / area
            Yred = moment01 / area

        # Print it out for debugging purposes
        #print "position "+ str(Xred)+' '+ str(Yred)+'\n'

        #----------------------------------------------------------------------------------------

        # Holds the BLUE thresholded image (blue = white, rest = black)
        imgBlueThresh = GetBlueThresholded(frame)
        moments = 0
        # Calculate the moments to estimate the position of the BLUE finger-tip
        moments = cv.Moments(imgBlueThresh)

        # The actual moment values
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        area = cv.GetSpatialMoment(moments, 0, 0)

        if (area == 0):
            continue

        prevXblue = Xblue
        prevYblue = Yblue
        if area:
            Xblue = moment10 / area
            Yblue = moment01 / area

        # Print it out for debugging purposes
        #print "position "+ str(Xred)+' '+ str(Yred)+'\n'

        #----------------------------------------------------------------------------------------

        # Holds the YELLOW thresholded image (yellow = white, rest = black)
        imgYellowThresh = GetYellowThresholded(frame)
        moments = 0
        # Calculate the moments to estimate the position of YELLOW finger-tip
        moments = cv.Moments(imgYellowThresh)

        # The actual moment values
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        area = cv.GetSpatialMoment(moments, 0, 0)

        if (area == 0): continue

        prevXyellow = Xyellow
        prevYyellow = Yyellow
        Xyellow = moment10 / area
        Yyellow = moment01 / area

        #dx = (Xyellow - prevXyellow)
        #dy = (Yyellow - prevYyellow)

        #x=1390-Xyellow*2.125
        #y=Yyellow*1.575

        Xt = 1390 - (abs(Xred + Xyellow) * 1.125)
        Yt = (abs(Yred + Yyellow) * 0.787)

        libc.usleep(150)
        c.helper_mov_absxy(int(Xt), int(Yt))
        #c.helper_mov_relxy(int(dx),int(dy))

        # Print it out for debugging purposes
        #print "position "+ str(Xyellow)+' '+ str(Yyellow)

        #----------------------------------------------------------------------------------------

        # find distance between RED & YELLOW finger-tips
        # for faster calculation, Xdiff and Ydiff are considered individually
        # instead of sqrt((x2-x1)^2 + (y2-y1)^2)

        Xdiff = abs(Xred - Xyellow)
        Ydiff = abs(Yred - Yyellow)

        Xdiff1 = abs(Xred - Xblue)
        Ydiff1 = abs(Yred - Yblue)

        # determine the 'clicked' state, using approximation to circle method.
        d = 50
        #if((21*Xdiff + 50*Ydiff <= 50*d) and (50*Xdiff + 21*Ydiff <= 50*d)):
        if (Xdiff * Xdiff + Ydiff * Ydiff < d * d):
            #if(Xdiff < 25 and Ydiff < 25):
            if (not clicked):
                clicked = 1
                print "left clicked"

        else:
            if (clicked):
                clicked = 0
                c.helper_release(1)

        if clicked:
            c.helper_press(1)

        d = 45
        #if((21*Xdiff + 50*Ydiff <= 50*d) and (50*Xdiff + 21*Ydiff <= 50*d)):
        if (Xdiff1 * Xdiff1 + Ydiff1 * Ydiff1 < d * d):
            #if(Xdiff < 25 and Ydiff < 25):
            if (not clicked1):
                clicked1 = 1
                print "right clicked"

        else:
            if (clicked1):
                clicked1 = 0

        if clicked1:
            c.helper_press(3)
            c.helper_release(3)

    #----------------------------------------------------------------------------------------


#		# We want to draw a line only if its a valid position
#		if(lastX>0 and lastY>0 and posX>0 and posY>0):
#
#			# Draw a yellow line from the previous point to the current point
#			cv.Line(imgScribble, cv.Point(posX, posY), cv.Point(lastX, lastY), cv.Scalar(0,255,255), 5)
#
#
#		# Add the scribbling image and the frame... and we get a combination of the two
#		cv.Add(frame, imgScribble, frame)
#cv.ShowImage("thresh", imgYellowThresh)
#cv.ShowImage("video", frame)

#----------------------------------------------------------------------------------------

# Wait for a keypress
        cin = cv.WaitKey(10)
        if not (cin == -1):
            # If pressed, break out of the loop
            break

        # Release the thresholded image... we need no memory leaks.. please

        #----------------------------------------------------------------------------------------

    # We're done using the camera. Other applications can now use it
    return 0
Example #30
def camerainfo():
    # initialize camera feed
    capture = cv.CaptureFromCAM(MY_CAMERA)
    if not capture:
        print "Could not initialize camera feed!"
        exit(1)

    # create display windows
    cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow('threshed', 400,  0)

    # holds the tracked position of the ball
    image_scribble = None
    # the position of the ball
    pos_x = 0
    pos_y = 0
    last_x = 0
    last_y = 0
# my creation to find good ball position
    listofxpos = []
    listofypos = []
    ballcoordinates = [0,  0]
    count_of_measurements = 0
    # read from the camera
    print "Tracking ball... press any key to quit"
    while 1:    
        image = cv.QueryFrame(capture)
        if not image:
            return 'no image found'

        # if this is the first frame, we need to initialize it
        if not image_scribble:
            image_scribble = cv.CreateImage(cv.GetSize(image), image.depth, 3)

        # get the thresholded image
        image_threshed = thresholded_image(image)

        # finds the contours in our binary image
        contours = cv.FindContours(cv.CloneImage(image_threshed), cv.CreateMemStorage())
        # if there is a ball in the frame
        if len(contours) != 0:
            # calculate the moments to estimate the position of the ball
            moments = cv.Moments(contours, 1)
            moment10 = cv.GetSpatialMoment(moments, 1, 0)
            moment01 = cv.GetSpatialMoment(moments, 0, 1)
            area = cv.GetCentralMoment(moments, 0, 0)

            # if we got a good enough blob
            if area>1:
                last_x = pos_x
                last_y = pos_y
                pos_x = moment10/area
                pos_y = moment01/area

                print("pos=(%s,%s)"%(pos_x,pos_y))

                # draw the tracking line
                if last_x>0 and last_y>0 and pos_x>0 and pos_y>0:
                    pt1 = (int(last_x), int(last_y))
                    pt2 = (int(pos_x), int(pos_y))
                    cv.Line(image_scribble, pt1, pt2, (0, 255, 255), 5)

        # add the scribble to the original frame
        cv.Add(image, image_scribble, image)
        cv.ShowImage('threshed', image_threshed)
        cv.ShowImage('camera', image)

        # my creation to find good ball position
        if last_x != pos_x or last_y != pos_y:
            if count_of_measurements < 4:
                listofxpos.append(pos_x)
                listofypos.append(pos_y)
                count_of_measurements += 1
            elif count_of_measurements >= 4 and count_of_measurements < 7:# add coordinate selection/filter
                listofxpos.append(pos_x)
                listofypos.append(pos_y)
                count_of_measurements += 1
            elif count_of_measurements >= 7:
                ballcoordinates[0] = sum(listofxpos)/count_of_measurements
                ballcoordinates[1] = sum(listofypos)/count_of_measurements
#                if ballcoordinates[0]  > 200 or ballcoordinates[0] <150: 
#                    amount = (175 - ballcoordinates[0])/5
#                    if amount > 20:
#                        turn(20)
#                    else:
#                        turn(amount)
#                else:
#                    go(20) 
                count_of_measurements = 0
                listofxpos = []
                listofypos = []

            last_x = pos_x
            last_y = pos_y
            print count_of_measurements

# break from the loop if there is a key press
        c = cv.WaitKey(10)
        if not c == -1:
            break