Beispiel #1
0
def yellow(img):
    """Locate two colour blobs in *img* (an OpenCV IplImage, modified in place).

    The image is blurred, converted to HSV and thresholded twice: first for
    yellow (hue 20-30), then for hue 100-120.  The centroid of each
    sufficiently large blob is written to the module globals (yx, yy) and
    (bx, by) respectively, and a filled circle is drawn on *img* at each
    centroid.
    """
    global yx
    global yy
    global bx
    global by

    #blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)

    #convert the image to hsv(Hue, Saturation, Value) so its
    #easier to determine the color to track(hue)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

    #limit all pixels that don't match our criteria.  OpenCV uses 0-180
    #as the hue range for the HSV color model, so adjust the first value
    #of both tuples to track a different colour.
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

    cv.InRangeS(hsv_img, (20, 100, 100), (30, 255, 255),
                thresholded_img)  #yellow
    #determine the objects moments and check that the area is large
    #enough to be our object
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)  # m00 == blob area in pixels

    #there can be noise in the video so ignore objects with small areas
    if (area > 100000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area

        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(x)
        y = int(y)
        yx = x
        yy = y
        cv.Circle(img, (x, y), 5, (0, 0, 0), -1)

    #NOTE(review): hue 100-120 is blue on OpenCV's 0-180 hue scale, not
    #pink as the original comment claimed -- confirm which marker colour
    #(bx, by) is meant to track.
    cv.InRangeS(hsv_img, (100, 80, 80), (120, 255, 255),
                thresholded_img)  #pink
    #determine the objects moments and check that the area is large
    #enough to be our object
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    #there can be noise in the video so ignore objects with small areas
    #(much smaller cutoff than the yellow blob above)
    if (area > 100):
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(x)
        y = int(y)
        bx = x
        by = y
        cv.Circle(img, (x, y), 5, (0, 0, 255), -1)
    def angle(self, img):
	"""Return the angle (in degrees) at the red marker, computed from the
	red/yellow/blue marker centroids via the law of cosines.

	NOTE(review): math.acos raises ValueError when the cosine falls
	outside [-1, 1] (degenerate marker geometry) -- confirm callers
	guard against that.
	"""
	# extract position of red blue yellow markers
	# find distance between pairs
	# return angle from inverse cosine
	
	imgHSV = cv.CreateImage(cv.GetSize(img), 8, 3)
	cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)
	cv.NamedWindow("red", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("red", 800, 0)
	cv.NamedWindow("blue", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("blue", 800, 100)
	cv.NamedWindow("yellow", cv.CV_WINDOW_AUTOSIZE)
	cv.MoveWindow("yellow", 800, 200)
	
	dot_coords = []
	# use the corresponding thresholds for each color of marker #
	for h_low, h_high, col in [self.red_hues, self.yellow_hues, self.blue_hues]:
	    imgThresh = cv.CreateImage(cv.GetSize(img), 8, 1)
	    cv.InRangeS(imgHSV, cv.Scalar(h_low, 70, 70), cv.Scalar(h_high, 255, 255), imgThresh)
 	    moments = cv.Moments(cv.GetMat(imgThresh))
	    x_mov = cv.GetSpatialMoment(moments, 1, 0)
	    y_mov = cv.GetSpatialMoment(moments, 0, 1)
	    area = cv.GetCentralMoment(moments, 0, 0)
            small_thresh = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 1)
	    cv.Resize(imgThresh, small_thresh)

	    if col == "r":
		cv.ShowImage("red", small_thresh)
	    elif col == "b":
		cv.ShowImage("blue", small_thresh)
	    elif col == "y":
		cv.ShowImage("yellow", small_thresh) 
	    # centroid = spatial moments / area; (0, 0) when no blob found
	    if area > 0:
		posX = float(x_mov)/float(area)
	    	posY = float(y_mov)/float(area)
	    else:
		posX = 0
		posY = 0
	    dot_coords.append([posX, posY])	 
	
	# loop order above fixes the indices: 0 = red, 1 = yellow, 2 = blue
	r = dot_coords[0]
	y = dot_coords[1]
	b = dot_coords[2]
	# get side lengths
	y_r = self.dist(r[0], r[1], y[0], y[1])
	r_b = self.dist(b[0], b[1], r[0], r[1])
	y_b = self.dist(b[0], b[1], y[0], y[1])
	# apply law of cosines
	angle_in_rads = math.pow(y_r, 2) + math.pow(r_b, 2) - math.pow(y_b, 2)
	denom = 2.0 * y_r * r_b
	if denom > 0:
	     angle_in_rads /= 2.0 * y_r * r_b
	else:
	     angle_in_rads = 0
	rads = math.acos(angle_in_rads)
	# convert to degrees
	degs = rads * float(180.0 / math.pi)
	if degs < 0 or degs > 360: 
	     degs = 0
	return degs	
def get_box(x1, y1, x2, y2, x3, y3, x4, y4):
    """Given four corner points of a quadrilateral, return an axis-aligned
    crop box as (top_left_x, top_left_y, width, height).

    The box is centred on the quad's centroid.  Its height is the longer of
    the two "vertical" edges scaled by options.zoom_out_percent, and its
    width follows the aspect ratio options.plate_width / options.plate_height
    (presumably a licence-plate crop, given the option names -- confirm).

    Depends on module globals: options, math, cv.
    """
    # lengths of the two vertical edges (corner 1->4 and corner 2->3)
    height1 = int(
        round(math.sqrt((x1 - x4) * (x1 - x4) + (y1 - y4) * (y1 - y4))))
    height2 = int(
        round(math.sqrt((x3 - x2) * (x3 - x2) + (y3 - y2) * (y3 - y2))))

    height = height1
    if height2 > height:
        height = height2

    # grow the box by the configured zoom-out factor
    height *= options.zoom_out_percent

    # centroid of the four corners via image moments
    points = [(x1, y1), (x2, y2), (x3, y3), (x4, y4)]
    moment = cv.Moments(points)
    centerx = int(round(moment.m10 / moment.m00))
    centery = int(round(moment.m01 / moment.m00))

    # width preserves the aspect ratio used when training the detector
    training_aspect = options.plate_width / options.plate_height
    width = int(round(training_aspect * height))

    top_left_x = int(round(centerx - (width / 2)))
    top_left_y = int(round(centery - (height / 2)))

    return (top_left_x, top_left_y, width, int(round(height)))
Beispiel #4
0
def get_angle(pts):
    """Return the orientation of the point set *pts* in radians, using the
    standard central-moment formula 0.5 * atan(2*mu11 / (mu20 - mu02)).

    NOTE(review): raises ZeroDivisionError when mu20 == mu02, and `arctan`
    must come from an import not visible here (likely numpy) -- confirm.
    """
    moments = cv.Moments(pts, 0)
    mu11 = cv.GetCentralMoment(moments, 1, 1)
    mu20 = cv.GetCentralMoment(moments, 2, 0)
    mu02 = cv.GetCentralMoment(moments, 0, 2)
    print "Got moments"
    return 1 / 2.0 * arctan((2 * mu11 / float(mu20 - mu02)))
Beispiel #5
0
 def do1Image(self, image, prevpoints):
     """Locate the tracked blob inside each of four rectangular ROIs.

     For every (x, y, w, h) entry of self.RectanglePoints the thresholded
     image is cropped to that ROI, the contour centroid is computed, and
     the centroid is translated back into full-image coordinates.  When no
     blob is found (zero area) the matching entry of *prevpoints* is
     reused, or the ROI origin if there is no previous point yet.

     Returns a list of four [x, y] points.
     """
     # reference links kept from the original author:
     #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
     #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
     #http://opencv-users.1802565.n2.nabble.com/Python-cv-Moments-Need-Help-td6044177.html
     #http://stackoverflow.com/questions/5132874/change-elements-in-a-cvseq-in-python
     img = self.getThreshold(image)
     points = []
     for i in range(4):
         # restrict all following cv operations to this rectangle
         cv.SetImageROI(img, (int(
             self.RectanglePoints[i][0]), int(self.RectanglePoints[i][1]),
                              int(self.RectanglePoints[i][2]),
                              int(self.RectanglePoints[i][3])))
         storage = cv.CreateMemStorage(0)
         contours = cv.FindContours(img, storage)
         moments = cv.Moments(contours)
         moment10 = cv.GetSpatialMoment(moments, 1, 0)
         moment01 = cv.GetSpatialMoment(moments, 0, 1)
         area = cv.GetCentralMoment(moments, 0, 0)
         cv.ResetImageROI(img)
         if (area != 0):
             # centroid relative to ROI, shifted back to image coordinates
             x = self.RectanglePoints[i][0] + (moment10 / area)
             y = self.RectanglePoints[i][1] + (moment01 / area)
         else:
             if (prevpoints[i][0] == 0):
                 x = self.RectanglePoints[i][0]
                 y = self.RectanglePoints[i][1]
             else:
                 x = prevpoints[i][0]
                 y = prevpoints[i][1]
         points.append([x, y])
     return points
Beispiel #6
0
 def find_centroid(self, binary):
     """Return the (x, y) centre of mass of *binary* as integer pixel
     coordinates.  Raises ZeroDivisionError for an all-black image."""
     m = cv.Moments(cv.GetMat(binary))
     cx = int(m.m10 / m.m00)
     cy = int(m.m01 / m.m00)
     return (cx, cy)
Beispiel #7
0
    def getStandardizedRects(self):
        '''
        @return: the boxes centered on the target center of mass +- n_sigma*std
        @note: You must call detect() before getStandardizedRects() to see updated results.
        '''
        #create a list of the top-level contours found in the contours (cv.Seq) structure
        rects = []
        if len(self._contours) < 1: return (rects)
        seq = self._contours
        # walk the top-level contour list via h_next
        while not (seq == None):
            (x, y, w, h) = cv.BoundingRect(seq)
            if (cv.ContourArea(seq) >
                    self._minArea):  # and  self._filter(rect)
                r = pv.Rect(x, y, w, h)
                moments = cv.Moments(seq)
                m_0_0 = cv.GetSpatialMoment(moments, 0, 0)
                m_0_1 = cv.GetSpatialMoment(moments, 0, 1)
                m_1_0 = cv.GetSpatialMoment(moments, 1, 0)
                mu_2_0 = cv.GetCentralMoment(moments, 2, 0)
                mu_0_2 = cv.GetCentralMoment(moments, 0, 2)

                # centroid of the contour
                cx = m_1_0 / m_0_0
                cy = m_0_1 / m_0_0
                # box extents = 2 * sigma-multiplier * per-axis std-dev
                # (sqrt of the normalized second central moments)
                w = 2.0 * self._rect_sigma * np.sqrt(mu_2_0 / m_0_0)
                h = 2.0 * self._rect_sigma * np.sqrt(mu_0_2 / m_0_0)

                r = pv.CenteredRect(cx, cy, w, h)

                rects.append(r)
            seq = seq.h_next()

        # optional caller-supplied post-filter over the rect list
        if self._filter != None:
            rects = self._filter(rects)

        return rects
Beispiel #8
0
 def __init__(
     self, BW
 ):  #Constructor. BW is a binary image in the form of a numpy array
     self.BW = BW
     cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                          cv.CreateMemStorage(),
                          mode=cv.CV_RETR_EXTERNAL)  #Finds the contours
     counter = 0
     """
     Dynamic lists used to accumulate the per-blob measurements
     """
     centroid = list()
     cHull = list()
     contours = list()
     cHullArea = list()
     contourArea = list()
     while cs:  #Iterate through the CvSeq, cs.
         if abs(
                 cv.ContourArea(cs)
         ) > 2000:  #Filter out contours smaller than 2000 pixels in area
             contourArea.append(
                 cv.ContourArea(cs)
             )  #Append contourArea with the newest contour area
             m = cv.Moments(
                 cs)  #Find all the moments of the filtered contour
             try:
                 m10 = int(cv.GetSpatialMoment(m, 1,
                                               0))  #Spatial moment m10
                 m00 = int(cv.GetSpatialMoment(m, 0,
                                               0))  #Spatial moment m00
                 m01 = int(cv.GetSpatialMoment(m, 0,
                                               1))  #Spatial moment m01
                 centroid.append(
                     (int(m10 / m00), int(m01 / m00))
                 )  #Append the centroid list with the contour's newest centre-of-gravity coordinates
                 convexHull = cv.ConvexHull2(
                     cs, cv.CreateMemStorage(), return_points=True
                 )  #Find the convex hull of cs as a CvSeq
                 cHullArea.append(
                     cv.ContourArea(convexHull)
                 )  #Add the convex hull area to the cHullArea list
                 cHull.append(
                     list(convexHull)
                 )  #Add the list form of the convex hull to the cHull list
                 contours.append(
                     list(cs)
                 )  #Add the list form of the contour to the contours list
                 counter += 1  #Bump the counter to count how many blobs there are
             except:  #NOTE: silently skips blobs whose m00 is zero (division by zero)
                 pass
         cs = cs.h_next()  #Go to the next contour in the cs CvSeq
     """
     Below the accumulated lists are made into fields for later reference
     """
     self.centroid = centroid
     self.counter = counter
     self.cHull = cHull
     self.contours = contours
     self.cHullArea = cHullArea
     self.contourArea = contourArea
def extract_circles(contours, rgb):
    """Convert blob contours into circles and steer toward large blobs.

    Each contour whose pixel area exceeds OBJECT_AREA is converted to a
    circle (x, y, radius*1.5) around its centroid, where radius treats the
    blob as a disc of equal area.  Blobs low in the frame (y > 100) also
    trigger adjust_carrot() for visual servoing.

    NOTE(review): the *rgb* parameter is unused in this body -- confirm.
    Depends on module globals: current_pose_editor, side, OBJECT_AREA,
    adjust_carrot.
    """
    global current_pose_editor, side
    circles = []
    for i in contours:
        moments = cv.Moments(cv.fromarray(i), binary=1)
        area = cv.GetCentralMoment(moments, 0, 0)  # m00 == pixel count

        if area > OBJECT_AREA:
            x = int(cv.GetSpatialMoment(moments, 1, 0) / area)
            y = int(cv.GetSpatialMoment(moments, 0, 1) / area)
            # radius of a disc with the same area as the blob
            radius = int(math.sqrt(area / math.pi))
            circles.append((x, y, int(radius * 1.5)))
            if (y > 100):
                adjust_carrot(x, y, area)
                #NOTE(review): adjust_carrot was already called just above,
                #so the branches below invoke it a second time for the same
                #blob -- confirm that double call is intended.
                if (side == 'left' and x < 420):
                    adjust_carrot(x, y, area)  #use just visual servo
                elif (side == 'right' and x > 420):
                    adjust_carrot(x, y, area)  #use just visual servo
            '''
            point = check_lidar((x,y),radius)               #use if you want to use lidar to confirm waypoint
            if (point[0]):
                print 'going to:',point[1]
                waypoint.send_goal_and_wait(current_pose_editor.relative(np.array([point[1][0], point[1][1] + .5, 0])).as_MoveToGoal(speed = .5))
                circles.append((x,y,int(radius*1.5)))
            '''
    return circles
Beispiel #10
0
    def getWatershedMask(self):
        '''
        Uses the watershed algorithm to refine the foreground mask.
        Currently, this doesn't work well on real video...maybe grabcut would be better.
        '''
        # 32-bit marker image required by cv.Watershed
        cvMarkerImg = cv.CreateImage(self._fgMask.size, cv.IPL_DEPTH_32S, 1)
        cv.SetZero(cvMarkerImg)

        #fill each contour with a different gray level to label connected components
        seq = self._contours
        c = 50
        while not (seq == None) and len(seq) != 0:
            if cv.ContourArea(seq) > self._minArea:
                c += 10
                # seed a marker dot at the contour centroid
                moments = cv.Moments(seq)
                m00 = cv.GetSpatialMoment(moments, 0, 0)
                m01 = cv.GetSpatialMoment(moments, 0, 1)
                m10 = cv.GetSpatialMoment(moments, 1, 0)
                centroid = (int(m10 / m00), int(m01 / m00))
                cv.Circle(cvMarkerImg, centroid, 3, cv.RGB(c, c, c),
                          cv.CV_FILLED)
            seq = seq.h_next()

        # c starts at 50, so this guard is always true; kept as in original
        if (c > 0):
            img = self._annotateImg.asOpenCV()
            cv.Watershed(img, cvMarkerImg)

        # scale the 32-bit label image down to an 8-bit mask for return
        tmp = cv.CreateImage(cv.GetSize(cvMarkerImg), cv.IPL_DEPTH_8U, 1)
        cv.CvtScale(cvMarkerImg, tmp)
        return pv.Image(tmp)
Beispiel #11
0
 def __init__(
         self, BW
 ):  #Constructor. BW is a binary image in the form of a numpy array
     self.BW = BW
     cs = cv.FindContours(cv.fromarray(self.BW.astype(np.uint8)),
                          cv.CreateMemStorage(),
                          mode=cv.CV_RETR_EXTERNAL)  #Finds the contours
     counter = 0
     """
     These are dynamic lists used to store variables
     """
     centroid = list()
     cHull = list()
     contours = list()
     cHullArea = list()
     contourArea = list()
     while cs:  #Iterate through the CvSeq, cs.
         if abs(
                 cv.ContourArea(cs)
         ) > 2000:  #Filters out contours smaller than 2000 pixels in area
             contourArea.append(cv.ContourArea(
                 cs))  #Appends contourArea with newest contour area
             m = cv.Moments(
                 cs)  #Finds all of the moments of the filtered contour
             try:
                 m10 = int(cv.GetSpatialMoment(m, 1,
                                               0))  #Spatial moment m10
                 m00 = int(cv.GetSpatialMoment(m, 0,
                                               0))  #Spatial moment m00
                 m01 = int(cv.GetSpatialMoment(m, 0,
                                               1))  #Spatial moment m01
                 centroid.append(
                     (int(m10 / m00), int(m01 / m00))
                 )  #Appends centroid list with newest coordinates of centroid of contour
                 convexHull = cv.ConvexHull2(
                     cs, cv.CreateMemStorage(), return_points=True
                 )  #Finds the convex hull of cs in type CvSeq
                 cHullArea.append(
                     cv.ContourArea(convexHull)
                 )  #Adds the area of the convex hull to cHullArea list
                 cHull.append(
                     list(convexHull)
                 )  #Adds the list form of the convex hull to cHull list
                 contours.append(
                     list(cs)
                 )  #Adds the list form of the contour to contours list
                 counter += 1  #Adds to the counter to see how many blobs are there
             except:  #NOTE: bare except silently skips blobs with m00 == 0 (division by zero)
                 pass
         cs = cs.h_next()  #Goes to next contour in cs CvSeq
     """
     Below the variables are made into fields for referencing later
     """
     self.centroid = centroid
     self.counter = counter
     self.cHull = cHull
     self.contours = contours
     self.cHullArea = cHullArea
     self.contourArea = contourArea
Beispiel #12
0
def detect_and_draw(img):
    t1 = time.time()

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # blur the source image to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
    #cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)

    # White
    sensitivity = 15
    cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255),
                thresholded_img)

    # Red
    #cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)

    # Blue
    #cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)

    # Green
    #cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (area > 5000):
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area
        x = int(round(x))
        y = int(round(y))

        #create an overlay to mark the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

        cv.Circle(overlay, (x, y), 2, (0, 0, 0), 20)
        cv.Add(img, overlay, img)
        #add the thresholded image back to the img so we can see what was
        #left after it was applied
        #cv.Merge(thresholded_img, None, None, None, img)
        t2 = time.time()
        message = "Color tracked!"
        print "detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y)

    cv.ShowImage("Color detection", img)
Beispiel #13
0
    def computeMoment(self, contour):
        """Compute raw moments of *contour* (treated as binary).

        Returns (m10, m01, m00) -- the two first-order spatial moments and
        the area -- or None when the contour has zero area.
        """
        m = cv.Moments(contour, 1)

        mass = cv.GetSpatialMoment(m, 0, 0)
        if mass == 0: return
        mx = cv.GetSpatialMoment(m, 1, 0)
        my = cv.GetSpatialMoment(m, 0, 1)
        return mx, my, mass
Beispiel #14
0
def center_of_mass(contour):
    """Return the (x, y) centre of mass of *contour* as floats.

    Raises ZeroDivisionError when the contour has zero area (m00 == 0).
    """
    m = cv.Moments(contour)
    mass = cv.GetSpatialMoment(m, 0, 0)
    cx = cv.GetSpatialMoment(m, 1, 0) / mass
    cy = cv.GetSpatialMoment(m, 0, 1) / mass
    return cx, cy
def get_principle_info(shape):
    """Summarise *shape* via its image moments.

    Returns a tuple (center, top_pt, theta, scale) where `center` comes
    from get_center(), `theta` from get_angle(), and `top_pt`/`scale`
    from get_top().
    """
    m = cv.Moments(shape, 0)
    centroid = get_center(m)
    orientation = get_angle(m)
    top_pt, scale = get_top(shape, centroid, orientation)
    return (centroid, top_pt, orientation, scale)
Beispiel #16
0
def blob_statistics(binary_image,
                    max_area=99999.0,
                    max_dim=99999.0):
    """Find contours in *binary_image* and return per-blob statistics.

    Each kept blob yields a dict {'area', 'centroid', 'rect'} where `rect`
    is a Rect bounding box, `area` is the bounding-box area (used instead
    of moment m00, which the original author found unreliable) and
    `centroid` is the bounding-box centre.  Blobs larger than *max_area*
    or wider/taller than *max_dim* are erased from *binary_image* in place
    (drawn over in black) instead of being reported.

    Fixes versus original: the computed `statistics` list was never
    returned; the dead `if False:` moment-based branch, the unused
    cv.Moments call and the unused `original_ptr` are removed; the
    Python-2-only `except Exception, e` spelling is modernised.
    """
    statistics = []
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(binary_image, storage, cv.CV_RETR_TREE,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

    while contours != None:
        try:
            bx, by, bwidth, bheight = cv.BoundingRect(contours, 0)
            bounding_rect = Rect(bx, by, bwidth, bheight)
            # approximation to area since cvMoments' area seems broken
            area = bounding_rect.width * bounding_rect.height

            # centroid approximated by the bounding-box centre
            cx = bx + bwidth / 2. if bwidth > 0 else bx
            cy = by + bheight / 2. if bheight > 0 else by
            centroid = (cx, cy)

            if area > max_area or bounding_rect.width > max_dim or bounding_rect.height > max_dim:
                # too big to be a blob of interest: erase it from the mask
                cv.DrawContours(binary_image, contours, cv.Scalar(0),
                                cv.Scalar(0), 0, cv.CV_FILLED)
            else:
                statistics.append({
                    'area': area,
                    'centroid': centroid,
                    'rect': bounding_rect
                })
            contours = contours.h_next()
        except Exception:
            # OpenCV bug: some contour objects raise while being inspected;
            # stop iterating rather than loop forever.
            break
    return statistics
def contourCenter(thisContour, smoothness=4): 
    """Return the (x, y) centroid of *thisContour* as ints, or None when
    the contour area is <= 2.0.

    NOTE(review): the positions_x/positions_y buffers are recreated on
    every call, so no smoothing across frames actually happens; the
    zero-padded average and the final `* smoothness` cancel out (exactly
    when smoothness is a power of two).  Effectively this returns the raw
    centroid -- confirm whether cross-frame smoothing was intended.
    """
    positions_x, positions_y = [0] * smoothness, [0] * smoothness 
    if cv.ContourArea(thisContour) > 2.0:
        moments = cv.Moments(thisContour, 1)
        positions_x.append(cv.GetSpatialMoment(moments, 1, 0) / cv.GetSpatialMoment(moments, 0, 0))
        positions_y.append(cv.GetSpatialMoment(moments, 0, 1) / cv.GetSpatialMoment(moments, 0, 0))
        positions_x, positions_y = positions_x[-smoothness:], positions_y[-smoothness:]
        pos_x = (sum(positions_x) / len(positions_x))
        pos_y = (sum(positions_y) / len(positions_y))
        return (int(pos_x * smoothness), int(pos_y * smoothness))
Beispiel #18
0
def main():
    """Poll the camera and, on every 20th frame, threshold it with the
    configured colour range and print whether the detected object is to
    the left ("izquierda"), right ("derecha") or centred ("centrado").

    Depends on module global `config` for frame size (ancho/alto = width/
    height), colour range and position limits.  Loops forever.
    """
    pr_window = "imagen"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(pr_window, 1)

    #    set the window size |-| comment out when no window display is needed
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, config.ancho)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, config.alto)
    delay = 0
    while True:
        if (not (delay == 20)):
            # drain 19 of every 20 frames so processing keeps up
            delay += 1
            img = cv.QueryFrame(capture)
        else:
            delay = 0
            frame = cv.QueryFrame(capture)
            maskN = cv.CreateImage(cv.GetSize(frame), 8, 1)
            hsvN = cv.CloneImage(frame)

            cv.Smooth(frame, frame, cv.CV_BLUR, 3)

            # threshold the configured colour range into maskN
            cv.CvtColor(frame, hsvN, cv.CV_BGR2HSV)
            cv.InRangeS(hsvN, config.min_range, config.max_range, maskN)

            moment = cv.Moments(cv.GetMat(maskN), 0)
            a = cv.GetCentralMoment(moment, 0, 0)  # m00 == blob area

            if a > config.min_area:
                # centroid x-coordinate of the detected blob
                X = int(cv.GetSpatialMoment(moment, 1, 0) / a)
                print "X: " + str(X)
                print "min: " + str(config.min_x)
                print "max: " + str(config.max_x)
                if X > config.max_x:
                    print "derecha"
                elif X < config.min_x:
                    print "izquierda"
                else:
                    print "centrado"
            else:
                print "objeto no detectado o muy pequeno"

            cv.ShowImage(pr_window, maskN)


#        uncomment for debugging
#        X = int(cv.GetSpatialMoment (moment, 1, 0) / a)
#        print 'x: ' + str (X)  + ' area: ' + str (a)

# uncomment to break out of the loop and exit on any key press
#        if cv.WaitKey (100) != -1:
#            break

    return
def track(bgr_image, threshold=100):
    '''Accepts BGR image and optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of the green-blob centroid if found
                None if no centroid was found, if the centroid falls exactly
                on the image centre (see sentinel note below), or if the
                user hit ESC
    '''
    
    # Extract bytes, width, and height
    bgr_bytes = bgr_image.tostring()
    width = bgr_image.width
    height = bgr_image.height
    
    # Create separate red, green, and blue image matrices from bytes
    r_image = _create_grayscale_mat(bgr_bytes, width, height, 2)
    b_image = _create_grayscale_mat(bgr_bytes, width, height, 0)
    g_image = _create_grayscale_mat(bgr_bytes, width, height, 1)

    # Remove 1/3 of red and blue components from green
    threes_image = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1)  
    cv.Set(threes_image, 3)
    _div_and_sub(g_image, r_image, threes_image)
    _div_and_sub(g_image, b_image, threes_image)

    # Threshold and erode green image
    cv.Threshold(g_image, g_image, threshold, 255, cv.CV_THRESH_BINARY)
    cv.Erode(g_image, g_image)

    # Find centroid of eroded image
    moments = cv.Moments(cv.GetMat(g_image), 1) # binary flag
    centroid_x = _centroid(moments, 1, 0)
    centroid_y = _centroid(moments, 0, 1)

    # Sentinel: the image centre stands for "no centroid".  NOTE(review):
    # a real centroid at exactly (width/2, height/2) is therefore also
    # reported as None -- confirm that is acceptable.
    ctr = (width/2,height/2)
    err = ctr

    # Use centroid if it exists
    if centroid_x != None and centroid_y != None:

        ctr = (centroid_x, centroid_y)

        # Put black circle in at centroid in image
        cv.Circle(bgr_image, ctr, 4, (0,0,0))

    # Display full-color image
    cv.NamedWindow(WINDOW_NAME)
    cv.ShowImage(WINDOW_NAME, bgr_image)

    # Force image display, setting centroid to None on ESC key input
    if cv.WaitKey(5) == 27:
        ctr = None
    
    # Return coordinates of centroid
    return ctr if ctr != err else None
Beispiel #20
0
def has_intersection(amap, apoly, maxwidth, maxheight):
    """Return True when polygon *apoly* overlaps any nonzero pixel of the
    mask *amap*.

    The polygon is rasterised into a fresh single-channel image, ANDed
    with the mask, and the overlap is detected via the binary m00 moment
    (the count of set pixels).
    """
    size = (maxwidth, maxheight)
    poly_mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.FillPoly(poly_mask, [apoly], im.color.blue)

    overlap = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.And(poly_mask, amap, overlap)

    moments = cv.Moments(cv.GetMat(overlap), True)
    return bool(cv.GetSpatialMoment(moments, 0, 0))
def main():
    """Camera colour-tracking loop: threshold each frame, find the largest
    blob's centroid, and draw a persistent trail of its movement.

    Depends on module globals posX/posY (must exist before the first call,
    since they are read for lastX/lastY) and on the helpers fido and
    GetThresholdedImage.  Exits on any key press.
    """
    color_tracker_window = "output"
    thresh_window = "thresh"
    capture = cv.CaptureFromCAM(-1)
    cv.NamedWindow(color_tracker_window, 1)
    cv.NamedWindow(thresh_window, 1)
    imgScrible = None
    global posX
    global posY

    fido.init_servos()

    while True:
        frame = cv.QueryFrame(capture)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)

        # lazily allocate the scribble layer that accumulates the trail
        if (imgScrible is None):
            imgScrible = cv.CreateImage(cv.GetSize(frame), 8, 3)

        imgThresh = GetThresholdedImage(frame)

        mat = cv.GetMat(imgThresh)
        #Calculating the moments
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)

        #lastX and lastY stores the previous positions
        lastX = posX
        lastY = posY
        #Finding a big enough blob
        if (area > 100000):

            #Calculating the coordinate postition of the centroid
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            print 'x: ' + str(posX) + ' y: ' + str(posY) + ' area: ' + str(
                area)
            #drawing lines to track the movement of the blob
            if (lastX > 0 and lastY > 0 and posX > 0 and posY > 0):
                cv.Line(imgScrible, (posX, posY), (lastX, lastY),
                        cv.Scalar(0, 255, 255), 5)
            #Adds the three layers and stores it in the frame
            #frame -> it has the camera stream
            #imgScrible -> it has the line tracking the movement of the blob
            cv.Add(frame, imgScrible, frame)

        cv.ShowImage(thresh_window, imgThresh)
        cv.ShowImage(color_tracker_window, frame)
        # exit on any key press
        c = cv.WaitKey(10)
        if (c != -1):
            break
Beispiel #22
0
class uav_image_orange:
    """ROS node helper: subscribes to the UAV downward camera, thresholds
    each frame for orange, and publishes the blob's bearing via module
    globals (xc_orange, yc_orange, nxc_orange, nyc_orange, bearing).
    """

    def __init__(self):
        self.image_pub = rospy.Publisher("/image", Image)

        cv2.namedWindow("Image window", 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/uav/downward_cam/camera/image",
                                          Image, self.callback)

    def callback(self, data):
        """Per-frame callback: find the orange blob centroid and update the
        bearing globals.
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError, e:
            # NOTE(review): execution continues after this print, so
            # cv_image may be unbound below when conversion fails -- confirm.
            print e

    ### Start of Image Processing ######
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
        lower_orange = np.array([10, 0, 0])
        upper_orange = np.array([25, 255, 255])
        mask = cv2.inRange(hsv, lower_orange, upper_orange)
        mat = cv.GetMat(cv.fromarray(mask))
        moments = cv.Moments(mat)
        # sanity bounds on the raw moments; also rejects m00 == 0 frames
        # only incidentally -- NOTE(review): m00 == 0 still divides below.
        if ((moments.m01 >
             (-10000000000000)) and (moments.m01 < 10000000000000)
                and (moments.m00 >
                     (-1000000000000)) and (moments.m00 < 10000000000000)
                and (moments.m10 >
                     (-1000000000000)) and (moments.m10 < 10000000000000)):
            global yc_orange
            global xc_orange
            yc_orange = moments.m01 / moments.m00
            xc_orange = moments.m10 / moments.m00
            width, height = cv.GetSize(mat)

            # centroid relative to the image centre (320x240 assumed)
            global nxc_orange
            global nyc_orange
            nxc_orange = xc_orange - 320
            nyc_orange = yc_orange - 240
            focal = 1690.0  #1097.51
            q = nxc_orange / focal
            global bearing
            # horizontal bearing to the blob in degrees
            bearing = math.atan(q) * ((180.0) / (3.14159))

            ### End of Image Processing ######
        cv2.waitKey(1)
        # NOTE(review): this second conversion repeats the one at the top
        # of the callback and its result is unused -- confirm it can go.
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError, e:
            print e
Beispiel #23
0
def track(img_bytes, img_width, img_height):
    '''Track the green blob in a raw BGR frame.

    Accepts BGR image bytes plus the frame width and height.
    Returns the (x, y) centroid of the detected blob, None when no
    blob is found, or (0, 0) when the user pressed ESC.
    '''
    # Wrap the raw bytes in a full-colour image header.
    frame = _create_image_header(img_width, img_height, 3)
    cv.SetData(frame, img_bytes, img_width * 3)

    # Split the frame into single-channel red, blue and green planes.
    red_plane = _create_grayscale_image(img_bytes, img_width, img_height, 2)
    blue_plane = _create_grayscale_image(img_bytes, img_width, img_height, 0)
    green_plane = _create_grayscale_image(img_bytes, img_width, img_height, 1)

    # Subtract one third of the red and blue planes from the green plane.
    divisor = cv.CreateImage((img_width, img_height), cv.IPL_DEPTH_8U, 1)
    cv.Set(divisor, 3)
    _div_and_sub(green_plane, red_plane, divisor)
    _div_and_sub(green_plane, blue_plane, divisor)

    # Binarise what is left of the green plane, then erode away speckle.
    cv.Threshold(green_plane, green_plane, THRESHOLD, 255, cv.CV_THRESH_BINARY)
    cv.Erode(green_plane, green_plane)

    # Centroid of the eroded binary image via spatial moments.
    moments = cv.Moments(cv.GetMat(green_plane), 1)  # binary flag
    cx = _centroid(moments, 1, 0)
    cy = _centroid(moments, 0, 1)

    # No centroid unless both coordinates were found.
    ctr = None
    if cx != None and cy != None:
        ctr = (cx, cy)
        # Mark the centroid with a small black circle.
        cv.Circle(frame, ctr, 4, (0, 0, 0))

    # Display the annotated full-colour frame.
    cv.NamedWindow(WINDOW_NAME)
    cv.ShowImage(WINDOW_NAME, frame)

    # ESC maps to the (0, 0) sentinel.
    if cv.WaitKey(5) == 27:
        ctr = (0, 0)

    return ctr
 def run(self):
     """Capture frames forever, tracking a purple blob, until ESC."""
     while True:
         frame = cv.QueryFrame(self.capture)

         # Smooth first so colour noise does not fragment the blob.
         cv.Smooth(frame, frame, cv.CV_BLUR, 3)

         # Hue is the easiest channel to threshold on, so work in HSV.
         hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
         cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)

         # Keep only pixels in the purple hue band (115-135).
         # OpenCV uses 0-180 as the hue range for the HSV model.
         mask = cv.CreateImage(cv.GetSize(hsv), 8, 1)
         cv.InRangeS(hsv, (115, 75, 75), (135, 255, 255), mask)

         # Moments give the blob's area and centre of mass.
         mask_mat = cv.GetMat(mask)
         moments = cv.Moments(mask_mat, 0)
         area = cv.GetCentralMoment(moments, 0, 0)

         # Small areas are video noise -- skip them.
         if area > 100000:
             # Centre of mass = first-order moments divided by area.
             cx = int(cv.GetSpatialMoment(moments, 1, 0) / area)
             cy = int(cv.GetSpatialMoment(moments, 0, 1) / area)

             # Stamp a white marker at the centre of the tracked blob.
             overlay = cv.CreateImage(cv.GetSize(frame), 8, 3)
             cv.Circle(overlay, (cx, cy), 2, (255, 255, 255), 20)
             cv.Add(frame, overlay, frame)

             # Fold the threshold mask back into the frame so the
             # effect of the thresholding stays visible.
             cv.Merge(mask, None, None, None, frame)

         # Show the (possibly annotated) frame.
         cv.ShowImage(color_tracker_window, frame)

         if cv.WaitKey(10) == 27:
             break
Beispiel #25
0
    def _get_pos_spatial(self, th_img):
	print "Getting spacial position (?)"
        moments = cv.Moments(cv.GetMat(th_img))
        mom10 = cv.GetSpatialMoment(moments, 1, 0)
        mom01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0)

        if area > 10:
            pos = [int(mom10/area), int(mom01/area)]
        else:
            pos = None

        return pos
Beispiel #26
0
def getBinaryClean(frame):
    """Return a denoised copy of a binary *frame*.

    Finds the external contours of *frame* and redraws only those with
    area > 20, filled white, onto a fresh single-channel image --
    effectively discarding small noise blobs.
    """
    cleaned = cv.CreateImage(cv.GetSize(frame), 8, 1)
    work = cv.CloneImage(frame)  # FindContours mutates its input
    contour = cv.FindContours(work,
                              cv.CreateMemStorage(0),
                              mode=cv.CV_RETR_EXTERNAL)
    # Walk the contour chain, keeping only sufficiently large blobs.
    while contour:
        blob_area = cv.GetCentralMoment(cv.Moments(contour), 0, 0)
        if blob_area > 20:
            cv.DrawContours(cleaned, contour, (255, 255, 255),
                            (255, 255, 255), 2, cv.CV_FILLED)
        contour = contour.h_next()
    return (cleaned)
Beispiel #27
0
 def push(self, contour):
     """Record *contour* and its centroid, keeping bounded histories."""
     self.historyContours.append(contour)

     # Centre of mass from image moments; fall back to (0, 0) for a
     # degenerate (zero-area) contour.
     moms = cv.Moments(contour)
     if moms.m00:
         center = (moms.m10 / moms.m00, moms.m01 / moms.m00)
     else:
         center = (0, 0)
     self.positions.append(center)

     # Trim both histories to their configured maximum lengths.
     if len(self.historyContours) > self.NCONTOURS:
         self.historyContours.pop(0)
     if len(self.positions) > 2 * self.NCONTOURS:
         self.positions.pop(0)
Beispiel #28
0
def find_orientation(mask, center_point):
    """Estimate the orientation of the blob in *mask*, in degrees.

    Blacks out a disc of radius 19 around *center_point* so only the
    asymmetric remainder of the blob contributes, takes the bearing
    from the remaining mass's centroid to *center_point*, then rotates
    by 180 and wraps the result into [0, 360).
    """
    # Erase the central disc in place.
    cv.Circle(mask, center_point, 19, cv.RGB(0, 0, 0), -1)

    moments = cv.Moments(mask, 1)
    m00 = cv.GetSpatialMoment(moments, 0, 0)
    m10 = cv.GetSpatialMoment(moments, 1, 0)
    m01 = cv.GetSpatialMoment(moments, 0, 1)

    # Guard the divisions below against an empty mask.
    if m00 == 0:
        m00 = 0.01

    center_of_mass = (round(m10 / m00), round(m01 / m00))

    return (int(calculate_bearing(center_of_mass, center_point)) - 180) % 360
Beispiel #29
0
    def get_back_dir(self, t_contour, centroid, rad, t_dir, thickness, size):
        """Return the vector from *centroid* towards the back of the T marker.

        Erases the T's stand by drawing a thick black line along
        ``t_dir`` through ``centroid``, re-detects the remaining hat
        pieces as contours, and returns the vector from ``centroid`` to
        their midpoint.  Returns None when no contours survive.

        NOTE(review): ``rad`` is unused here; it belongs to the
        commented-out circular-ROI variant below -- confirm before
        removing it from the signature.
        """
        roi = self.draw_contour(t_contour, size)
        #roi = self.get_circular_roi(centroid, rad, bin_im)
        # draw a thick line from tip of 'dir_vec' to tip of the inverted
        # 'dir_vec'. This is done, to ensure that the 'stand' of the T is
        # removed and it's hat is split in two.
        cv.Line(roi, Math.add_vectors(t_dir, centroid),
                Math.add_vectors(Math.invert_vec(t_dir), centroid),
                ColorSpace.RGB_BLACK, thickness)

        # FindContours consumes its input, so work on a copy of the ROI.
        tmp_im = cv.CreateImage(cv.GetSize(roi), cv.IPL_DEPTH_8U, 1)
        cv.Copy(roi, tmp_im)  # create image for FindContours
        seq = cv.FindContours(tmp_im, cv.CreateMemStorage(0),
                              cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)

        # sort contours from ROI and try to take two with the biggest area
        contours = contours_area_sort(seq)[-2:]
        if not contours: return None
        nr_contours = len(contours)
        if nr_contours == 2:  # if two available, get vec to midpoint
            pt1 = get_contour_center(cv.Moments(contours[0]))
            pt2 = get_contour_center(cv.Moments(contours[1]))
            # Midpoint of the two hat halves (add_vectors scales by 1/2).
            mid = Math.add_vectors(pt1, pt2, 1 / 2.0)
        elif nr_contours:  # if only one, return it as mid point
            mid = get_contour_center(cv.Moments(contours[0]))
        # no contours found, check failed, get prev value
        else:
            return None

        mid = Math.int_vec(mid)
        dir_vec = Math.sub_vectors(mid, centroid)

        # show vector (debug visualisation of the back direction)
        cv.Line(roi, centroid, Math.add_vectors(centroid, dir_vec),
                ColorSpace.RGB_WHITE, 1)
        cv.ShowImage('w', roi)
        return dir_vec  # return the back direction vec
Beispiel #30
0
class uav_image:

  def __init__(self):
    self.image_pub = rospy.Publisher("/image",Image)

    cv2.namedWindow("Image window", 1)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/uav/downward_cam/camera/image",Image,self.callback)

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError, e:
      print e

    (rows,cols,channels) = cv_image.shape
    if cols > 60 and rows > 60 :
      cv2.circle(cv_image, (50,50), 10, 255)
    ### Start of Image Processing ######

    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0,100,100])
    upper_red = np.array([10,255,255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    mat=cv.GetMat(cv.fromarray(mask))
    moments=cv.Moments(mat)
    if((moments.m01>(-10000000000000)) and (moments.m01<10000000000000) and (moments.m00>(-1000000000000)) and (moments.m00<10000000000000) and (moments.m10>(-1000000000000)) and (moments.m10<10000000000000)):
        yc= moments.m01/moments.m00
        xc=moments.m10/moments.m00
        global ncx
        global nyc
        nxc=xc-640
        nyc=yc-283
	focal=1690.0#1097.51
	q= nxc/focal
	global bearing
	bearing=math.atan(q)*((180.0)/(3.14159))
        cv2.circle(cv_image,(int(xc),int(yc)),10,(0,0,255),-1)
        #cv2.imshow('Thresholded', mask)

    ### End of Image Processing ######
    cv2.imshow('UAV Image', cv_image)
    cv2.waitKey(3)

    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
      
    except CvBridgeError, e:
      print e