Beispiel #1
0
    frame = cv.QueryFrame(cam)
                
    #blur the source image to reduce color noise 
    cv.Smooth(frame, frame, cv.CV_BLUR, 3); 
    
    #convert the image to hsv(Hue, Saturation, Value) so its  
    #easier to determine the color to track(hue) 
    hsv_frame = cv.CreateImage(cv.GetSize(frame), 8, 3) 
    cv.CvtColor(frame, hsv_frame, cv.CV_BGR2HSV) 
    
    #limit all pixels that don't match our criteria, in this case we are  
    #looking for purple but if you want you can adjust the first value in  
    #both turples which is the hue range(120,140).  OpenCV uses 0-180 as  
    #a hue range for the HSV color model 
    thresholded_frame =  cv.CreateImage(cv.GetSize(hsv_frame), 8, 1) 
    cv.InRangeS(hsv_frame, (0, 80, 0), (30, 255, 255), thresholded_frame)  # works fine during day
#    cv.InRangeS(hsv_frame, (10, 10, 0), (15, 255, 255), thresholded_frame)  # works fine evenings
    
    contours = cv.FindContours(cv.CloneImage(thresholded_frame), cv.CreateMemStorage())
    
    
    
    
    
    if len(contours)!=0:
        #determine the objects moments and check that the area is large  
        #enough to be our object 
        moments = cv.Moments(contours,1) 
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0) 
Beispiel #2
0
# Motion-tracking main loop: frame differencing with a fading motion
# accumulator, plus pyramidal Lucas-Kanade optical flow on a periodically
# refreshed "good features to track" corner set.
# NOTE(review): relies on globals defined elsewhere in the file
# (capture, window1, flag_true_image, gray_image, render_image,
# register1_image, register2_image, accumulator, sum_image, pyramid1,
# pyramid2, corners, prev_image, counter, skip, eigen_image, temp_image,
# corner_count, quality, min_distance, threshold/fading constants).
flag = False  # becomes True once cv.GoodFeaturesToTrack has run at least once

while True: #Main loop
    # Acquire the next camera frame.
    img = cv.QueryFrame(capture)
    # Show either the raw grayscale or the rendered motion image.
    if flag_true_image:
        cv.ShowImage(window1, gray_image)
    else:
        cv.ShowImage(window1, render_image)
    # Image processing: grayscale, blur, difference against the previous
    # frame, threshold, dilate, accumulate, fade, re-threshold.
    cv.CvtColor(img, gray_image, cv.CV_RGB2GRAY)
    cv.Copy(gray_image, register1_image)
    cv.Smooth(register1_image, register1_image, cv.CV_GAUSSIAN, 3, 3)
    cv.AbsDiff(register1_image, register2_image, accumulator)
    cv.InRangeS(accumulator, (threshold_limit1_lower), (threshold_limit1_upper), accumulator)
    cv.Dilate(accumulator, accumulator, None, 2)
    cv.Add(accumulator, sum_image, sum_image, accumulator)
    cv.SubS(sum_image, (fading_factor), sum_image)  # fade old motion away
    cv.InRangeS(sum_image, (threshold_limit2_lower), (threshold_limit2_upper), accumulator)
    cv.Copy(register1_image, register2_image)  # current frame becomes "previous"
    # Motion detection: track the current corner set with pyramidal LK flow.
    new_corners, status, track_error = cv.CalcOpticalFlowPyrLK(prev_image, gray_image, pyramid1, pyramid2, corners, (10,10), 2, (cv.CV_TERMCRIT_ITER, 10, 0), 0)
    counter = (counter + 1) % skip
    if(counter == 0):
        # Refresh the tracked feature set every `skip` frames.
        corners = cv.GoodFeaturesToTrack(gray_image, eigen_image, temp_image, cornerCount = corner_count, qualityLevel = quality, minDistance = min_distance) #Good features to track
        flag = True
    cv.Copy(img, render_image)
    # Render the thresholded motion accumulator as the display image.
    cv.CvtColor(accumulator, render_image, cv.CV_GRAY2RGB)
    #cv.Copy(img, render_image)
    cv.Copy(gray_image, prev_image)
Beispiel #3
0
    def show(self):
        """Process and display the current frame.

        Loads the image at self.files[self.index], draws a vertical
        reference line through the (offset) centre, then — if a ROI is
        set — scans two regions row by row for the brightest pixel: the
        ROI itself and the equally-sized region immediately to its
        left.  Each hit is triangulated into a 3-D point using the
        camera angle and the per-frame rotation angle
        (self.angles[self.index]); the second region's points are
        additionally rotated by 300 degrees.  In 'record' mode the
        points are appended to self.points / self.points2; in 'mask'
        mode the thresholded mask is shown instead of the annotated
        source image.
        """
        source = cv.LoadImage(self.files[self.index])
        width, height = cv.GetSize(source)

        center = (width / 2) + self.offset

        cv.Line(source, (center, 0), (center, height), (0, 255, 0), 1)

        if self.roi:
            # ROI corners; note x/y/a/b are freely rebound below.
            x, y, a, b = self.roi

            print self.roi

            # ROI dimensions; the third channel of the ROI becomes the mask.
            width, height = ((a - x), (b - y))
            mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

            cv.SetImageROI(source, (x, y, width, height))
            cv.Split(source, None, None, mask, None)

            gray = cv.CloneImage(mask)

            # Threshold, then keep only the original intensities in range.
            cv.InRangeS(mask, self.thresholdMin, self.thresholdMax, mask)
            cv.And(mask, gray, gray)

            line = []
            points = []

            # Scan each row for its brightest pixel (the laser line).
            for i in range(0, height - 1):
                row = cv.GetRow(gray, i)

                # NOTE(review): cv.MinMaxLoc returns (minVal, maxVal,
                # minLoc, maxLoc); with this unpacking the local name
                # 'maxVal' actually holds the max *location*, so
                # maxVal[0] below is the x coordinate of the maximum.
                minVal, minLoc, maxLoc, maxVal = cv.MinMaxLoc(row)

                y = i
                x = maxVal[0]
                # Default point when no bright pixel is found in this row.
                point = (0, 0, height - i)

                if x > 0:
                    line.append((x, y))

                    # Triangulate: distance along the ray from the camera
                    # angle, then rotate by this frame's turntable angle.
                    s = x / sin(radians(self.camAngle))
                    x = s * cos(self.angles[self.index])
                    z = height - y
                    y = s * sin(self.angles[self.index])

                    point = (round(x, 2), round(y, 2), z)

                points.append(point)

            cv.PolyLine(source, [line], False, (255, 0, 0), 2, 8)
            cv.ResetImageROI(source)
            x, y, a, b = self.roi
            cv.Rectangle(source, (int(x), int(y)), (int(a), int(b)),
                         (255.0, 255, 255, 0))

        if self.roi:
            # Second pass over the mirrored region left of the ROI.
            x, y, a, b = self.roi

            width, height = ((a - x), (b - y))
            mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

            cv.SetImageROI(
                source, (x - width, y, width, height))  # moves roi to the left
            cv.Split(source, None, None, mask, None)

            gray = cv.CloneImage(mask)

            cv.InRangeS(mask, self.thresholdMin, self.thresholdMax, mask)
            cv.And(mask, gray, gray)

            line = []
            points2 = []

            for i in range(0, height - 1):
                row = cv.GetRow(gray, i)

                # Same unpack-order caveat as in the first pass above.
                minVal, minLoc, maxLoc, maxVal = cv.MinMaxLoc(row)

                y = i
                x = maxVal[0]
                point = (0, 0, height - i)

                if x > 0:
                    line.append((x, y))

                    # Mirror the x coordinate within the left region.
                    x = width - x
                    # left to the x-axis

                    s = x / sin(radians(self.camAngle))

                    x = s * cos(self.angles[self.index])
                    z = height - y  # 500 higher then the other.
                    y = s * sin(self.angles[self.index])

                    # Extra fixed rotation for this camera's viewpoint.
                    # NOTE(review): rebinding 'a' here clobbers the ROI
                    # corner read above — harmless only because 'a' is
                    # re-read from self.roi after the loop.
                    a = radians(300)

                    nx = (cos(a) * x) - (sin(a) * y)
                    ny = (sin(a) * x) + (cos(a) * y)

                    point = (nx, ny, z)

                points2.append(point)

            cv.PolyLine(source, [line], False, (255, 0, 0), 2, 8)
            cv.ResetImageROI(source)
            x, y, a, b = self.roi
            cv.Rectangle(source, (int(x), int(y)), (int(a), int(b)),
                         (255.0, 255, 255, 0))

        if self.mode == 'mask':
            cv.ShowImage('preview', mask)
            return

        if self.mode == 'record' and self.roi:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 1)
            cv.PutText(source, "recording %d" % self.index, (20, 20), font,
                       (0, 0, 255))
            self.points.extend(points)
            self.points2.extend(points2)
            #self.colors.extend(colors);

        cv.ShowImage('preview', source)
Beispiel #4
0
    def show(self):
        """Process and display the current frame.

        Loads the image at self.files[self.index], grayscales and
        thresholds it, then — if a ROI is set — scans the ROI with
        self.lineScan and rotates the scanned points by the current
        turntable angle (self.index * self.step).  In 'record' mode the
        points are appended to self.points and quad faces are stitched
        between this frame's points and the previous frame's (matched
        by their y coordinate); in 'mask' mode the thresholded mask is
        shown instead of the annotated source.
        """
        source = cv.LoadImage(self.files[self.index])
        width, height = cv.GetSize(source)

        red = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
        mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

        # Grayscale stand-in for the red channel (see commented Split below).
        cv.CvtColor(source, red, cv.CV_RGB2GRAY)

        #cv.Split( source, None, None, red, None )
        cv.InRangeS(red, self.thresholdMin, self.thresholdMax, red)

        cv.Copy(red, mask)

        if self.roi:
            # print self.roi
            x, y, x1, y1 = self.roi
            # Annotate the preview with the ROI coordinates and size.
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 1)
            cv.PutText(source, "ROI %d,%d,%d,%d" % self.roi, (20, 35), font,
                       (0, 0, 255))
            cv.PutText(source, "%d x %d" % (x1 - x, y1 - y), (20, 50), font,
                       (0, 0, 255))

            # Outline the ROI on both the preview and the mask.
            cv.Rectangle(source, (x, y), (x1, y1), (255.0, 0, 0, 0))
            cv.Rectangle(mask, (x, y), (x1, y1), (128.0, 0, 0, 0))

            cv.SetImageROI(red, (x, y, x1 - x, y1 - y))
            cv.SetImageROI(source, (x, y, x1 - x, y1 - y))

            # Scan for the line inside the ROI, then rotate each point
            # by the per-frame angle around x1 / 2.
            points = self.lineScan(red, source)
            points = [
                self.rotate((self.index * self.step), (x, y), x1 / 2)
                for x, y in points
            ]

        cv.ResetImageROI(red)
        cv.ResetImageROI(source)

        if self.mode == 'mask':
            cv.ShowImage('preview', mask)
            return

        if self.mode == 'record' and self.roi:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 1)
            cv.PutText(source, "recording %d" % self.index, (20, 20), font,
                       (0, 0, 255))

            if self.prevpoints:
                # for each Y in our ROI
                # check if we can create a face between
                # points from previous and current

                # y coordinates of the current (a) and previous (b) points;
                # points are assumed to be (x, y, z) triples here.
                a = [y for _, y, _ in points]
                b = [y for _, y, _ in self.prevpoints]

                # Iterate over whichever frame produced more scanlines.
                itr = a if len(a) > len(b) else b
                p = 0

                for n in itr:
                    # Offsets of the previous/current point runs inside
                    # the global self.points list (prev frame's points
                    # start at `size`, this frame's will start at `asize`).
                    size = len(self.points) - len(self.prevpoints)
                    asize = size + len(self.prevpoints)
                    if n in a and n in b and p in a and p in b:
                        # Quad between two consecutive matched scanlines
                        # of the previous and current frames.
                        face = (size + b.index(n), asize + a.index(n),
                                asize + a.index(p), size + b.index(p))
                        self.faces.append(face)

                    p = n

            self.points.extend(points)

            self.prevpoints = points

        cv.ShowImage('preview', source)
Beispiel #5
0
  cv.SetMouseCallback('unwrapped', on_mouse)
  #on_mouse(cv.CV_EVENT_LBUTTONDOWN, centerX, centerY, None, None)
  
  # The magic number M determines how deep the polar transformation goes.
  M = 69

  #This is the main loop
  while True:
    cam = cv.QueryFrame(capture)
    cv.LogPolar(cam, polar, (centerX, centerY), M+1, cv.CV_INTER_NN) #possible speedup - get subrect src
    #unwrapped = cv.GetSubRect(polar,(280,0,40,360))
    #cv.Transpose(unwrapped, unwrapped)
    cv.Transpose(cv.GetSubRect(polar,(280,0,40,360)), unwrapped)
    cv.Flip(unwrapped) #just for viewing (possible speedup)

    cv.InRangeS(unwrapped, lower, upper, cones)
    cv.Erode(cones, cones) # just once might be too much, but unavoidable

    k = cv.CreateStructuringElementEx(3, 43, 1, 1, cv.CV_SHAPE_RECT) # create a 3x43 rectangular dilation element k
    cv.Dilate(cones, cones, k) 

    #Display (should probably be disabled with a usage flag)
    cv.ShowImage('cam', cam)
    cv.ShowImage('unwrapped', unwrapped)
    cv.ShowImage('cones', cones)
    #cv.ShowImage('polar', polar)
    #cv.ShowImage('hsvcopy', hsvcopy)

    #scan top row of thresholded, eroded, dilated image, find the number of contiguous segments and their location
    s = 0 # size of contiguous segment
    ss = 0 #number of contiguous segments
    def get_projector_line_associations(self):
        """Scan with structured light and return a CV_32SC1 matrix that
        maps every camera pixel to a projector scanline number.

        For each Gray-code projection pattern, a positive and an
        inverted (negative) image are captured; the sign of their
        difference yields one bit of the pixel's Gray code.  Pixels
        whose |difference| fell below self.threshold more than once, or
        whose decoded value is outside [0, self.number_of_scanlines),
        are marked invalid with -1.
        """
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        # Capture one positive and one negative image per pattern.
        for i in range(self.number_of_projection_patterns):
            positives.append(
                self.get_picture_of_projection(
                    self.predistorted_positive_projections[i]))
            negatives.append(
                self.get_picture_of_projection(
                    self.predistorted_negative_projections[i]))

        rospy.loginfo("Thresholding...")
        # strike_sum counts, per pixel, how often it was "too close to call".
        strike_sum = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        # gray_codes accumulates one Gray-code bit per pattern, per pixel.
        gray_codes = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            difference = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height,
                                               self.camera_info.width,
                                               cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height,
                                       self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask,
                    cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height,
                                   self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)

            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height,
                                    self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2**i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary
        # (Gray -> binary: repeatedly XOR the value with itself shifted right
        # by successively halving shift amounts.)
        projector_line_associations = cv.CreateMat(self.camera_info.height,
                                                   self.camera_info.width,
                                                   cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
        # NOTE(review): the loop bound is cv.CV_MAT_DEPTH of the element
        # type, which is a depth *enum* value, not a bit count — verify
        # this iterates enough times for codes wider than that value.
        for i in range(
                cv.CV_MAT_DEPTH(cv.GetElemType(projector_line_associations)),
                -1, -1):
            projector_line_associations_bitshifted_right = cv.CreateMat(
                self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations,
                            projector_line_associations_bitshifted_right,
                            (2**-(2**i)), -0.5)
            cv.Xor(projector_line_associations,
                   projector_line_associations_bitshifted_right,
                   projector_line_associations)

        rospy.loginfo("Post processing...")

        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)

        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height,
                                             self.camera_info.width,
                                             cv.CV_8UC1)
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines,
                    invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)

        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations
Beispiel #7
0
# HSV colour-threshold tuning loop: grab webcam frames, convert to HSV
# and threshold with the slider-controlled lower/upper bounds, showing
# the mask in the "Ball" window.  Exit on ESC.
# NOTE(review): capture and lowerH/lowerS/lowerV/upperH/upperS/upperV
# are globals defined elsewhere in the file (trackbar callbacks,
# presumably — confirm against the omitted setup code).
while True:

    frame = cv.QueryFrame(capture)
    # Clone so the capture buffer is not modified in place.
    frame = cv.CloneImage(frame)

    imgHSV = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)

    # Single-channel destination mask for the in-range test.
    imgThresh = cv.CreateImage(cv.GetSize(imgHSV), 8, 1)

    #    print("LH: " + str(lowerH) + ", LS: " + str(lowerS) + ", LV: " + str(lowerV))
    #    global lowerH
    #    global lowerS
    #    global lowerV
    #    global upperH
    #    global upperS
    #    global upperV
    # Log the current lower bounds for tuning.
    print("LH: " + str(lowerH) + ", LS: " + str(lowerS) + ", LV: " +
          str(lowerV))
    cv.InRangeS(imgHSV, cv.Scalar(lowerH, lowerS, lowerV),
                cv.Scalar(upperH, upperS, upperV), imgThresh)

    cv.ShowImage("Ball", imgThresh)
    #     cv.ShowImage("Video", frame)

    # ESC (27) quits; mask with 0x100 for platform key-code quirks.
    k = cv.WaitKey(10)
    if k % 0x100 == 27:
        break

cv.DestroyAllWindows()
Beispiel #8
0
# Interactive multi-object threshold viewer: select an object with the
# trackbar, read its six threshold components from the flat `colors`
# list (6 values per object), and show the thresholded webcam frame.
# Objects 3-4 are thresholded on the raw BGR frame, every other object
# in HSV space.  NOTE(review): presumably objects 3-4 were calibrated
# in BGR — confirm against change_object() elsewhere in the file.
change_object(0)
while True:
    #Get camera frame
    frame = cv.QueryFrame(cam)

    # Current object index and its 6 threshold components
    # (lower H,S,V then upper H,S,V — or B,G,R for objects 3-4).
    obj = cv.GetTrackbarPos('object', objects)
    currentcolors = []
    for i in range(6):
        currentcolors.append(int(colors[obj * 6 + i]))

    hsv_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsv_frame, cv.CV_BGR2HSV)
    thresholded_frame = cv.CreateImage(cv.GetSize(hsv_frame), 8, 1)
    if obj < 3 or obj > 4:
        cv.InRangeS(hsv_frame,
                    (currentcolors[0], currentcolors[1], currentcolors[2]),
                    (currentcolors[3], currentcolors[4], currentcolors[5]),
                    thresholded_frame)
    else:
        cv.InRangeS(frame,
                    (currentcolors[0], currentcolors[1], currentcolors[2]),
                    (currentcolors[3], currentcolors[4], currentcolors[5]),
                    thresholded_frame)

    cv.ShowImage("webcam", frame)
    cv.ShowImage("Thresholded", thresholded_frame)

    # ESC quits.
    key = cv.WaitKey(100)
    if key == 27:
        break

cv.DestroyAllWindows()
Beispiel #9
0
    def getCoordinates(self, target="ball", debug=False):
        """Return the (x, y) pixel coordinates of `target`, or (-1, -1)
        if it is not found.

        Supported targets: "ball", "blue gate", "yellow gate", "black",
        "white".  The frame is grabbed under the shared camera lock,
        blurred and converted to HSV, thresholded for the target and
        for the green field.  Blobs are found with SimpleCV; a ball
        blob is only accepted when at least one green field pixel lies
        somewhere above it.  With debug=True the thresholded frame is
        shown in a "Camera" window.  Also prints the elapsed time.
        """
        t = time.time()
        """
        This function will return the best coordinates found by thresholding the received image
        by the chosen threshold.
        """
        """Get the latest frame from the camera"""
        global cam, lock
        # Camera access is shared across threads; hold the lock only
        # for the grab/retrieve pair.
        lock.acquire()
        try:
            cv.GrabFrame(cam)
            frame = cv.RetrieveFrame(cam)
        finally:
            lock.release()
        """Initialize the coordinates to -1, which means that the object is not found"""
        x = -1
        y = -1
        """Prepair image for thresholding"""
        #cv.Smooth(thresholded_frame, thresholded_frame, cv.CV_GAUSSIAN, 5, 5)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)
        cv.CvtColor(frame, self.hsv_frame, cv.CV_BGR2HSV)
        """Threshold the image according to the chosen thresholds"""
        if target == "ball":
            cv.InRangeS(self.hsv_frame, self.ball_threshold_low,
                        self.ball_threshold_high, self.thresholded_frame)
        elif target == "blue gate":
            cv.InRangeS(self.hsv_frame, self.blue_gate_threshold_low,
                        self.blue_gate_threshold_high, self.thresholded_frame)
        elif target == "yellow gate":
            cv.InRangeS(self.hsv_frame, self.yellow_gate_threshold_low,
                        self.yellow_gate_threshold_high,
                        self.thresholded_frame)
        elif target == "black":
            cv.InRangeS(self.hsv_frame, self.black_threshold_low,
                        self.black_threshold_high, self.thresholded_frame)
        elif target == "white":
            cv.InRangeS(self.hsv_frame, self.white_threshold_low,
                        self.white_threshold_high, self.thresholded_frame)

        # Green-field mask, used below to validate ball candidates.
        cv.InRangeS(self.hsv_frame, self.green_threshold_low,
                    self.green_threshold_high, self.thresholded_field)
        """Now use some function to find the object"""
        blobs_image = SimpleCV.Image(self.thresholded_frame)
        field_image = SimpleCV.Image(self.thresholded_field)

        blobs = blobs_image.findBlobs(minsize=2)
        if blobs:
            if target == "ball":
                # Walk blobs from the last to the first; accept the
                # first one with field pixels somewhere above it.
                for i in range(len(blobs)):
                    i = len(blobs) - 1 - i
                    pos_x = blobs[i].maxX()
                    pos_y = blobs[i].maxY()
                    on_field = False
                    for py in range(0, pos_y):
                        if field_image.getPixel(pos_x, py) == (255, 255, 255):
                            on_field = True
                            break
                    if on_field:
                        x, y = pos_x, pos_y
                        break
            else:
                # For gates etc. just take the last (largest) blob.
                x, y = blobs[-1].coordinates()
        """Old, openCV using contours
        contours = cv.FindContours(cv.CloneImage(thresholded_frame), cv.CreateMemStorage(),mode=cv.CV_RETR_EXTERNAL)
        
        if len(contours)!=0:
            #determine the objects moments and check that the area is large  
            #enough to be our object 
            moments = cv.Moments(contours,1) 
            moment10 = cv.GetSpatialMoment(moments, 1, 0)
            moment01 = cv.GetSpatialMoment(moments, 0, 1)
            area = cv.GetCentralMoment(moments, 0, 0) 
            
            #there can be noise in the video so ignore objects with small areas 
            if area > 2: 
                #determine the x and y coordinates of the center of the object 
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
                x = moment10/area
                y = moment01/area"""
        if debug:
            cv.ShowImage("Camera", self.thresholded_frame)
            #thresholded_frame=SimpleCV.Image(thresholded_frame)
            #thresholded_frame.show()
        print time.time() - t

        return x, y
Beispiel #10
0
img = cv.LoadImage("Bruce_Blob.jpg", 1)

blobs = cvblob.Blobs()
size = cv.GetSize(img)

hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
h = cv.CreateImage(size, 8, 1)
grey = cv.CreateImage(size, 8, 1)
yuv = cv.CreateImage(size, 8, 3)
red = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
labelImg = cv.CreateImage(size,cvblob.IPL_DEPTH_LABEL, 1)

hsv_min = cv.Scalar(0,10,10,0)
hsv_max = cv.Scalar(180,255,255,0)

cv.InRangeS(img, cv.RGB(200,0,0), cv.RGB(255,225,225),red);

result = cvblob.Label(red,labelImg,blobs)
numblobs = len(blobs.keys())

avgsize = int(result/numblobs)
print str(numblobs) + " Blobs found covering " + str(result) + "px"
filtered = cv.CreateImage(cv.GetSize(img),cv.IPL_DEPTH_8U,1)
cvblob.FilterLabels(labelImg,filtered,blobs)

bigblob = cvblob.GreaterBlob(blobs)
print "largest blob is " + str(bigblob) + " which is " + str(blobs[bigblob].area + "px"

bigblobs = copy.copy(blobs)
cvblob.FilterByLabel(bigblobs,bigblob)
print str(len(bigblobs.keys())) + " blobs with label " + str(bigblob)
Beispiel #11
0
    def show(self):
        """Process and display the current frame.

        Thresholds one HSV channel of the loaded image, scans the ROI
        with self.lineScan, un-rotates the scanned line by 45 degrees,
        normalizes the points relative to the ROI, and spins them
        around the y axis by the turntable angle
        (self.index * self.step).  In 'record' mode, points and sampled
        colors are stored only for four hard-coded frame indices; in
        'mask' mode the mask image is displayed instead of the source.
        """
        source = cv.LoadImage(self.files[self.index])
        width, height = cv.GetSize(source)

        #cv.Flip(source, None, 1);

        red = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
        mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

        # Debug canvas for the un-rotated line.
        test = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)

        cv.Zero(mask)
        cv.Zero(test)

        hsv = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)

        cv.CvtColor(source, hsv, cv.CV_RGB2HSV)

        # cv.CvtColor( source, red, cv.CV_RGB2GRAY );
        # cv.Split( source, None, None, red, None );

        # NOTE(review): 'red' receives the third HSV channel here, not
        # the red channel — confirm the intended channel.
        cv.Split(hsv, None, None, red, None)
        cv.InRangeS(red, self.thresholdMin, self.thresholdMax, red)

        # cv.Copy( red, mask )

        if self.roi:
            # print self.roi
            x, y, x1, y1 = self.roi

            width = float(x1 - x)
            height = float(y1 - y)

            # Outline the ROI and draw its vertical midline.
            cv.Rectangle(source, (x, y), (x1, y1), (255.0, 255, 255, 0))
            cv.Rectangle(test, (x, y), (x1, y1), (255.0, 255, 255, 0))
            cv.Rectangle(mask, (x, y), (x1, y1), (128.0, 0, 0, 0))
            #Line(img, pt1, pt2, color, thickness=1, lineType=8, shift=0) ? None?

            cv.Line(source, (x + (width / 2), y), (x + (width / 2), y1),
                    (0, 0, 255), 1)

            cv.SetImageROI(red, (x, y, x1 - x, y1 - y))
            cv.SetImageROI(source, (x, y, x1 - x, y1 - y))
            cv.SetImageROI(test, (x, y, x1 - x, y1 - y))

            points = self.lineScan(red, source)

            # Sample a colour two pixels right of each scanned point.
            colors = [source[y, x + 2] for x, y in points]
            cv.PolyLine(source, [points], False, (255, 0, 255), 3, 8, 0)

            # rotate the line back
            def translate(x, y, arc):
                # Rotate (x, y) around the origin by -arc, preserving radius.
                x, y = float(x), float(y)

                arc = atan(y / x) - arc

                len = sqrt(pow(x, 2) + pow(y, 2))
                x, y = (cos(arc) * len), (sin(arc) * len)
                return x, y

            points = [translate(x, y, math.radians(-45)) for x, y in points]

            cv.PolyLine(test, [points], False, (255, 0, 255), 3, 8, 0)

            cv.ShowImage('test', test)

            # normalize points and mirror right of the x-axis, mirror bottom-top
            points = [(0.5 - (x / width), 1.0 - (y / height), 0.0)
                      for x, y in points]

            def rotate(angle, x, y, z=0.0):
                # Rotate (x, z) around the y axis by `angle` degrees.
                angle = math.radians(angle)
                zn = z * (math.cos(angle)) - (x * math.sin(angle))
                xn = z * (math.sin(angle)) + (x * math.cos(angle))

                return (xn, y, zn)

            # points = [self.rotate( self.index * 1, (x, y), x1/2) for x,y in points]

            points = [
                rotate((self.index * self.step), x, y, z) for x, y, z in points
            ]
            #
            #points = [(x, y, self.index/100.00) for x,y in points]

            cv.ResetImageROI(red)
            cv.ResetImageROI(source)

        if self.mode == 'mask':
            cv.ShowImage('preview', mask)
            return

        if self.mode == 'record' and self.roi:
            #if self.index == 45:
            #    o = open('points.asc', 'w');
            #
            #    p = ["%f %f %f\n" % point for point in points];
            #    p = str.join("", p);
            #
            #    o.write(p);
            #    o.close();
            #    exit();

            # Only record these four specific turntable positions.
            if self.index == 1 or self.index == 76 or self.index == 38 or self.index == 114:
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 1)
                cv.PutText(source, "recording %d" % self.index, (20, 20), font,
                           (0, 0, 255))
                self.points.extend(points)
                self.colors.extend(colors)

        cv.ShowImage('preview', source)
Beispiel #12
0
def video():
    """Track an orange target with the NAO top camera and walk toward it.

    Subscribes to ALVideoDevice, thresholds orange in HSV per frame,
    collects contour bounding-box centres, clusters them (clustering()
    with K centroids), and issues head-turn / forward-motion commands
    from the first centroid plus the two sonar readings.  The chase
    stops once no motion command has been issued for 5 seconds; a
    KeyboardInterrupt crouches the robot, releases stiffness and exits.
    """
    global motionProxy
    global post
    global sonarProxy
    global memoryProxy
    # Servo stiffness value.  NOTE(review): assigned but not visibly
    # applied in this function — confirm it is used elsewhere.
    stiffnesses = 1.0
    time.sleep(0.5)

    # init video
    cameraProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 1  # 0 : QQVGA, 1 : QVGA, 2 : VGA
    colorSpace = 11  # RGB
    camNum = 0  # 0:top cam, 1: bottom cam
    fps = 1
    # frame Per Second
    cameraProxy.setParam(18, camNum)
    try:
        videoClient = cameraProxy.subscribe("python_client", resolution,
                                            colorSpace, fps)
    except:
        # Stale subscription from a previous run: unsubscribe and retry.
        cameraProxy.unsubscribe("python_client")
        videoClient = cameraProxy.subscribe("python_client", resolution,
                                            colorSpace, fps)
    print "videoClient ", videoClient
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = cameraProxy.getImageRemote(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]

    # define display window
    #cv.ResizeWindow("proc",imageWidth,imageHeight)

    found = True
    posx = 0
    posy = 0
    mem = cv.CreateMemStorage(0)
    i = 0
    cv.NamedWindow("Real")
    cv.MoveWindow("Real", 0, 0)
    cv.NamedWindow("Threshold")
    # NOTE(review): this moves "Real" again — presumably "Threshold"
    # was intended here.
    cv.MoveWindow("Real", imageWidth + 100, 0)
    error = 0.0
    nframe = 0.0
    closing = 3  # erode/dilate iterations, adapted to the label count below
    tstp, tu = 0, 0  # time of last "stop" event / last motion command
    K = 2  # number of clusters requested from clustering()
    try:
        while found:
            nframe = nframe + 1
            # Get current image (top cam)
            naoImage = cameraProxy.getImageRemote(videoClient)
            # Get the image size and pixel array.
            imageWidth = naoImage[0]
            imageHeight = naoImage[1]
            array = naoImage[6]
            # Create a PIL Image from our pixel array.
            pilImg = Image.fromstring("RGB", (imageWidth, imageHeight), array)
            # Convert Image to OpenCV
            cvImg = cv.CreateImageHeader((imageWidth, imageHeight),
                                         cv.IPL_DEPTH_8U, 3)
            cv.SetData(cvImg, pilImg.tostring())
            cv.CvtColor(cvImg, cvImg, cv.CV_RGB2BGR)
            hsv_img = cv.CreateImage(cv.GetSize(cvImg), 8, 3)
            cv.CvtColor(cvImg, hsv_img, cv.CV_BGR2HSV)
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            thresholded_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            # Get the orange on the image
            # (second copy is kept unmodified for display only).
            cv.InRangeS(hsv_img, (0, 150, 150), (40, 255, 255),
                        thresholded_img)
            cv.InRangeS(hsv_img, (0, 150, 150), (40, 255, 255),
                        thresholded_img2)
            # NOTE(review): this first FindContours result is discarded —
            # the mask is smoothed/eroded/dilated and contours are found
            # again below.
            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(thresholded_img, storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            cv.Smooth(thresholded_img, thresholded_img, cv.CV_GAUSSIAN, 5, 5)
            cv.Erode(thresholded_img, thresholded_img, None, closing)
            cv.Dilate(thresholded_img, thresholded_img, None, closing)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(thresholded_img, storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            d = []
            data = []
            while contour:
                # Draw bounding rectangles
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()
                # for more details about cv.BoundingRect,see documentation
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(cvImg, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
                lastx = posx
                lasty = posy
                # Centre of the bounding box feeds the clustering step.
                posx = cv.Round((pt1[0] + pt2[0]) / 2)
                posy = cv.Round((pt1[1] + pt2[1]) / 2)
                data.append([posx, posy])
                d.append(math.sqrt(pt1[0]**2 + pt2[0]**2))
                d.append(math.sqrt(pt1[1]**2 + pt2[1]**2))

            cvImg, error, centroid, labels = clustering(
                data, cvImg, nframe, error, K)
            # Update the closing size towards the number of found labels
            if labels.size < 2:
                closing = 1
            if labels.size < 6:
                closing = 2
            if labels.size > 10:
                closing = 3
            if closing < 1:
                closing = 0
            if centroid.size != 0:

                uh = 0
                try:
                    x = int(centroid[0][0])
                    y = int(centroid[0][1])
                except:
                    print "NaN"
                # Read left sonar distance.
                l = memoryProxy.getData(
                    "Device/SubDeviceList/US/Left/Sensor/Value")
                # Same thing for right.
                r = memoryProxy.getData(
                    "Device/SubDeviceList/US/Right/Sensor/Value")
                # Proportional gains: heading (deg/pixel) and forward speed.
                kh = 30.0 / (imageWidth / 2)
                km = 0.2
                uh = -kh * (x - imageWidth / 2)
                um = km * (math.sqrt(l**2 + r**2) - 0.5)
                print "um: ", um

                # Turn toward the target when off-centre; advance while
                # both sonars report the way is clear.
                if (uh > 4 or uh < -4):
                    motionProxy.moveTo(0.0, 0.0, uh * almath.TO_RAD)
                    tu = time.time()
                if (l > 0.5 and r > 0.5):
                    motionProxy.moveTo(um, 0.0, 0.0)
                    tu = time.time()

                else:
                    print "-------------------------"
                    print "K = 1"
                    K = 1
                    tstp = time.time()
                print l, r

                # Once close, drop to K=1: assume no more spurious clusters.

                # Time since the last motion command.
                tact = tstp - tu

                if tact > 5:
                    found = False
                    tts.say("You can't hide forever")
                    post.goToPosture("StandInit", 1.0)

                #print "diff",x-imageWidth/2
                #print imageWidth,imageHeight

            cv.ShowImage("Real", cvImg)
            cv.ShowImage("Threshold", thresholded_img2)
            cv.WaitKey(1)

    except KeyboardInterrupt:
        # Graceful shutdown: crouch, release stiffness, unsubscribe.
        pNames = "Body"
        post.goToPosture("Crouch", 1.0)
        time.sleep(1.0)
        pStiffnessLists = 0.0
        pTimeLists = 1.0
        proxy = ALProxy("ALMotion", IP, 9559)
        proxy.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
        #tts.say("exit")
        print
        print "Interrupted by user, shutting down"
        cameraProxy.unsubscribe(videoClient)
        sys.exit(0)
Beispiel #13
0
        
# Colour-segmentation demo: threshold a test image around four BGR
# reference pixels (red, green, blue, yellow), allowing +/- `tolerancia`
# on every channel.  NOTE(review): `tolerancia` is defined elsewhere in
# the file.
imagen=cv.LoadImage('e.bmp')  # source test image
cv.ShowImage('Prueba',imagen)

# One single-channel mask per colour
# (rojo=red, verde=green, azul=blue, amarillo=yellow).
rojo      = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
verde     = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
azul      = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
amarillo  = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)

# Reference BGR pixel values for each colour.
pixel_rojo     = [11,21,109]
pixel_verde    = [16,47,27 ]
pixel_azul     = [221,174,68]
#pixel_amarillo = [82,140,142]  # ball
pixel_amarillo = [53,117,177] 

# Keep pixels within +/- tolerancia of the reference on every channel.
cv.InRangeS(imagen,(pixel_rojo[0]-tolerancia,pixel_rojo[1]-tolerancia,pixel_rojo[2]-tolerancia),(pixel_rojo[0]+tolerancia,pixel_rojo[1]+tolerancia,pixel_rojo[2]+tolerancia),rojo)

cv.InRangeS(imagen,(pixel_verde[0]-tolerancia,pixel_verde[1]-tolerancia,pixel_verde[2]-tolerancia),(pixel_verde[0]+tolerancia,pixel_verde[1]+tolerancia,pixel_verde[2]+tolerancia),verde)

cv.InRangeS(imagen,(pixel_azul[0]-tolerancia,pixel_azul[1]-tolerancia,pixel_azul[2]-tolerancia),(pixel_azul[0]+tolerancia,pixel_azul[1]+tolerancia,pixel_azul[2]+tolerancia),azul)

cv.InRangeS(imagen,(pixel_amarillo[0]-tolerancia,pixel_amarillo[1]-tolerancia,pixel_amarillo[2]-tolerancia),(pixel_amarillo[0]+tolerancia,pixel_amarillo[1]+tolerancia,pixel_amarillo[2]+tolerancia),amarillo)

cv.ShowImage('Color Rojo'     ,rojo)
cv.ShowImage('Color Verde'    ,verde)
cv.ShowImage('Color Azul'     ,azul)
cv.ShowImage('Color Amarillo' ,amarillo)

# Accumulators for per-colour results (filled elsewhere).
cr = []
cg = []
ca = []
Beispiel #14
0
class image_converter:
  """ROS helper node: subscribes to a camera topic, runs colour filtering on
  each frame with the legacy OpenCV `cv` API, and republishes the frame.

  NOTE(review): everything in callback() after the publish call appears to be
  pasted in from another example -- it references an undefined name `frame`
  and ends with a dangling `if area > 5:` that has no body, so this module
  does not compile as-is.
  """

  def __init__(self):
    # Publisher for the processed frames.
    self.image_pub = rospy.Publisher("image_converter1",Image)

    cv.NamedWindow("Image window", 1)
    # Converts between ROS Image messages and OpenCV images.
    self.bridge = CvBridge()
    #self.image_sub = rospy.Subscriber("ardrone/bottom/image_raw",Image,self.callback)
    self.image_sub = rospy.Subscriber("usb_cam1/image_raw",Image,self.callback)

  def callback(self,data):
    """Per-frame handler: convert the ROS message to an OpenCV image,
    threshold the red plane, show debug windows and republish the frame."""
    try:
      cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
    except CvBridgeError, e:
      print e

    #(cols,rows) = cv.GetSize(cv_image)
    #if cols > 60 and rows > 60 :
    #  cv.Circle(cv_image, (50,50), 10, 255)

    #######################################
    #img1=cv.CreateImage((150,150),8,3)
    #cv.Rectangle(cv_image, (60,60),(70,90), (255,0,255))
    #sub1=cv.GetSubRect(cv_image,(60,60,150,150))
    #save1=cv.CloneMat(sub1)
    #sub2=cv.GetSubRect(img1,(0,0,150,150))
    #cv.Copy(save1,sub2)
    # Working copies sized like the incoming frame.
    img=cv.CreateImage((cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]),8,3)
    img1=cv.CreateImage((cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]),8,3)
    #img=cv.CreateImage(cv.GetSize(cv_image),8,3)
    # Single-channel planes for the B/G/R split and red thresholds.
    img_r=cv.CreateImage(cv.GetSize(img),8,1)
    img_g=cv.CreateImage(cv.GetSize(img),8,1)
    img_b=cv.CreateImage(cv.GetSize(img),8,1)
    img_r1=cv.CreateImage(cv.GetSize(img),8,1)
    img_r2=cv.CreateImage(cv.GetSize(img),8,1)

    # Reference image loaded from disk every frame (grayscale).
    img2=cv.LoadImage("/home/petin/ros_pkgs/vp_ardrone2/ris1.jpg",cv.CV_LOAD_IMAGE_GRAYSCALE)


    # Copy the full frame into `img` through sub-rect views.
    sub1=cv.GetSubRect(cv_image,(0,0,cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]))
    save1=cv.CloneMat(sub1)
    sub2=cv.GetSubRect(img,(0,0,cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]))
    cv.Copy(save1,sub2)

    #cv.CvtColor(img, img1, cv.CV_BGR2HSV)
    #cv.CvtPixToPlane(img1,img_h,img_s,img_v,None)
    #cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    #cv.Smooth(gray,gray,cv.CV_GAUSSIAN,5,5)
    cv.Split(img,img_b,img_g,img_r,None)
    #
    #cv.ShowImage("Image window1", img)
    #cv.ShowImage("Image windowb", img_b)
    #cv.ShowImage("Image windowg", img_g)
    #cv.ShowImage("Image windowr", img_r)
    #
    # Binary mask of pixels whose red value is in [0, 120].
    cv.InRangeS(img_r, cv.Scalar(0), cv.Scalar(120), img_r1);
    #cv.InRangeS(img_s, cv.Scalar(135), cv.Scalar(255), img_s1);
    #cv.InRangeS(img_b, cv.Scalar(0), cv.Scalar(61), img_b1);
    #cv.Invert(img_g1,img_g2,cv.CV_SVD)
    ##cv.Smooth(img_g,img_g,cv.CV_GAUSSIAN,9,9)
    #
    #cv.ShowImage("Image windowg1", img_g1)
    #
    ##storage=cv.CreateMemStorage(0)
    ##contours = cv.FindContours (img_r1, storage, cv.CV_CHAIN_APPROX_NONE)
    ##print len(contours)
    cv.ShowImage("Image windowg1", img_r1)
    #for contour in contours.hrange():
    #  size = abs(cvContourArea(contour))
    #  is_convex = cvCheckContourConvexity(contour)
    #  bbox = cvBoundingRect( contour, 0 )
    #  x, y = bbox.x+bbox.width*0.5, bbox.y+bbox.height*0.5
    #  print (x,y)
    ##print(8888888888888888888888888888888888888888888888888888888888)
    ##for (x,y) in contours:
    ##  print (x,y)
    #cv.ShowImage("Image windowg1", img_h1)
    #cv.ShowImage("Image windowr1", img_r1)
    #cv.ShowImage("Image gray", gray)
    # search circle
    #storage = cv.CreateMat(img2.width, 1, cv.CV_32FC3)
    #cv.ShowImage("Image window1", img2)
    #cv.HoughCircles(img2, storage, cv.CV_HOUGH_GRADIENT, 2, 100, 100, 50, 10, 400)
    #rospy.loginfo(len()np.asarray(storage))
    #for i in xrange(storage.width - 1):
    #for i in xrange(storage.width - 1):
    #  radius = storage[i, 2]
    #  center = (storage[i, 0], storage[i, 1])
    #  rospy.loginfo('444')
    #  cv.Circle(cv_image, center, radius, (0, 0, 255), 3, 10, 200)
    #search_circle=cv.HoughCircles(img_g1,np.asarray(storage),3,10,150)
    #for res in search_circles:
    #   p = float (cv.GetSeqElem(res))
    #   pt = cv.Point( cv.Round( p[0] ), cv.Round( p[1] ) );
    #   cv.Circle( cv_image, pt, cv.Round( p[2] ), 255 );
    #
    #cv.And(img_g,img_r,img_a)
    #cv.And(img_a,img_b,img_a)
    cv.ShowImage("Image window2", img2)
     #
    cv.ShowImage("Image window", cv_image)
    cv.WaitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError, e:
      print e
    # NOTE(review): everything below appears pasted from another example --
    # `frame` is never defined in this method, so this raises NameError.
    #convert the image to hsv(Hue, Saturation, Value) so its
    #easier to determine the color to track(hue)
    hsv_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsv_frame, cv.CV_BGR2HSV)

    #limit all pixels that don't match our criteria, in this case we are
    #looking for purple but if you want you can adjust the first value in
    #both turples which is the hue range(120,140).  OpenCV uses 0-180 as
    #a hue range for the HSV color model
    thresholded_frame = cv.CreateImage(cv.GetSize(hsv_frame), 8, 1)
    #    cv.InRangeS(hsv_frame, (0, 80, 0), (30, 255, 255), thresholded_frame)  # works fine during day
    #    cv.InRangeS(hsv_frame, (10, 150, 0), (25, 255, 255), thresholded_frame)  # works fine evenings2
    #    cv.InRangeS(hsv_frame, (11, 100, 0), (13, 255, 255), thresholded_frame)  # works fine 17:00
    cv.InRangeS(
        hsv_frame, (0, 157, 194), (25, 255, 255),
        thresholded_frame)  # ANDRES - module for creating constants threshold

    contours = cv.FindContours(cv.CloneImage(thresholded_frame),
                               cv.CreateMemStorage())

    if len(contours) != 0:
        #determine the objects moments and check that the area is large
        #enough to be our object
        moments = cv.Moments(contours, 1)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0, 1)
        area = cv.GetCentralMoment(moments, 0, 0)

        #there can be noise in the video so ignore objects with small areas
        # NOTE(review): paste is truncated here -- this `if` has no body.
        if area > 5:
Beispiel #16
0
def main():
    """Capture webcam frames and isolate a laser pointer via HSV thresholds.

    Splits each frame into H/S/V planes, thresholds each plane in place with
    the module-level (hmin, hmax), (smin, smax), (vmin, vmax) ranges, ANDs
    hue and value to get the laser mask, and displays all intermediates.
    Runs until the camera stops delivering frames or the user presses
    ESC or 'q'.  Relies on module-level helpers/constants:
    create_and_position_window, setup_camera_capture, cam_width, cam_height.
    """
    # create windows
    create_and_position_window('Thresholded_HSV_Image', 10, 10)
    create_and_position_window('RGB_VideoFrame', 10 + cam_width, 10)

    create_and_position_window('Hue', 10, 10 + cam_height)
    create_and_position_window('Saturation', 210, 10 + cam_height)
    create_and_position_window('Value', 410, 10 + cam_height)
    create_and_position_window('LaserPointer', 0, 0)

    capture = setup_camera_capture()

    # Scratch images for the separate H, S, V channels and the result mask.
    h_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    s_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    v_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    laser_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    for scratch in (h_img, s_img, v_img, laser_img):
        cv.SetZero(scratch)

    while True:
        # 1. capture the current image
        frame = cv.QueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break

        hsv_image = cv.CloneImage(frame)  # temporary copy of the frame
        cv.CvtColor(frame, hsv_image, cv.CV_BGR2HSV)  # convert to HSV

        # split the video frame into color channels
        cv.Split(hsv_image, h_img, s_img, v_img, None)

        # Threshold ranges of HSV components (each plane becomes a mask).
        cv.InRangeS(h_img, hmin, hmax, h_img)
        cv.InRangeS(s_img, smin, smax, s_img)
        cv.InRangeS(v_img, vmin, vmax, v_img)

        # Perform an AND on HSV components to identify the laser!
        cv.And(h_img, v_img, laser_img)
        # This actually Worked OK for me without using Saturation.
        #cv.cvAnd(laser_img, s_img,laser_img)

        # Merge the HSV components back together.
        cv.Merge(h_img, s_img, v_img, None, hsv_image)

        #-----------------------------------------------------
        # NOTE: default color space in OpenCV is BGR!!
        # we can now display the images
        cv.ShowImage('Thresholded_HSV_Image', hsv_image)
        cv.ShowImage('RGB_VideoFrame', frame)
        cv.ShowImage('Hue', h_img)
        cv.ShowImage('Saturation', s_img)
        cv.ShowImage('Value', v_img)
        cv.ShowImage('LaserPointer', laser_img)

        # handle events
        k = cv.WaitKey(10)

        # BUG FIX: cv.WaitKey returns an int key code (or -1), never a str,
        # so the original comparison against '\x1b'/'q' could never be true
        # and the key handler was dead code.
        if k in (27, ord('q')):
            # user pressed ESC or 'q', so exit
            break
Beispiel #17
0
def reconocimiento_pelotas_y_robot ():
    """Locate the robot (green blob) and up to three red balls in the
    homography-corrected camera image.

    BUG FIX: the pasted original had lost its (tab) indentation, so the
    pixel-scanning loops did not parse; the structure is restored here
    (it mirrors the intact copy of this routine elsewhere in the file).

    Returns (ro_x, ro_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y, num_pel):
    the robot centroid, the three ball centroids (a missing ball is
    reported at the robot position) and the number of balls found.
    Raises ZeroDivisionError if the green mask matched no pixels.
    """
    global pixel_rojo, pixel_verde, width, height
    peq_imagen, imagen = imagen_homografia(1)
    tolerancia = 30

    # Binary masks for the red (balls) and green (robot) colour matches.
    rojo  = cv.CreateImage(cv.GetSize(imagen), cv.IPL_DEPTH_8U, 1)
    verde = cv.CreateImage(cv.GetSize(imagen), cv.IPL_DEPTH_8U, 1)

    cv.InRangeS(imagen,(pixel_rojo[0]-tolerancia,pixel_rojo[1]-tolerancia,pixel_rojo[2]-tolerancia),(pixel_rojo[0]+tolerancia,pixel_rojo[1]+tolerancia,pixel_rojo[2]+tolerancia),rojo)

    cv.InRangeS(imagen,(pixel_verde[0]-tolerancia,pixel_verde[1]-tolerancia,pixel_verde[2]-tolerancia),(pixel_verde[0]+tolerancia,pixel_verde[1]+tolerancia,pixel_verde[2]+tolerancia),verde)

    (cg, cr) = ([], [])
    (p1, p2, p3) = ([], [], [])
    sumx_g = sumy_g = 0
    sumx_p1 = sumx_p2 = sumx_p3 = sumy_p1 = sumy_p2 = sumy_p3 = 0
    inicio = inicio2 = inicio3 = 0
    off = 100          # max pixel distance for a red pixel to join a cluster
    old_x = old_y = 0
    for j in range(1, height, 1):
        for i in range(1, width, 1):
            r = cv.Get2D(rojo, j, i)
            g = cv.Get2D(verde, j, i)
            if r[0] == 255:
                cr += [(i, j)]
                # Greedy clustering: assign the pixel to the first of up to
                # three clusters whose last-seen pixel is within `off`.
                if inicio == 0:
                    inicio = 1
                    old_x = i
                    old_y = j
                elif i > old_x-off and i < old_x+off and j > old_y-off and j < old_y+off:
                    p1 += [(i, j)]
                    sumx_p1 = sumx_p1 + i
                    sumy_p1 = sumy_p1 + j
                    old_x = i
                    old_y = j
                else:
                    if inicio2 == 0:
                        inicio2 = 1
                        old_x2 = i
                        old_y2 = j
                    elif i > old_x2-off and i < old_x2+off and j > old_y2-off and j < old_y2+off:
                        p2 += [(i, j)]
                        sumx_p2 = sumx_p2 + i
                        sumy_p2 = sumy_p2 + j
                        old_x2 = i
                        old_y2 = j
                    else:
                        if inicio3 == 0:
                            inicio3 = 1
                            old_x3 = i
                            old_y3 = j
                        elif i > old_x3-off and i < old_x3+off and j > old_y3-off and j < old_y3+off:
                            p3 += [(i, j)]
                            sumx_p3 = sumx_p3 + i
                            sumy_p3 = sumy_p3 + j
                            old_x3 = i
                            old_y3 = j
            if g[0] == 255:
                cg += [(i, j)]
                sumx_g = sumx_g + i
                sumy_g = sumy_g + j
            p_old1_x = i
            p_old1_y = j

    # Robot centroid (integer division, as in the original).
    ro_x = sumx_g/len(cg)      #0
    ro_y = sumy_g/len(cg)

    num_pel = 0

    if(len(p1)==0):
        p1_x = ro_x
        p1_y = ro_y
    else:
        p1_x = sumx_p1/len(p1)      #1
        p1_y = sumy_p1/len(p1)
        num_pel = num_pel + 1

    if(len(p2)==0):
        p2_x = ro_x
        p2_y = ro_y
    else:
        p2_x = sumx_p2/len(p2)      #2
        p2_y = sumy_p2/len(p2)
        num_pel = num_pel + 1

    if(len(p3)==0):
        p3_x = ro_x
        p3_y = ro_y
    else:
        p3_x = sumx_p3/len(p3)      #3
        p3_y = sumy_p3/len(p3)
        num_pel = num_pel + 1

    return ro_x,ro_y,p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,num_pel
 
def reconocimiento_robot_pos ():
    """Locate the robot's two colour markers and return corrected positions.

    BUG FIX: the pasted original had lost its (tab) indentation, so the
    pixel-scanning loops did not parse; the structure is restored here.

    Returns (ro_x, corr_ro_y + 20, rf_x, corr_rf_y + 20): the green-marker
    and blue-marker centroids with the y coordinates compensated for the
    robot height relative to the tripod camera (parallax correction).
    Raises ZeroDivisionError if either colour mask matched no pixels.
    """
    global pixel_azul, pixel_verde, width, height
    peq_imagen, imagen = imagen_homografia(1)
    tolerancia = 30

    verde = cv.CreateImage(cv.GetSize(imagen), cv.IPL_DEPTH_8U, 1)
    azul  = cv.CreateImage(cv.GetSize(imagen), cv.IPL_DEPTH_8U, 1)

    cv.InRangeS(imagen,(pixel_verde[0]-tolerancia,pixel_verde[1]-tolerancia,pixel_verde[2]-tolerancia),(pixel_verde[0]+tolerancia,pixel_verde[1]+tolerancia,pixel_verde[2]+tolerancia),verde)

    cv.InRangeS(imagen,(pixel_azul[0]-tolerancia,pixel_azul[1]-tolerancia,pixel_azul[2]-tolerancia),(pixel_azul[0]+tolerancia,pixel_azul[1]+tolerancia,pixel_azul[2]+tolerancia),azul)

    (cg, cb, cy) = ([], [], [])
    sumx_g = sumy_g = sumx_b = sumy_b = sumx_y = sumy_y = 0

    for j in range(1, height, 1):
        for i in range(1, width, 1):
            b = cv.Get2D(azul, j, i)
            g = cv.Get2D(verde, j, i)
            if g[0] == 255:
                cg += [(i, j)]
                sumx_g = sumx_g + i
                sumy_g = sumy_g + j
            if b[0] == 255:
                cb += [(i, j)]
                sumx_b = sumx_b + i
                sumy_b = sumy_b + j
            p_old1_x = i
            p_old1_y = j

    # Marker centroids (integer division, as in the original).
    ro_x = sumx_g/len(cg)
    ro_y = sumy_g/len(cg)
    rf_x = sumx_b/len(cb)
    rf_y = sumy_b/len(cb)

    # Effective camera height over the far end of the field, then shift each
    # marker away from the camera to undo the robot-height parallax.
    alt_real_tripode = alt_tripode - (float(dist_trip_cancha*alt_tripode)/float(dist_trip_cancha+largo_cancha))
    corr_ro_y = float(ro_y) + (float(ro_y * alt_robot) / float(alt_real_tripode))
    corr_rf_y = float(rf_y) + (float(rf_y * alt_robot) / float(alt_real_tripode))

    # NOTE(review): corr_ro_x / corr_rf_x are computed but not returned,
    # exactly as in the original -- kept for fidelity.
    if ro_x > width/2:
        corr_ro_x = float(ro_x) - (float(ro_x * alt_robot) / float(alt_real_tripode))
    else:
        corr_ro_x = float(ro_x) + (float(ro_x * alt_robot) / float(alt_real_tripode))

    if rf_x > width/2:
        corr_rf_x = float(rf_x) - (float(rf_x * alt_robot) / float(alt_real_tripode))
    else:
        corr_rf_x = float(rf_x) + (float(rf_x * alt_robot) / float(alt_real_tripode))

    return ro_x,corr_ro_y+20,rf_x,corr_rf_y+20
 
def RaizCuadrada(X):
    """Return the integer (floor) square root of a non-negative number.

    Newton's method on integers.  BUG FIX: the original looped
    ``while T != R``, which never terminates for inputs such as X == 3
    (the iterate oscillates between 1 and 2 forever) and divides by zero
    for X == 0; both cases are handled here.  Floor-division (//) is used
    so the result is exact under both Python 2 and 3.

    Raises ValueError for negative input.
    """
    if X < 0:
        raise ValueError("RaizCuadrada: negative input %r" % (X,))
    if X == 0:
        return 0
    R = X
    T = (R + 1) // 2
    # The iterate decreases monotonically until it reaches floor(sqrt(X)).
    while T < R:
        R = T
        T = (R + X // R) // 2
    return R
     
def distancia(x1, y1, x2, y2):
    """Integer (floor) Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return RaizCuadrada(dx * dx + dy * dy)
     
def sec_tray(ro_x, ro_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):
    """Choose the shortest closed tour over the three balls.

    Compares the three possible round trips starting and ending at the
    robot position and returns the ordered list of (x, y) way-points,
    keeping the start (j == 0), the end (j == 4), and every intermediate
    point that does not coincide with the robot position (the caller
    encodes a missing ball as the robot position).

    BUG FIX: the pasted original had lost its indentation inside the
    if/else ladder and did not parse; structure restored.  The unused
    locals `m_dist` and `menor` from the original were dropped.
    """
    dis_0_1 = distancia(ro_x, ro_y, p1_x, p1_y)
    dis_0_2 = distancia(ro_x, ro_y, p2_x, p2_y)
    dis_0_3 = distancia(ro_x, ro_y, p3_x, p3_y)
    dis_1_2 = distancia(p1_x, p1_y, p2_x, p2_y)
    dis_1_3 = distancia(p1_x, p1_y, p3_x, p3_y)
    dis_2_3 = distancia(p2_x, p2_y, p3_x, p3_y)

    # Total length of each candidate tour (robot -> balls -> robot).
    sec1 = dis_0_1 + dis_1_2 + dis_2_3 + dis_0_3   # 0-1-2-3-0
    sec2 = dis_0_1 + dis_1_3 + dis_2_3 + dis_0_2   # 0-1-3-2-0
    sec3 = dis_0_2 + dis_1_2 + dis_1_3 + dis_0_3   # 0-2-1-3-0

    if sec1 < sec2:
        if sec1 < sec3:
            m_cord = [[ro_x,ro_y],[p1_x,p1_y],[p2_x,p2_y],[p3_x,p3_y],[ro_x,ro_y]]
        else:
            m_cord = [[ro_x,ro_y],[p2_x,p2_y],[p1_x,p1_y],[p3_x,p3_y],[ro_x,ro_y]]
    else:
        if sec2 < sec3:
            m_cord = [[ro_x,ro_y],[p1_x,p1_y],[p3_x,p3_y],[p2_x,p2_y],[ro_x,ro_y]]
        else:
            m_cord = [[ro_x,ro_y],[p2_x,p2_y],[p1_x,p1_y],[p3_x,p3_y],[ro_x,ro_y]]

    # Keep start, end, and every way-point distinct from the robot position.
    m_coord = []
    for j in range(0, 5):
        if (((m_cord[j][0] != ro_x) and (m_cord[j][1] != ro_y)) or j == 0) or j == 4:
            m_coord += [(m_cord[j][0], m_cord[j][1])]

    return m_coord
 
def correccion(ro_x,ro_y,rf_x,rf_y,x_evaluado,y_evaluado):
    """Decide which way the robot must turn toward (x_evaluado, y_evaluado).

    `calc` is the signed offset of the target point from the line through
    the robot's two markers (ro = rear/origin, rf = front).

    NOTE(review): this function is truncated in the paste -- the body ends
    at a dangling `else:` and the `direccion=` assignments are mis-indented,
    so it does not compile as-is.
    """
    if (float(ro_x-rf_x)==0):
      # Vertical robot axis: substitute a tiny dx to avoid dividing by zero.
      calc = y_evaluado-ro_y-((float(ro_y-rf_y)/(0.001))*(x_evaluado-ro_x))
      print 'Division por 0 evitada'
    else:
      calc = y_evaluado-ro_y-((float(ro_y-rf_y)/float(ro_x-rf_x))*(x_evaluado-ro_x))
    print 'giro=',calc
    g=50  # offsets within +/-g fall through to the (truncated) else branch
    if ro_x > rf_x :
      if calc < -g:
     direccion='d'
      elif calc > g:
     direccion='i'
      else:
    def runColor(self):
        """Track a red/orange object from the default webcam.

        Loops forever: grab a frame, threshold it in HSV, and when a large
        enough blob is found write its centroid to the coordination file via
        self.WriteXY().  Writes (-2, -2) during the grace period after the
        object is lost and (-1, -1) once tracking has fully stopped.
        Exits when ESC (27) is pressed in the display window.
        """
        self.tracking = False
        self.lasttrack = None
        self.hang_around_seconds = 5  # grace period after losing the object
        color_tracker_window = "Preston HackSpace 2013 BarCamp Project"
        cv.NamedWindow(color_tracker_window, 1)
        self.capture = cv.CaptureFromCAM(0)
        count = 0
        while True:

            # NOTE(review): two frames are grabbed per iteration; `img` is
            # processed below while the untouched `img2` is what gets shown.
            img = cv.QueryFrame(self.capture)
            img2 = cv.QueryFrame(self.capture)
            #blur the source image to reduce color noise
            cv.Smooth(img, img, cv.CV_BLUR, 3)

            #convert the image to hsv(Hue, Saturation, Value) so its
            #easier to determine the color to track(hue)

            hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            #limit all pixels that don't match our criteria, in this case we are
            #looking for purple but if you want you can adjust the first value in
            #both turples which is the hue range(120,140).  OpenCV uses 0-180 as
            #a hue range for the HSV color model

            #Orange  0-22
            #Yellow 22- 38
            #Green 38-75
            #Blue 75-130
            #Violet 130-160
            #Red 160-179

            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            cv.InRangeS(hsv_img, (0, 120, 120), (15, 255, 255),
                        thresholded_img)
            #cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img)
            #determine the objects moments and check that the area is large
            #enough to be our object
            #moments = cv.Moments(thresholded_img, 0)
            moments = cv.Moments(cv.GetMat(thresholded_img), 0)
            area = cv.GetCentralMoment(moments, 0, 0)

            #there can be noise in the video so ignore objects with small areas
            if (area > 100000):
                self.tracking = True
                self.lasttrack = time.time()
                #determine the x and y coordinates of the center of the object
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
                x = cv.GetSpatialMoment(moments, 1, 0) / area
                y = cv.GetSpatialMoment(moments, 0, 1) / area

                #Write the x,y coords to a file for the pyFirmata code to use for controlling the Arduino
                self.WriteXY(x, y)

                #create an overlay to mark the center of the tracked object
                # NOTE(review): `overlay` stays all-zero, so the Add below is
                # a no-op; the circle is drawn directly on `img` instead.
                overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

                #cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
                cv.Circle(img, (int(x), int(y)), 2, (255, 255, 255), 20)
                cv.Add(img, overlay, img)
                #add the thresholded image back to the img so we can see what was
                #left after it was applied
                cv.Merge(thresholded_img, None, None, None, img)
            else:
                if self.tracking == True:
                    #We have just lost track of the object we need to hang around for a bit
                    #to see if the object comes back.
                    self.WriteXY(-2, -2)
                    if time.time(
                    ) >= self.lasttrack + self.hang_around_seconds:
                        self.tracking = False

                if self.tracking == False:
                    self.WriteXY(-1, -1)
            #display the image
            cv.ShowImage(color_tracker_window, img2)

            if cv.WaitKey(10) == 27:
                break
Beispiel #19
0
def filter_red(src_img, dest_img):
    """Find red regions in ``src_img`` and outline their minimum-area
    rectangles on ``dest_img``.

    NOTE(review): everything from ``if count % 10 == 0:`` onward references
    names (count, grey, hc, image, minV, ...) that are not defined in this
    function -- it appears pasted in from another example, and the function
    is truncated at the end.  Confirm before relying on that part.
    """
    cv.Smooth(src_img, src_img)

    # Histogram
    # h = np.zeros((300,256,3))
    # b,g,r = cv.Split(src_img)
    # bins = np.arange(256).reshape(256,1)
    # color = [ (255,0,0),(0,255,0),(0,0,255) ]

    # CalcHist(image, hist, accumulate=0, mask=NULL)
    # histogram = cv.CalcHist(h_plane,[0],None,[256],[0,255])
    # # cv.NormalizeHist(histogram, 1)
    # hist=np.int32(np.around(hist_item))
    # pts = np.column_stack((bins,hist))
    # cv2.polylines(h,[pts],False,col)

    # h_bins = 30
    # s_bins = 32
    # hist_size = [h_bins, s_bins]
    # # hue varies from 0 (~0 deg red) to 180 (~360 deg red again */
    # h_ranges = [0, 180]
    # # saturation varies from 0 (black-gray-white) to
    # # 255 (pure spectrum color)
    # s_ranges = [0, 255]
    # ranges = [h_ranges, s_ranges]
    # scale = 10
    # hist = cv.CreateHist(h_bins, cv.CV_HIST_ARRAY, ranges, 1)
    # cv.CalcHist(src_img, hist)
    # (_, max_value, _, _) = cv.GetMinMaxHistValue(hist)
    # for h in range(h_bins):
    #     bin_val = cv.QueryHistValue_2D(hist, h, s)
    #     intensity = cv.Round(bin_val * 255 / max_value)
    #     cv.Rectangle(hist_img,
    #                  (h*scale, s*scale),
    #                  ((h+1)*scale - 1, (s+1)*scale - 1),
    #                  cv.RGB(intensity, intensity, intensity),
    #                  cv.CV_FILLED)

    # NOTE(review): GetSize returns (width, height); the names rows/cols are
    # swapped relative to that, but harmless since they are only reused
    # together as the same size tuple.
    (rows, cols) = cv.GetSize(src_img)

    img_hsv = cv.CreateImage((rows, cols), cv.IPL_DEPTH_8U, 3)
    h_plane = cv.CreateImage((rows, cols), 8, 1)
    s_plane = cv.CreateImage((rows, cols), 8, 1)
    img_binary = cv.CreateImage((rows, cols), 8, 1)
    cv.CvtColor(src_img, img_hsv, cv.CV_RGB2HSV)
    # cv.Split(img_hsv, h_plane, s_plane, None, None)

    # cv.EqualizeHist(h_plane, h_plane) # Gives better result in form of less flickering

    # Hue-range threshold; h_plane is reused as the binary mask.
    cv.InRangeS(img_hsv, cv.Scalar(COLOR_RANGE_LOW, 100, 100),
                cv.Scalar(COLOR_RANGE_HIGH, 255, 255), h_plane)

    # create temps used by algorithm images
    # eig = cv.CreateImage(cv.GetSize(src_img), cv.IPL_DEPTH_32F, 1)
    # temp = cv.CreateImage(cv.GetSize(src_img), cv.IPL_DEPTH_32F, 1)
    # the default parameters
    # quality = 0.01
    # min_distance = 30 # min distance between detected points
    # search the good points
    # features = cv.GoodFeaturesToTrack(h_plane, eig, temp, 20, quality, min_distance)
    # for (x,y) in features:
    #     cv.Circle(dest_img, (x, y), 3, (0, 255, 0), -1, 8, 0)

    cv.Canny(h_plane, h_plane, CANNY_LOW, CANNY_HIGH)
    cv.ShowImage("thresh", h_plane)

    contours = cv.FindContours(h_plane, cv.CreateMemStorage(),
                               cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
    # Outline every sufficiently large contour with its min-area box.
    while contours:
        rect = cv.MinAreaRect2(contours)
        if cv.ContourArea(contours) > 1000:
            print "FOUND RECT WITH AREA: %s" % cv.ContourArea(contours)
            box = cv.BoxPoints(rect)
            for i in range(4):
                cv.Line(dest_img, box[i], box[(i + 1) % 4], (0, 255, 0), 6, 8)
        contours = contours.h_next()

    # HoughLines2(image, storage, method, rho, theta, threshold, param1=0, param2=0)
    # lines = cv.HoughLines2(h_plane, cv.CreateMemStorage(), cv.CV_HOUGH_PROBABILISTIC, cv.CV_PI/180, 1, 50, 1)
    # for l in lines:
    #     (p1, p2) = l
    #     # Line(img, pt1, pt2, color, thickness=1, lineType=8, shift=0)
    #     cv.Line(dest_img, p1, p2, cv.Scalar(0,0,255), 2, cv.CV_AA)

    cv.ShowImage("result", dest_img)
    # NOTE(review): pasted foreign fragment begins here (undefined names).
    if count % 10 == 0:
        face = cv.HaarDetectObjects(grey, hc, cv.CreateMemStorage(), 1.2, 2,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (0, 0))

    #print face
    for [(x, y, w, h), k] in face:
        cv.Rectangle(image, (x, y), (x + w, y + h), (0, 255, 0))

    window_width, window_height = cv.GetSize(image)
    hsv_image = cv.CreateImage(cv.GetSize(image), 8, 3)
    hsv_mask = cv.CreateImage(cv.GetSize(image), 8, 1)
    hsv_edge = cv.CreateImage(cv.GetSize(image), 8, 1)
    hsv_min = cv.Scalar(0, 30, minV, 0)
    hsv_max = cv.Scalar(20, 150, 255, 0)
    cv.CvtColor(image, hsv_image, cv.CV_BGR2HSV)
    cv.InRangeS(hsv_image, hsv_min, hsv_max, hsv_mask)
    element_shape = cv.CV_SHAPE_RECT
    pos = 1
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Dilate(hsv_mask, hsv_mask, element, 2)
    cv.Erode(hsv_mask, hsv_mask, element, 2)
    cv.Smooth(hsv_mask, hsv_mask, cv.CV_MEDIAN, 25, 0, 0, 0)
    cv.Canny(hsv_mask, hsv_edge, 1, 3, 5)
    contours = cv.FindContours(hsv_mask, cv.CreateMemStorage(),
                               cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE,
                               (0, 0))
    des = cv.CreateImage(cv.GetSize(image), 8, 3)
    contours2 = False
    hull = False
    blank = cv.CreateImage(cv.GetSize(image), 8, 3)
Beispiel #21
0
    # NOTE(review): fragment of a larger function -- `frame`, `imageSize` and
    # `original` are defined by the (not visible) enclosing scope.
    hsv = cv.CreateImage(imageSize, 8, 3)
    ##    red = cv.CreateImage(imageSize, 8, 1)
    ##    sat = cv.CreateImage(imageSize, 8, 1)
    ##    cv.Split(hsv, None, sat, None, None)
    ##    cv.Threshold(red, red, 128, 255, cv.CV_THRESH_BINARY)
    ##    cv.Threshold(sat, sat, 128, 255, cv.CV_THRESH_BINARY)
    ##    cv.Mul(red,sat,red)
    ##    cv.Erode(red, red, iterations = 5)
    ##    cv.Dilate(red,red, iterations = 5)
    #   cv.Smooth(original, hsv, cv.CV_BLUR, 7)
    cv.CvtColor(frame, hsv, cv.CV_RGB2HSV)
    cv.ShowImage("HSV", hsv)
    thresh = cv.CreateImage(imageSize, 8, 1)
    thresha = cv.CreateImage(imageSize, 8, 1)

    # NOTE(review): the lower hue bound (165) exceeds the upper bound (140),
    # so this mask is always empty.  For the wrap-around red range the upper
    # bound was probably meant to be ~179 -- confirm before changing.
    cv.InRangeS(hsv, (165, 145, 100), (140, 255, 255), thresh)
    cv.ShowImage("Modifieda", thresh)
    cv.InRangeS(hsv, (0, 145, 100), (10, 255, 255), thresha)
    # Union of the two red hue ranges.
    cv.Add(thresh, thresha, thresh)

    # Morphological opening: drop small specks, then restore blob size.
    cv.Erode(thresh, thresh, iterations=5)
    cv.Dilate(thresh, thresh, iterations=5)
    ##    cv.ShowImage("Saturated", sat)
    ##    cv.ShowImage("End Resul", red)
    cv.ShowImage("Modified", thresh)
    memory = cv.CreateMemStorage(0)
    # Work on a clone so the displayed threshold image is left intact.
    clone = cv.CloneImage(thresh)
    contours = cv.FindContours(clone, memory, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    #   cv.ShowImage("Only Contours", clone)
    cv.DrawContours(original, contours, 120, 0, 7)
Beispiel #22
0
def reconocimiento_pelotas_y_robot (imagen):
    """Locate the robot markers and up to three red balls in ``imagen``.

    BUG FIX: the original body mixed tabs and spaces for indentation, which
    is a TabError under Python 3 and fragile under Python 2; indentation is
    normalised to spaces here with the logic unchanged.

    Returns (ro_x, ro_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y, num_pel):
    green-marker centroid, the three ball centroids (a missing ball is
    reported at the robot position) and the number of balls found.
    Raises ZeroDivisionError if any of the green/blue/yellow masks is empty.
    """
    tolerancia = 30

    cv.ShowImage('Prueba',imagen)

    # One binary mask per marker colour.
    rojo      = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
    verde     = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
    azul      = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)
    amarillo  = cv.CreateImage(cv.GetSize(imagen),cv.IPL_DEPTH_8U,1)

    # Reference BGR samples for each marker colour.
    pixel_rojo     = [36,28,237]
    pixel_verde    = [76,177,34]
    pixel_azul     = [232,162,0]
    pixel_amarillo = [164,73,163]

    cv.InRangeS(imagen,(pixel_rojo[0]-tolerancia,pixel_rojo[1]-tolerancia,pixel_rojo[2]-tolerancia),(pixel_rojo[0]+tolerancia,pixel_rojo[1]+tolerancia,pixel_rojo[2]+tolerancia),rojo)

    cv.InRangeS(imagen,(pixel_verde[0]-tolerancia,pixel_verde[1]-tolerancia,pixel_verde[2]-tolerancia),(pixel_verde[0]+tolerancia,pixel_verde[1]+tolerancia,pixel_verde[2]+tolerancia),verde)

    cv.InRangeS(imagen,(pixel_azul[0]-tolerancia,pixel_azul[1]-tolerancia,pixel_azul[2]-tolerancia),(pixel_azul[0]+tolerancia,pixel_azul[1]+tolerancia,pixel_azul[2]+tolerancia),azul)

    cv.InRangeS(imagen,(pixel_amarillo[0]-tolerancia,pixel_amarillo[1]-tolerancia,pixel_amarillo[2]-tolerancia),(pixel_amarillo[0]+tolerancia,pixel_amarillo[1]+tolerancia,pixel_amarillo[2]+tolerancia),amarillo)

    cv.ShowImage('Color Rojo'     ,rojo)
    cv.ShowImage('Color Verde'    ,verde)
    cv.ShowImage('Color Azul'     ,azul)
    cv.ShowImage('Color Amarillo' ,amarillo)

    (cg, ca, cy, cr) = ([],[],[],[])
    (p1, p2, p3) = ([],[],[])
    sumx_g = sumy_g = sumx_a = sumy_a = sumx_y = sumy_y = 0
    sumx_p1 = sumx_p2 = sumx_p3 = sumy_p1 = sumy_p2 = sumy_p3 = 0
    inicio = inicio2 = inicio3 = 0
    off = 20      # max pixel distance for a red pixel to join a cluster
    old_x = old_y = 0
    for j in range(1,480,1):
        for i in range(1,640,1):
            r = cv.Get2D(rojo ,j,i)
            a = cv.Get2D(azul ,j,i)
            v = cv.Get2D(verde,j,i)
            y = cv.Get2D(amarillo,j,i)
            if r[0] == 255:
                cr += [(i,j)]
                # Greedy clustering of red pixels into up to three balls.
                if inicio == 0:
                    inicio = 1
                    old_x = i
                    old_y = j
                elif i > old_x-off and i < old_x+off and j > old_y-off and j < old_y+off:
                    p1 += [(i,j)]
                    sumx_p1 = sumx_p1 + i
                    sumy_p1 = sumy_p1 + j
                    old_x = i
                    old_y = j
                else:
                    if inicio2 == 0:
                        inicio2 = 1
                        old_x2 = i
                        old_y2 = j
                    elif i > old_x2-off and i < old_x2+off and j > old_y2-off and j < old_y2+off:
                        p2 += [(i,j)]
                        sumx_p2 = sumx_p2 + i
                        sumy_p2 = sumy_p2 + j
                        old_x2 = i
                        old_y2 = j
                    else:
                        if inicio3 == 0:
                            inicio3 = 1
                            old_x3 = i
                            old_y3 = j
                        elif i > old_x3-off and i < old_x3+off and j > old_y3-off and j < old_y3+off:
                            p3 += [(i,j)]
                            sumx_p3 = sumx_p3 + i
                            sumy_p3 = sumy_p3 + j
                            old_x3 = i
                            old_y3 = j
            if v[0] == 255:
                cg += [(i,j)]
                sumx_g = sumx_g + i
                sumy_g = sumy_g + j
            if a[0] == 255:
                ca += [(i,j)]
                sumx_a = sumx_a + i
                sumy_a = sumy_a + j
            if y[0] == 255:
                cy += [(i,j)]
                sumx_y = sumx_y + i
                sumy_y = sumy_y + j
            p_old1_x = i
            p_old1_y = j

    # Marker centroids (integer division, as in the original).  rf_* and
    # rb_* are computed but not returned, matching the original behavior.
    ro_x = sumx_g/len(cg)      #0
    ro_y = sumy_g/len(cg)
    rf_x = sumx_a/len(ca)
    rf_y = sumy_a/len(ca)
    rb_x = sumx_y/len(cy)
    rb_y = sumy_y/len(cy)

    num_pel = 0

    if(len(p1)==0):
        p1_x = ro_x
        p1_y = ro_y
    else:
        p1_x = sumx_p1/len(p1)      #1
        p1_y = sumy_p1/len(p1)
        num_pel = num_pel + 1

    if(len(p2)==0):
        p2_x = ro_x
        p2_y = ro_y
    else:
        p2_x = sumx_p2/len(p2)      #2
        p2_y = sumy_p2/len(p2)
        num_pel = num_pel + 1

    if(len(p3)==0):
        p3_x = ro_x
        p3_y = ro_y
    else:
        p3_x = sumx_p3/len(p3)      #3
        p3_y = sumy_p3/len(p3)
        num_pel = num_pel + 1

    return ro_x,ro_y,p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,num_pel
Beispiel #23
0
def swordcenterdetection(enemy):
    global motionProxy
    global post
    global sonarProxy
    global memoryProxy
    # work ! set current to servos
    stiffnesses = 1.0
    time.sleep(0.5)

    # init video
    cameraProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 1  # 0 : QQVGA, 1 : QVGA, 2 : VGA
    colorSpace = 11  # RGB
    camNum = 0  # 0:top cam, 1: bottom cam
    fps = 1
    # frame Per Second
    cameraProxy.setParam(18, camNum)
    try:
        videoClient = cameraProxy.subscribe("python_client", resolution,
                                            colorSpace, fps)
    except:
        cameraProxy.unsubscribe("python_client")
        videoClient = cameraProxy.subscribe("python_client", resolution,
                                            colorSpace, fps)
    print "Start videoClient: ", videoClient
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = cameraProxy.getImageRemote(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]

    found = True
    posx = 0
    posy = 0
    mem = cv.CreateMemStorage(0)
    i = 0
    cv.NamedWindow("Real")
    cv.MoveWindow("Real", 0, 0)
    cv.NamedWindow("Threshold")
    cv.MoveWindow("Real", imageWidth + 100, 0)
    error = 0.0
    nframe = 0.0
    closing = 1
    tstp, tu = 0, 0
    K = 2
    pb = 0.5  # low pass filter value
    try:
        while found:
            nframe = nframe + 1
            # Get current image (top cam)
            naoImage = cameraProxy.getImageRemote(videoClient)
            # Get the image size and pixel array.
            imageWidth = naoImage[0]
            imageHeight = naoImage[1]
            array = naoImage[6]
            # Create a PIL Image from our pixel array.
            pilImg = Image.fromstring("RGB", (imageWidth, imageHeight), array)
            # Convert Image to OpenCV
            cvImg = cv.CreateImageHeader((imageWidth, imageHeight),
                                         cv.IPL_DEPTH_8U, 3)
            cv.SetData(cvImg, pilImg.tostring())
            cv.CvtColor(cvImg, cvImg, cv.CV_RGB2BGR)
            hsv_img = cv.CreateImage(cv.GetSize(cvImg), 8, 3)
            cv.CvtColor(cvImg, hsv_img, cv.CV_BGR2HSV)
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            thresholded_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            temp = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            eroded = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            skel = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            edges = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            # Get the orange on the image
            cv.InRangeS(hsv_img, (110, 80, 80), (150, 200, 200),
                        thresholded_img)
            storage = cv.CreateMemStorage(0)

            lines = cv.HoughLines2(thresholded_img,
                                   storage,
                                   cv.CV_HOUGH_PROBABILISTIC,
                                   1,
                                   cv.CV_PI / 180,
                                   30,
                                   param1=0,
                                   param2=0)
            print lines

            first = 1
            sl = 0
            Mx = []
            My = []
            for l in lines:
                sl = sl + 1
            for i in range((sl - 1)):
                l = lines[i]
                print l
                rho = l[0]
                theta = l[1]
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * rho
                y0 = b * rho
                cf1, cf2 = 300, 300
                # xpt11 = int(cv.Round(x0 + cf1*(-b)))
                # ypt11 = int(cv.Round(y0 + cf1*(a)))
                # xpt12 = int(cv.Round(x0 - cf2*(-b)))
                # ypt12 = int(cv.Round(y0 - cf2*(a)))
                pt11 = l[0]
                pt12 = l[1]
                cv.Line(cvImg,
                        pt11,
                        pt12,
                        cv.CV_RGB(255, 255, 255),
                        thickness=1,
                        lineType=8,
                        shift=0)

                l = lines[(i + 1)]
                rho = l[0]
                theta = l[1]
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * rho
                y0 = b * rho
                cf1, cf2 = 300, 300
                # xpt1 = int(cv.Round(x0 + cf1*(-b)))
                # ypt1 = int(cv.Round(y0 + cf1*(a)))
                # xpt2 = int(cv.Round(x0 - cf2*(-b)))
                # ypt2 = int(cv.Round(y0 - cf2*(a)))

                # A = np.array(((xpt1,ypt1),(xpt2,ypt2)))
                # B = np.array(((xpt11,ypt11),(xpt12,ypt12)))

                # try:
                # 	m = line_intersection(A, B)
                # 	mx = m[0]
                # 	my = m[1]
                # 	Mx.append(mx)
                # 	My.append(my)
                # except:
                # 	error=1 #intersection return False we don't add the point

                pt1 = l[0]
                pt2 = l[1]
                cv.Line(cvImg,
                        pt1,
                        pt2,
                        cv.CV_RGB(255, 255, 255),
                        thickness=1,
                        lineType=8,
                        shift=0)
            cMx, cMy = [], []
            for x in Mx:
                cMx.append((1 - pb) * x)
            for y in My:
                cMy.append((1 - pb) * y)
            try:
                for i in range(len(cMx)):
                    Mx[i] = cMx[i] + cMtx[i]
                    My[i] = cMy[i] + cMty[i]
                Mm = (int(np.mean(Mx)), int(np.mean(My)))
                print "M", Mm
                cv.Circle(cvImg, Mm, 5, (254, 0, 254), -1)
            except:
                error = 1  # we are at first iteration
            cMtx, cMty = [], []
            Mtx = Mx
            Mty = My
            for x in Mtx:
                cMtx.append(pb * x)
            for y in Mty:
                cMty.append(pb * y)

            cv.ShowImage("Real", cvImg)
            cv.ShowImage("Threshold", thresholded_img)
            cv.WaitKey(1)

    except KeyboardInterrupt:
        print
        print "Interrupted by user, shutting down"
        end()
Beispiel #24
0
    # NOTE(review): fragment of a larger function whose header is not
    # visible here; `capture`, `Opening` and `Closing` come from the
    # missing earlier part.
    cv.NamedWindow("Mask", 1)
    # Trackbars are attached to a "Camera" window created elsewhere.
    cv.CreateTrackbar("Open", "Camera", 0, 10, Opening)
    cv.CreateTrackbar("Close", "Camera", 0, 10, Closing)

    # Grab one frame to size the scratch images.
    # NOTE(review): legacy cv IplImages have no .getSize() method --
    # presumably cv.GetSize(src) was intended; confirm before running.
    src = cv.QueryFrame(capture)
    hsv_frame = cv.CreateImage(src.getSize(), cv.IPL_DEPTH_8U, 3)
    thresholded = cv.CreateImage(src.getSize(), cv.IPL_DEPTH_8U, 1)
    thresholded2 = cv.CreateImage(src.getSize(), cv.IPL_DEPTH_8U, 1)

    storage = cv.CreateMemStorage(0)

    while True:
        src = cv.QueryFrame(capture)
        #image = cv.CloneImage(src)
        #dest = cv.CloneImage(src)
        #hsv = cv.CloneImage(src)

        # convert to HSV for color matching
        # NOTE(review): `image` and `hsv` are not assigned in this view
        # (the clone lines above are commented out) -- verify they are
        # defined earlier in the enclosing function.
        cv.CvtColor(image, hsv, cv.CV_BGR2HSV)

        # Keep pixels in the red/orange hue band with enough saturation
        # and brightness.
        mask = cv.CreateImage(cv.GetSize(image), 8, 1);
        cv.InRangeS(hsv, cv.Scalar(0, 50, 170, 0), cv.Scalar(30, 255, 255, 0), mask);

        cv.ShowImage("Camera", hsv)
        cv.ShowImage("Mask", mask)

        # Exit on ESC.
        if cv.WaitKey(10) == 27:
            break

    # NOTE(review): window name case differs from the ones created above
    # ("camera" vs "Camera") -- this call likely destroys nothing.
    cv.DestroyWindow("camera")
Beispiel #25
0
	def in_range_s(self, im):
		"""Return a single-channel mask of the pixels of *im* that fall
		inside the configured [mn_*, mx_*] per-channel range."""
		lower = cv.Scalar(self.mn_1_, self.mn_2_, self.mn_3_)
		upper = cv.Scalar(self.mx_1_, self.mx_2_, self.mx_3_)
		mask = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1)
		cv.InRangeS(im, lower, upper, mask)
		return mask
def thresholded_image(image, min_treshold, max_treshold):
    """Threshold *image* (BGR) in HSV space.

    Converts the frame to HSV and returns a single-channel mask where
    pixels inside [min_treshold, max_treshold] are white.
    """
    size = cv.GetSize(image)
    hsv = cv.CreateImage(size, image.depth, 3)
    cv.CvtColor(image, hsv, cv.CV_BGR2HSV)
    mask = cv.CreateImage(size, image.depth, 1)
    cv.InRangeS(hsv, min_treshold, max_treshold, mask)
    return mask
Beispiel #27
0
def eyefinder(IP, PORT):
    global motionProxy
    global post
    global sonarProxy
    global memoryProxy
    global cameraProxy
    global videoClient
    # work ! set current to servos
    stiffnesses = 1.0
    time.sleep(0.5)

    # TEMP
    pNames = "Body"
    pStiffnessLists = 0.0
    pTimeLists = 1.0
    motionProxy.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = cameraProxy.getImageRemote(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]

    found = True
    posx = 0
    posy = 0
    mem = cv.CreateMemStorage(0)
    i = 0
    cv.NamedWindow("Real")
    cv.MoveWindow("Real", 0, 0)
    cv.NamedWindow("Threshold")
    cv.MoveWindow("Real", imageWidth + 100, 0)
    error = 0.0
    nframe = 0.0
    closing = 3
    tstp, tu = 0, 0
    K = 1
    try:
        while found:
            nframe = nframe + 1
            # Get current image (top cam)
            naoImage = cameraProxy.getImageRemote(videoClient)
            # Get the image size and pixel array.
            imageWidth = naoImage[0]
            imageHeight = naoImage[1]
            array = naoImage[6]
            # Create a PIL Image from our pixel array.
            pilImg = Image.fromstring("RGB", (imageWidth, imageHeight), array)
            # Convert Image to OpenCV
            cvImg = cv.CreateImageHeader((imageWidth, imageHeight),
                                         cv.IPL_DEPTH_8U, 3)
            cv.SetData(cvImg, pilImg.tostring())
            cv.CvtColor(cvImg, cvImg, cv.CV_RGB2BGR)

            cvImg2 = cv.CreateImageHeader((imageWidth, imageHeight),
                                          cv.IPL_DEPTH_8U, 3)
            cv.SetData(cvImg2, pilImg.tostring())
            cv.CvtColor(cvImg2, cvImg, cv.CV_RGB2BGR)

            hsv_img = cv.CreateImage(cv.GetSize(cvImg), 8, 3)
            cv.CvtColor(cvImg, hsv_img, cv.CV_BGR2HSV)
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            thresholded_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

            thresholded1 = cv.CreateImage(cv.GetSize(cvImg), 8, 1)
            cv.InRangeS(hsv_img, (91, 130, 130), (130, 255, 255), thresholded1)
            storage2 = cv.CreateMemStorage(0)
            contour2 = cv.FindContours(thresholded1, storage2,
                                       cv.CV_RETR_CCOMP,
                                       cv.CV_CHAIN_APPROX_SIMPLE)
            points = []
            d = []
            data2 = []
            while contour2:
                # Draw bounding rectangles
                bound_rect = cv.BoundingRect(list(contour2))
                contour2 = contour2.h_next()
                # for more details about cv.BoundingRect,see documentation
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(cvImg, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
                lastx = posx
                lasty = posy
                posx = cv.Round((pt1[0] + pt2[0]) / 2)
                posy = cv.Round((pt1[1] + pt2[1]) / 2)
                data2.append([posx, posy])
                d.append(math.sqrt(pt1[0]**2 + pt2[0]**2))
                d.append(math.sqrt(pt1[1]**2 + pt2[1]**2))

            cvImg, error, centroid1, labels = clustering(data2,
                                                         cvImg,
                                                         nframe,
                                                         error,
                                                         K=1)

            thresholded2 = cv.CreateImage(cv.GetSize(cvImg), 8, 1)
            cv.InRangeS(hsv_img, (70, 150, 150), (89, 255, 255), thresholded2)
            storage2 = cv.CreateMemStorage(0)
            contour2 = cv.FindContours(thresholded2, storage2,
                                       cv.CV_RETR_CCOMP,
                                       cv.CV_CHAIN_APPROX_SIMPLE)
            points = []
            d = []
            data2 = []
            while contour2:
                # Draw bounding rectangles
                bound_rect = cv.BoundingRect(list(contour2))
                contour2 = contour2.h_next()
                # for more details about cv.BoundingRect,see documentation
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(cvImg2, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
                lastx = posx
                lasty = posy
                posx = cv.Round((pt1[0] + pt2[0]) / 2)
                posy = cv.Round((pt1[1] + pt2[1]) / 2)
                data2.append([posx, posy])
                d.append(math.sqrt(pt1[0]**2 + pt2[0]**2))
                d.append(math.sqrt(pt1[1]**2 + pt2[1]**2))

            cvImg2, error, centroid2, labels = clustering(data2,
                                                          cvImg2,
                                                          nframe,
                                                          error,
                                                          K=1)
            if (len(centroid2) != 0 and len(centroid1) != 0):
                c1 = centroid1.tolist()[0]
                c2 = centroid2.tolist()[0]
                if c1[0] > c2[0]:
                    C = [c1, c2]
                else:
                    C = [c2, c1]
                print C
            cv.ShowImage("Blue", cvImg)
            cv.ShowImage("Green", cvImg2)
            #cv.ShowImage("Threshold",thresholded_img2)
            cv.ShowImage("Threshold", thresholded2)
            cv.WaitKey(1)

            cv.ShowImage("Real", cvImg)
            #cv.ShowImage("Threshold",thresholded_img2)
            cv.ShowImage("Threshold", thresholded2)
            cv.WaitKey(1)

    except KeyboardInterrupt:
        print
        print "Interrupted by user, shutting down"
        end(IP, PORT)
Beispiel #28
0
def swordcenterdetection(shm_tar,
                         mutex_tar,
                         enemy="obi",
                         window=5,
                         pb=0.7,
                         segHough=50):
    global motionProxy
    global post
    global sonarProxy
    global memoryProxy
    global IP
    global PORT
    # work ! set current to servos
    stiffnesses = 1.0
    time.sleep(0.5)

    # init video
    cameraProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 1  # 0 : QQVGA, 1 : QVGA, 2 : VGA
    colorSpace = 11  # RGB
    camNum = 0  # 0:top cam, 1: bottom cam
    fps = 1
    # frame Per Second
    cameraProxy.setParam(18, camNum)
    try:
        videoClient = cameraProxy.subscribe("python_client", resolution,
                                            colorSpace, fps)
    except:
        cameraProxy.unsubscribe("python_client")
        videoClient = cameraProxy.subscribe("python_client", resolution,
                                            colorSpace, fps)
    print "Start videoClient: ", videoClient
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = cameraProxy.getImageRemote(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]

    found = True
    posx = 0
    posy = 0
    mem = cv.CreateMemStorage(0)
    i = 0
    cv.NamedWindow("Real")
    cv.MoveWindow("Real", 0, 0)
    cv.NamedWindow("Threshold")
    cv.MoveWindow("Real", imageWidth + 100, 0)
    closing = 1
    tstp, tu = 0, 0
    K = 2
    Mf, Mft = [], []
    window = 5  # lenght of the window of observation for the low pass filter
    pb = 0.7  # low pass filter value
    print "before lines"
    try:
        while found:
            #Synchro
            mutex_tar.acquire()
            n = shm_tar.value[0]
            mutex_tar.release()
            if n == -1:
                print "Fail in target mem zone tar. Exit", n

            elif n == -2:
                print "Terminated by parent"

            # Get current image (top cam)
            naoImage = cameraProxy.getImageRemote(videoClient)
            # Get the image size and pixel array.
            imageWidth = naoImage[0]
            imageHeight = naoImage[1]
            array = naoImage[6]
            # Create a PIL Image from our pixel array.
            pilImg = Image.fromstring("RGB", (imageWidth, imageHeight), array)
            # Convert Image to OpenCV
            cvImg = cv.CreateImageHeader((imageWidth, imageHeight),
                                         cv.IPL_DEPTH_8U, 3)
            cv.SetData(cvImg, pilImg.tostring())
            cv.CvtColor(cvImg, cvImg, cv.CV_RGB2BGR)
            hsv_img = cv.CreateImage(cv.GetSize(cvImg), 8, 3)
            cv.CvtColor(cvImg, hsv_img, cv.CV_BGR2HSV)
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

            if enemy == "obi":
                # Get the blue on the image
                cv.InRangeS(hsv_img, (100, 40, 40), (150, 255, 255),
                            thresholded_img)
            elif enemy == "dark":
                # Get the red on the image
                cv.InRangeS(hsv_img, (0, 150, 150), (40, 255, 255),
                            thresholded_img)
            else:
                tts.say("I don't know my enemy")

            # cv.Erode(thresholded_img,thresholded_img, None, closing)
            # cv.Dilate(thresholded_img,thresholded_img, None, closing)
            storage = cv.CreateMemStorage(0)

            lines = cv.HoughLines2(thresholded_img,
                                   storage,
                                   cv.CV_HOUGH_PROBABILISTIC,
                                   1,
                                   cv.CV_PI / 180,
                                   segHough,
                                   param1=0,
                                   param2=0)
            lines_standard = cv.HoughLines2(thresholded_img,
                                            storage,
                                            cv.CV_HOUGH_STANDARD,
                                            1,
                                            cv.CV_PI / 180,
                                            segHough,
                                            param1=0,
                                            param2=0)
            Theta = []
            for l in lines_standard:
                Theta.append(l[1])
            theta = np.mean(Theta)
            PTx, PTy, Mftx, Mfty = [], [], [], []

            for l in lines:
                pt1 = l[0]
                pt2 = l[1]
                cv.Line(cvImg,
                        pt1,
                        pt2,
                        cv.CV_RGB(255, 255, 255),
                        thickness=1,
                        lineType=8,
                        shift=0)
                PTx.append(pt1[0])
                PTx.append(pt2[0])
                PTy.append(pt1[1])
                PTy.append(pt2[1])
            if len(PTx) != 0:
                xm = np.mean(PTx)
                ym = np.mean(PTy)
                M = (int(xm), int(ym))
                Mf.append(M)
                if len(Mf) > window:
                    Mft = Mf[len(Mf) - window:len(Mf)]
                    for m in Mft[0:-2]:
                        Mftx.append(m[0])
                        Mfty.append(m[1])
                    mx = (1 - pb) * np.mean(Mftx) + pb * Mftx[-1]
                    my = (1 - pb) * np.mean(Mfty) + pb * Mfty[-1]
                    M = (int(mx), int(my))
                    cv.Circle(cvImg, M, 5, (254, 0, 254), -1)
                    #Thread processing
                    mutex_tar.acquire()
                    shm_tar.value = [n, (M[0], M[1], 0, theta)]
                    mutex_tar.release()

            cv.ShowImage("Real", cvImg)
            cv.ShowImage("Threshold", thresholded_img)
            cv.WaitKey(1)

    except KeyboardInterrupt:
        print
        print "Interrupted by user, shutting down"
        end()
Beispiel #29
0
def getthresholdedimgR4(imhsv):
    """Build a binary mask of the red, blue and green markers in *imhsv*.

    Expects an HSV image.  Bounding rectangles of the red and blue
    blobs are drawn onto *imhsv* as a side effect, and the union of
    the three colour masks is returned as a single-channel image.
    """
    # A little change here. Creates images for blue and yellow (or whatever color you like).
    imgRed = cv.CreateImage(cv.GetSize(imhsv), 8, 1)
    imgYellow = cv.CreateImage(cv.GetSize(imhsv), 8, 1)  # despite the name, holds the blue range below
    imgGreen = cv.CreateImage(cv.GetSize(imhsv), 8, 1)

    imgthreshold = cv.CreateImage(cv.GetSize(imhsv), 8, 1)

    # BUG FIX: the original thresholded an undefined name `imghsv`;
    # the parameter is spelled `imhsv`.
    cv.InRangeS(imhsv, cv.Scalar(0, 190, 130), cv.Scalar(18, 255, 190),
                imgRed)  # Select a range of red color
    cv.InRangeS(imhsv, cv.Scalar(100, 100, 100), cv.Scalar(120, 255, 255),
                imgYellow)  # Select a range of blue color
    cv.InRangeS(imhsv, cv.Scalar(67, 103, 46), cv.Scalar(100, 209, 184),
                imgGreen)  # Select a range of green color

    storage = cv.CreateMemStorage(0)
    # NOTE: legacy cv.FindContours may modify its input mask in place.
    redContour = cv.FindContours(imgRed, storage, cv.CV_RETR_CCOMP,
                                 cv.CV_CHAIN_APPROX_SIMPLE)
    points = []

    while redContour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(redContour))
        # for more details about cv.BoundingRect, see documentation
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        cv.Rectangle(imhsv, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

        # Calculating centroids (currently unused beyond this loop)
        centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
        centroidy = cv.Round((pt1[1] + pt2[1]) / 2)
        area = cv.ContourArea(list(redContour))

        redContour = redContour.h_next()

    # BUG FIX: the original looped on an undefined `yellowContour` while
    # reading the already-exhausted `redContour`; walk the blue mask's
    # own contour chain instead.
    yellowContour = cv.FindContours(imgYellow, storage, cv.CV_RETR_CCOMP,
                                    cv.CV_CHAIN_APPROX_SIMPLE)
    while yellowContour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(yellowContour))
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        cv.Rectangle(imhsv, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

        # Calculating centroids (currently unused beyond this loop)
        centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
        centroidy = cv.Round((pt1[1] + pt2[1]) / 2)
        area = cv.ContourArea(list(yellowContour))

        yellowContour = yellowContour.h_next()

    cv.ShowImage("Ceva", imhsv)

    # BUG FIX: the original added an undefined `imgBlue`, then the second
    # Add overwrote the result with imgGreen+imgGreen; accumulate all
    # three masks into the output instead.
    cv.Add(imgRed, imgYellow, imgthreshold)
    cv.Add(imgthreshold, imgGreen, imgthreshold)
    return imgthreshold
    cv.GetSize(image), 8,
    1)  # Initializes blank image holders for the channel separation.
# NOTE(review): continuation of a script whose earlier lines are not
# visible here; `image`, `channelG` and `imageThreshold` are created in
# the missing part above.
channelB = cv.CreateImage(cv.GetSize(image), 8, 1)

cv.Split(
    image, cv.CreateImage(cv.GetSize(image), 8, 1), channelG, channelB, None
)  # Splits the input image into channels (RGB). I only use B and G because the laser is green, not red. The R channel is discarded into a throwaway image.

cv.Sub(
    channelG, channelB, imageThreshold
)  # Subtracts the channels. Since the green laser's pixels basically have a G value of 255, they stand out. Uncomment the next two lines to see what I mean.
#cv.ShowImage('Test', imageThreshold)
#cv.WaitKey(0)

cv.InRangeS(
    imageThreshold, cv.Scalar(24, 24, 24), cv.Scalar(255, 255,
                                                     255), imageThreshold
)  # The channel separation doesn't make things bitonal. This is something I need to fix, as it depends on a fixed value, but this just thresholds everything between those values into white.

cv.ShowImage('Test', imageThreshold)
cv.WaitKey(0)

cv.Smooth(
    imageThreshold, imageThreshold, smoothtype=cv.CV_MEDIAN, param1=9
)  # This median smoothing filter is really handy. It averages the values of each pixel within the window, so any noise gets turned into black (the image is bitonal, so if it's not white, it's black).

cv.ShowImage('Test', imageThreshold)
cv.WaitKey(0)

image = Image.fromstring(
    'L', cv.GetSize(imageThreshold), imageThreshold.tostring()
)  # I have to get rid of this dependency on Python's imaging library, but this just re-inits the thresholded image as an 8-bit grayscale PIL image.