コード例 #1
0
ファイル: hue_filter.py プロジェクト: caz2114/visual_feedback
    def listen(self):
        """Render one debug frame for the hue filter.

        Zooms/crops the stored background around an offset centre,
        hue-thresholds it, draws detected contours and a crosshair, and
        shows the result plus one window per named hue range.

        NOTE(review): relies on self.background, self.zoom, self.offset,
        self.cp, self.hue_low/self.hue_up, self.ch_x/self.ch_y and
        self.zoomPt being set elsewhere on the instance -- confirm.
        """
        # Working copies of the background at full size.
        bgimg = cv.CreateImage((self.background.width, self.background.height),
                               8, 3)
        img = cv.CreateImage((self.background.width, self.background.height),
                             8, 3)
        cv.Copy(self.background, bgimg)
        # Crop a zoom window around the (offset) centre, then scale it back up.
        smallimg = cv.CreateImage((self.background.width / self.zoom,
                                   self.background.height / self.zoom), 8, 3)
        cv.GetRectSubPix(
            bgimg, smallimg,
            (self.background.width / (2 * self.zoom) + self.offset[0],
             self.background.height / (2 * self.zoom) + self.offset[1]))
        cv.Resize(smallimg, img)

        cv.Smooth(img, img, cv.CV_GAUSSIAN)
        # Mark the currently selected point, if any (compared against False,
        # so self.cp is presumably False when unset).
        if (self.cp != False):
            cv.Circle(img, self.zoomPt(int(self.cp.x), int(self.cp.y)), 3,
                      cv.RGB(0, 255, 0), -1)
        # Keep only pixels inside the configured hue interval.
        mask = thresholding.threshold(img,
                                      thresholding.CUSTOM,
                                      False,
                                      crop_rect=None,
                                      cam_info=None,
                                      listener=None,
                                      hue_interval=(self.hue_low, self.hue_up))

        # Invert the mask and copy only the masked pixels into a fresh image.
        cv.Not(mask, mask)
        new_img = cv.CloneImage(img)
        cv.SetZero(new_img)
        cv.Copy(img, new_img, mask)
        new_img = thresholding.sat_threshold(new_img, 50)
        # Draw a yellow crosshair at (ch_x, ch_y) on the zoomed source image.
        cv.Line(img, (self.ch_x - 25, self.ch_y), (self.ch_x + 25, self.ch_y),
                cv.RGB(255, 255, 0))
        cv.Line(img, (self.ch_x, self.ch_y - 25), (self.ch_x, self.ch_y + 25),
                cv.RGB(255, 255, 0))

        # Find contours in the masked image after a morphological opening.
        image_gray = cv.CreateImage(cv.GetSize(new_img), 8, 1)
        cv.CvtColor(new_img, image_gray, cv.CV_RGB2GRAY)
        cv.MorphologyEx(image_gray, image_gray, None, None, cv.CV_MOP_OPEN, 1)
        storage = cv.CreateMemStorage(0)
        seq = cv.FindContours(image_gray, storage)
        points = []
        contour = seq
        centers = []
        ccs = []
        while contour:
            bound_rect = cv.BoundingRect(list(contour))
            area = cv.ContourArea(contour)
            cc = contour
            contour = contour.h_next()

            # Ignore blobs outside the plausible size band.
            if area < 50 or area > 2500:
                continue
            ccs.append(cc)
            win, center, radius = cv.MinEnclosingCircle(cc)
            cv.DrawContours(new_img, cc, (0, 255, 0), (0, 255, 0), 0, 1)
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2],
                   bound_rect[1] + bound_rect[3])
            points.append(pt1)
            points.append(pt2)
            cv.Circle(new_img, center, radius, (0, 0, 255))
            centers.append(center)
            #cv.Rectangle(new_img, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            # Label each accepted contour with its area.
            font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
            cv.PutText(new_img, "%.2f" % area, pt1, font, (255, 255, 255))

        # Connect each pair of adjacent contours with a line.
        for cont1, cont2 in itertools.combinations(ccs, 2):
            if is_next_to(cont1, cont2):
                win, c1, r1 = cv.MinEnclosingCircle(cont1)
                win, c2, r2 = cv.MinEnclosingCircle(cont2)
                cv.Line(new_img, c1, c2, (255, 255, 0))
        #DRAW
        cv.ShowImage(self.name, new_img)
        # Also show one filtered window per upper-cased hue-range name.
        imgcs = {}
        satt = thresholding.sat_threshold(img, 50)
        for color in HueRanges.__dict__:
            if color == color.upper():
                img_c = thresholding.filter_color(satt, color)
                cv.ShowImage(color, img_c)
        cv.WaitKey(25)
コード例 #2
0
ファイル: nao_1.29.py プロジェクト: stayrealff/nao-lib
def Detect(frame, draw=True):
    """Detect a single face in *frame* with a Haar cascade, scanning an
    adaptive region of interest for speed.

    ``fast`` (global) is a confidence countdown: 3 means a face was seen
    last frame, so only a tight ROI around its previous position is
    scanned; 2 and 1 progressively widen the ROI; 0 falls back to a
    full-frame scan. On a successful detection ``fast`` resets to 3; on a
    miss it is decremented.

    Parameters:
        frame: IplImage to scan (drawn on in place when *draw* is true).
        draw: when true, overlay the ROI, face rectangle and centre dot.

    Returns:
        (frame, face_loc, facedetected, region) -- face_loc is the face
        centre converted to an angular offset from the image centre.
    """
    global face1_x
    global face1_y
    global face1_width
    global face1_center
    global old_face1_x
    global old_face1_y
    global old_face1_width
    global fast
    global windowsz
    global cascade_front

    roiscale = 2
    windowscale = 10
    face1_center = (0, 0)

    if fast > 0:

        if fast == 3:
            # The cvrectangle defines the ROI that is used for face
            # detection; it depends on the previous location of the face and
            # increases in size if no face is detected.
            cvrectangle = [
                face1_x - (face1_width / (roiscale * 2)),
                face1_y - (face1_width / (roiscale * 2)),
                face1_width + (face1_width / roiscale),
                face1_width + (face1_width / roiscale)
            ]
            # windowsz should be kept as big as possible: a larger windowsz
            # means faster detection.
            windowsz = face1_width - (face1_width / windowscale)
            old_face1_x = face1_x
            old_face1_y = face1_y
            old_face1_width = face1_width
        if fast == 2:
            cvrectangle = [
                old_face1_x - (old_face1_width / (roiscale)),
                old_face1_y - (old_face1_width / (roiscale)),
                old_face1_width + (old_face1_width / (roiscale * 0.5)),
                old_face1_width + (old_face1_width / (roiscale * 0.5))
            ]
            windowsz = old_face1_width - (old_face1_width / (windowscale / 2))
        if fast == 1:
            cvrectangle = [
                old_face1_x - (old_face1_width / (roiscale * 0.5)),
                old_face1_y - (old_face1_width / (roiscale * 0.5)),
                old_face1_width + (old_face1_width / (roiscale * 0.25)),
                old_face1_width + (old_face1_width / (roiscale * 0.25))
            ]
            windowsz = old_face1_width - (old_face1_width / (windowscale / 4))

        # Clamp the ROI to the camera region: move negative origins to the
        # edge, and shrink the width/height if it spills past the frame.
        for i in range(0, 2):
            if cvrectangle[i] < 0:
                cvrectangle[i] = 0

            if i == 0 and (cvrectangle[i] + cvrectangle[i + 2]) > frame.width:
                cvrectangle[i + 2] = frame.width - cvrectangle[i]

            if i == 1 and (cvrectangle[i] + cvrectangle[i + 2]) > frame.height:
                cvrectangle[i + 2] = frame.height - cvrectangle[i]

        if draw:
            cv.Rectangle(frame, (cvrectangle[0], cvrectangle[1]),
                         (cvrectangle[0] + cvrectangle[2],
                          cvrectangle[1] + cvrectangle[3]), cv.RGB(0, 255, 0))

        cv.SetImageROI(frame, tuple(cvrectangle))

    else:
        windowsz = 20
        cv.ResetImageROI(frame)

    faces = cv.HaarDetectObjects(frame, cascade_front, cv.CreateMemStorage(0),
                                 1.2, 6, 1, (windowsz, windowsz))

    cv.ResetImageROI(frame)

    try:
        if fast > 0:
            # Detections are relative to the ROI rather than the whole
            # image, so shift them back by the ROI origin.
            face1_x = faces[0][0][0] + cvrectangle[0]
            face1_y = faces[0][0][1] + cvrectangle[1]
        else:
            face1_x = faces[0][0][0]
            face1_y = faces[0][0][1]

        face1_width = faces[0][0][2]
        face1_height = faces[0][0][3]
        face1_center = (face1_x + (face1_width / 2),
                        face1_y + (face1_height / 2))

        region = Region()
        region.x = face1_x
        region.y = face1_y
        region.width = face1_width
        region.height = face1_height

        if draw:
            cv.Rectangle(frame, (face1_x, face1_y),
                         (face1_x + face1_width, face1_y + face1_height),
                         cv.RGB(255, 255, 255))
            cv.Circle(frame, face1_center, 2, cv.RGB(255, 0, 0))
        fast = 3
    except IndexError:
        # faces was empty -- no face this frame, so decay the confidence.
        # (Was a bare ``except:``, which also hid unrelated errors.)
        fast -= 1
        region = Region()

    facedetected = fast == 3

    # Convert the face centre from pixels to an angular offset from the
    # optical axis (0.55 presumably relates to the camera FOV -- TODO
    # confirm against the camera spec).
    face_loc = list(face1_center)
    convrad = 0.55 / (frame.width / 2)
    face_loc[0] = (face_loc[0] - (frame.width / 2)) * convrad
    face_loc[1] = (face_loc[1] - (frame.height / 2)) * convrad

    return frame, face_loc, facedetected, region
コード例 #3
0
    def find_places(self, c):
        """Derive the twelve egg-tray place positions from the four tray
        corner pixels *c*, store them (and the corners) in Baxter
        coordinates, and display/save an annotated image.

        The corners are first rotated so that cc[0]->cc[1] runs along the
        tray's long side; places are then laid out on a grid of eighths of
        the long side and sixths of the short side.
        """
        # Squared lengths of the first two sides: which one is the long edge?
        dx01 = c[1][0] - c[0][0]
        dy01 = c[1][1] - c[0][1]
        dx12 = c[2][0] - c[1][0]
        dy12 = c[2][1] - c[1][1]

        if (dx01 * dx01) + (dy01 * dy01) > (dx12 * dx12) + (dy12 * dy12):
            cc = [c[0], c[1], c[2], c[3]]  # c[0] to c[1] is a long side
        else:
            cc = [c[1], c[2], c[3], c[0]]  # c[1] to c[2] is a long side

        # Ball tray corners in Baxter coordinates.
        for idx, corner in enumerate(cc):
            self.ball_tray_corner[idx] = self.pixel_to_baxter(
                corner, self.tray_distance)

        # Grid steps in pixel coordinates: dl along the long side (eighths),
        # ds along the short side (sixths), measured from corner cc[0].
        ref_x = cc[0][0]
        ref_y = cc[0][1]
        dl_x = (cc[1][0] - cc[0][0]) / 8
        dl_y = (cc[1][1] - cc[0][1]) / 8
        ds_x = (cc[2][0] - cc[1][0]) / 6
        ds_y = (cc[2][1] - cc[1][1]) / 6

        # (long-step, short-step) multipliers for the twelve places, in the
        # same order as the original hand-written table.
        steps = [(3, 3), (5, 3), (3, 1), (5, 1), (3, 5), (5, 5),
                 (1, 3), (7, 3), (1, 1), (7, 1), (1, 5), (7, 5)]
        p = {}
        for idx, (m, n) in enumerate(steps):
            p[idx] = (ref_x + (m * dl_x) + (n * ds_x),
                      ref_y + (m * dl_y) + (n * ds_y))

        for idx in range(12):
            # Mark the position of each ball-tray place.
            cv.Circle(cv.fromarray(self.cv_image),
                      (int(p[idx][0]), int(p[idx][1])), 5, (0, 250, 0), -1)

            # Ball tray places in Baxter coordinates.
            self.ball_tray_place[idx] = self.pixel_to_baxter(
                p[idx], self.tray_distance)

        # Display the ball tray places.
        cv.ShowImage("Egg tray", cv.fromarray(self.cv_image))

        if self.save_images:
            # Save the tray image with the place overlay.
            file_name = self.image_dir + "ball_tray.jpg"
            cv.SaveImage(file_name, cv.fromarray(self.cv_image))

        # 3ms wait
        cv.WaitKey(3)
コード例 #4
0
        # NOTE(review): this is a fragment -- the enclosing function/loop
        # (defining pt1, pt2, imghsv, yellow, blue, imdraw, test, img2)
        # starts above this excerpt.
        cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

        # Calculating the centroid of the bounding box.
        centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
        centroidy = cv.Round((pt1[1] + pt2[1]) / 2)

        # Classify the blob by the hue at its centroid: 20-30 counts as
        # yellow, 100-120 as blue. Assumes imghsv is an HSV image -- TODO
        # confirm the upstream conversion.
        if (20 < cv.Get2D(imghsv, centroidy, centroidx)[0] < 30):
            yellow.append((centroidx, centroidy))
        elif (100 < cv.Get2D(imghsv, centroidy, centroidx)[0] < 120):
            blue.append((centroidx, centroidy))

# Drawing part. IndexError handling covers the frames before two centroids
# exist. After drawing, the older centroid is popped, so this frame's
# centroid becomes the start point of the line drawn next frame.
    try:
        cv.Circle(imdraw, yellow[1], 5, (0, 255, 255))
        cv.Line(imdraw, yellow[0], yellow[1], (0, 255, 255), 3, 8, 0)
        yellow.pop(0)
    except IndexError:
        print "Just wait for yellow"

    try:
        cv.Circle(imdraw, blue[1], 5, (255, 0, 0))
        cv.Line(imdraw, blue[0], blue[1], (255, 0, 0), 3, 8, 0)
        blue.pop(0)
    except IndexError:
        print "just wait for blue"
    # Accumulate the newly drawn segments onto the persistent canvas.
    cv.Add(test, imdraw, test)

    cv.ShowImage("Real", color_image)
    cv.ShowImage("Threshold", img2)
コード例 #5
0
ファイル: main.py プロジェクト: Peculator/wasser-kanone
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        self.wrapper = ClientWrapper()
        self.client = self.wrapper.Client()

        first = True
        i = 0
        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]
            i = i+1
            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.020, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:

                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            if (len(points) and i > 2):
                i = 0
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)

                xRange = self.bottomLeft[0] - self.topRight[0]
                yRange = self.topRight[1] - self.bottomLeft[1]

                # x = lowestValue + (center_point[0] / width) * xRange
                dmxCoordinate = (int(self.bottomLeft[0] - (float(center_point[0]) / self.width) * xRange), int(self.topRight[1] - (float(center_point[1]) / self.height) * yRange))

                print dmxCoordinate, "bei: ", center_point
                self.moveDmxTo(dmxCoordinate)

                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                #cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
コード例 #6
0
ファイル: pizza.py プロジェクト: tarora2/seawolf
    def process_frame(self, frame):
        """Detect pizza boxes in *frame*.

        Pipeline: channel-1 adaptive threshold -> morphological open ->
        probabilistic Hough lines -> drop near-duplicate lines -> test
        every 4-combination of line endpoints as rectangle corners using
        side-length, side-ratio and corner-angle checks -> track matches
        in self.Boxes, then publish positions/orientation via self.output
        and show debug images.
        """
        self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, self.debug_frame)
        cv.Copy(self.debug_frame, og_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Copy channel 1 of the HSV image into the single-channel binary
        # image via the channel-of-interest mechanism.
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology: erode then dilate (an opening) to drop speckle noise.
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)

        # Probabilistic Hough transform: returns line segments as endpoint
        # pairs.
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_PROBABILISTIC,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=self.min_length,
                                   param2=self.max_gap)

        lines = []
        corners = []

        for line in raw_lines:
            lines.append(line)

        # Grouping lines depending on endpoint similarities: if two lines'
        # endpoints (compared in either order) are all within
        # max_corner_range, keep only the longer of the two. The "in lines"
        # guards are needed because lines is mutated while iterating copies.
        for line1 in lines[:]:
            for line2 in lines[:]:
                if line1 in lines and line2 in lines and line1 != line2:
                    if math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and \
                       math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and \
                       math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and \
                       math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(
                                line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)
                    elif math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and \
                            math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and \
                            math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and \
                            math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(
                                line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)

        # Collect the surviving lines' endpoints as candidate corners.
        for line in lines:
            corners.append(line[0])
            corners.append(line[1])

        # Brute-force every ordered 4-tuple of corners (O(n^4) in the number
        # of candidate corners).
        for corner1 in corners:
            for corner2 in corners:
                for corner3 in corners:
                    for corner4 in corners:
                        # Checks that corners are not the same and are in the proper orientation
                        if corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and \
                           corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and \
                           corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and \
                           corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and \
                           corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0]:
                            # Checks that the side ratios are correct
                            if math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and \
                               math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and \
                               math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < self.ratio_threshold and \
                               math.fabs(line_distance(corner1, corner2) / line_distance(corner1, corner3)) < self.ratio_threshold:
                                #^^^ CHANGED OR TO AND --> DID MUCH BETTER. CONSIDER CHANGING ON BINSCORNER

                                # Checks that angles are roughly 90 degrees
                                angle_cnr_2 = math.fabs(
                                    angle_between_lines(
                                        line_slope(corner1, corner2),
                                        line_slope(corner2, corner4)))
                                if self.angle_min < angle_cnr_2 < self.angle_max:
                                    angle_cnr_3 = math.fabs(
                                        angle_between_lines(
                                            line_slope(corner1, corner3),
                                            line_slope(corner3, corner4)))
                                    if self.angle_min2 < angle_cnr_3 < self.angle_max2:
                                        new_box = Pizza(
                                            corner1, corner2, corner3, corner4)
                                        self.match_Boxes(new_box)

        # Age out boxes that have not been matched recently.
        for Box in self.Boxes[:]:
            Box.lastseen -= 1
            if Box.lastseen < 0:
                self.Boxes.remove(Box)

        self.draw_pizza()

        # Overlay the surviving lines and their endpoints on the debug image.
        for line in lines:
            cv.Line(self.debug_frame, line[0], line[1], (255, 255, 0), 10,
                    cv.CV_AA, 0)
            cv.Circle(self.debug_frame, line[0], 15, (255, 0, 0), 2, 8, 0)
            cv.Circle(self.debug_frame, line[1], 15, (255, 0, 0), 2, 8, 0)

        self.output.pizza = self.Boxes
        anglesum = 0
        for Box in self.Boxes:
            # Map pixel offsets from the frame centre to +/-37 and +/-36
            # ranges -- presumably the camera's field of view; confirm.
            Box.theta = (Box.center[0] - frame.width / 2) * 37 / (frame.width /
                                                                  2)
            Box.phi = -1 * (Box.center[1] -
                            frame.height / 2) * 36 / (frame.height / 2)
            anglesum += Box.angle
        if len(self.output.pizza) > 0:
            self.output.orientation = anglesum / len(self.output.pizza)
        else:
            self.output.orientation = None
        self.return_output()

        svr.debug("Pizza", self.debug_frame)
        svr.debug("Original", og_frame)
コード例 #7
0
def camera():
    found_goals = False
    print "# Starting initialization..."
    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Zero(intrinsics)

    #camera data
    intrinsics[0, 0] = 850.850708957251072
    intrinsics[1, 1] = 778.955239997982062
    intrinsics[2, 2] = 1
    intrinsics[0, 2] = 320.898495232253822
    intrinsics[1, 2] = 380.213734835526282
    dist_coeffs = cv.CreateMat(1, 4, cv.CV_64FC1)
    cv.Zero(dist_coeffs)
    dist_coeffs[0, 0] = -0.226795877008420
    dist_coeffs[0, 1] = 0.139445565548056
    dist_coeffs[0, 2] = 0.001245710462327
    dist_coeffs[0, 3] = -0.001396618726445
    print "# intrinsics loaded!"

    #prepare memory
    capture = cv.CaptureFromCAM(0)
    src = cv.QueryFrame(capture)
    size = GetSize(src)
    dst0 = cv.CreateImage(size, src.depth, src.nChannels)
    image_ROI = (0, 60, 640, 340)
    size = (640, 340)

    hue = cv.CreateImage(size, 8, 1)
    sat = cv.CreateImage(size, 8, 1)
    val = cv.CreateImage(size, 8, 1)
    ball = cv.CreateImage(size, 8, 1)
    yellow = cv.CreateImage(size, 8, 1)
    blue = cv.CreateImage(size, 8, 1)
    Set2D(hue, 4, 4, 255)
    Set2D(sat, 4, 4, 255)
    Set2D(val, 4, 4, 255)
    Set2D(ball, 4, 4, 255)
    Set2D(yellow, 4, 4, 255)
    Set2D(blue, 4, 4, 255)

    ballx = 0
    bally = 0

    ballmiss = 0
    yellowmiss = 0
    bluemiss = 0

    print "# base images created..."
    #####------------------ajustment data---------------------###############
    #shadow
    high = 40
    low = 300

    #threshold
    thresBallInit = 116
    thresYellowInit = 94
    thresBlueInit = 18
    ballRangeInit = 8.0
    yellowRangeInit = 6.0
    blueRangeInit = 8.0
    ballRange = ballRangeInit
    yellowRange = yellowRangeInit
    blueRange = blueRangeInit
    ballMinRange = 1.5
    yellowMinRange = 1.5
    blueMinRange = 8.0
    thresBall = thresBallInit
    thresYellow = thresYellowInit
    thresBlue = thresBlueInit

    #dilate
    ex = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
    ex2 = cv.CreateStructuringElementEx(2, 2, 1, 1, cv.CV_SHAPE_RECT)
    ex5 = cv.CreateStructuringElementEx(5, 5, 1, 1, cv.CV_SHAPE_RECT)

    #ball
    ballcount = 15.0
    ballAreaInit = 95.0
    ballAreaRangeInit = 80.0
    ballArea = ballAreaInit
    ballAreaRange = ballAreaRangeInit
    ballMinAreaRange = 40.0
    ballcompact = 8.0

    #blue
    bluecount = 30.0
    blueAreaInit = 400.0
    blueAreaRangeInit = 200.0
    blueArea = blueAreaInit
    blueAreaRange = blueAreaRangeInit
    blueMiniAreaRange = 50.0
    bluemaxdepth = 9.0
    blueminidepth = 2.5

    #yellow
    yellowcount = 30.0
    yellowAreaInit = 450.0
    yellowAreaRangeInit = 200.0
    yellowArea = yellowAreaInit
    yellowAreaRange = yellowAreaRangeInit
    yellowMinAreaRange = 50.0
    yellowmaxdepth = 10.0
    yellowminidepth = 3.2

    #####----------------------------------------
    aa = time.time()
    storage = cv.CreateMemStorage()
    first = True
    pitch = 0  # 0 for main pitch, 1 for alt pitch
    countf = 0
    print "# starting capture..."
    print ''
    capture = cv.CaptureFromCAM(0)
    while (True):
        global connected
        if (not connected):
            global s
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.connect((hostname, port))
                connected = True
            except:
                print "java down, waiting"

        src = cv.QueryFrame(capture)
        #ShowImage('src',src)
        cv.SetImageROI(dst0, (0, 0, 640, 480))
        average = cv.CreateImage(size, 8, 3)
        #barrel undistortion
        cv.Undistort2(src, dst0, intrinsics, dist_coeffs)
        #ROI = Region of Interests, crop the image
        cv.SetImageROI(dst0, image_ROI)
        dst = GetImage(dst0)
        dst2 = cv.CreateImage(size, 8, 3)
        Set2D(dst2, 4, 4, 255)
        hsv = cv.CreateImage(size, 8, 3)
        CvtColor(dst, hsv, CV_RGB2HSV)
        cv.Split(hsv, hue, sat, val, None)
        if (first):
            pitch = pitchSet
            if pitch == 1:
                base = cv.LoadImage("base.jpg", cv.CV_LOAD_IMAGE_UNCHANGED)
                baseInv = cv.CreateImage(size, 8, 1)
                cv.Not(base, baseInv)
            first = False
        if (debug):
            ShowImage("hue", hue)
            ShowImage("sat", sat)
            ShowImage("val", val)

        # BALL
        cv.Threshold(hue, ball, thresBallInit + ballRange, 255,
                     cv.CV_THRESH_TOZERO_INV)
        cv.Threshold(hue, ball, thresBallInit - ballRange, 255,
                     cv.CV_THRESH_BINARY)

        #ShowImage("ball",ball)
        # YELLOW
        cv.Threshold(hue, yellow, thresYellowInit + yellowRange, 255,
                     cv.CV_THRESH_TOZERO_INV)
        cv.Threshold(yellow, yellow, thresYellowInit - yellowRange, 255,
                     cv.CV_THRESH_BINARY)
        cv.Erode(yellow, yellow, ex, 1)
        cv.Dilate(yellow, yellow, ex, 1)
        #ShowImage("yellow",yellow)

        # BLUE
        #		CvtColor(dst,hsv,CV_BGR2HSV)
        #		cv.Split(hsv,hue,sat,val,None)

        cv.Threshold(hue, blue, thresBlue + blueRange, 255,
                     cv.CV_THRESH_BINARY_INV)
        #		cv.Threshold(blue,blue,4,255,cv.CV_THRESH_BINARY)
        #		cv.Erode(blue,blue,ex2,1)

        #ShowImage("blue",blue)

        cv.Threshold(val, val, 130, 255, cv.CV_THRESH_BINARY_INV)
        cv.Threshold(sat, sat, 100, 255, cv.CV_THRESH_BINARY_INV)
        #ShowImage("sat2",sat)
        #ShowImage("val2",val)
        # Removes the walls
        Sub(blue, val, blue)
        Sub(blue, sat, blue)
        Sub(yellow, val, yellow)
        Sub(yellow, sat, yellow)
        Sub(ball, val, ball)
        Sub(ball, sat, ball)
        cv.Erode(ball, ball, ex, 1)
        cv.Dilate(ball, ball, ex, 1)

        cv.Dilate(blue, blue, ex, 1)
        Set2D(ball, 4, 4, 255)
        Set2D(blue, 4, 4, 255)
        Set2D(yellow, 4, 4, 255)

        #ShowImage("yellow3",yellow)
        #ShowImage("ball3",ball)
        #ShowImage("blue3",blue)

        if (debug):
            ShowImage("blue", blue)
            ShowImage("yellow", yellow)
            ShowImage("ball", ball)

    #find ball

        seq = cv.FindContours(ball, storage, cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            while (seq != None and count <= ballcount):
                count = count + 1
                area = cv.ContourArea(seq) + 0.01
                compact = ArcLength(seq) * ArcLength(seq) / (4 * area *
                                                             math.pi)
                if (area < 4 or area > (ballArea + ballAreaRange) or area <
                    (ballArea - ballAreaRange) or compact >= ballcompact):
                    seq = seq.h_next()
                    continue
                else:
                    ballx = 0
                    bally = 0
                    for p in seq:
                        ballx = ballx + p[0]
                        bally = bally + p[1]
                    ballx = int(float(ballx) / len(seq))
                    bally = int(float(bally) / len(seq))

                    ###############--------------Auto ajustment
                    #					print "ball area %f" %area
                    #					print "ball hue: %f" %hue[bally,ballx]
                    #					cv.Circle(dst,(ballx,bally),4,cv.CV_RGB(255,255,255),2,8,0)
                    cv.Circle(dst, (ballx, bally), 5, cv.CV_RGB(255, 255, 255),
                              3, 8, 0)
                    break
            if (count > ballcount or seq == None):
                #				print ballAreaRange
                ballx = 0
                bally = 0
                ballmiss = ballmiss + 1
                print "# error: ball not found  "

        #find blue
        seq = cv.FindContours(blue, storage, cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            while (seq != None and count <= bluecount):
                count = count + 1
                area = cv.ContourArea(seq)
                if (area < blueArea - blueAreaRange
                        or area > blueArea + blueAreaRange):
                    seq = seq.h_next()
                    continue
                else:
                    hull = None
                    convex = None
                    hull = cv.ConvexHull2(seq, storage)
                    convex = cv.ConvexityDefects(seq, hull, storage)
                    if (len(convex) > 1):
                        convex = sorted(convex,
                                        key=lambda (k1, k2, k3, k4): k4
                                        )  #sort by depth of the convex defect
                        if (convex[len(convex) - 1][3] < blueminidepth
                                or convex[len(convex) - 2][3] < blueminidepth
                                or convex[len(convex) - 1][3] > bluemaxdepth
                                or convex[len(convex) - 2][3] > bluemaxdepth):
                            seq = seq.h_next()
                            continue
                        else:
                            #find the T
                            blue_start1 = convex[len(convex) - 1][0]
                            blue_end1 = convex[len(convex) - 1][1]
                            blue_depth1 = convex[len(convex) - 1][2]

                            #draw the side line of T

                            blue_start2 = convex[len(convex) - 2][0]
                            blue_end2 = convex[len(convex) - 2][1]
                            blue_depth2 = convex[len(convex) - 2][2]

                            blue_from = ((blue_depth1[0] + blue_depth2[0]) / 2,
                                         (blue_depth1[1] + blue_depth2[1]) / 2
                                         )  #calculate the center of robot

                            #calculate the end of direction vector, the two end point of the smaller distans
                            if math.hypot(blue_start1[0] - blue_end2[0],
                                          blue_start1[1] -
                                          blue_end2[1]) > math.hypot(
                                              blue_end1[0] - blue_start2[0],
                                              blue_end1[1] - blue_start2[1]):
                                blue_to = ((blue_end1[0] + blue_start2[0]) / 2,
                                           (blue_end1[1] + blue_start2[1]) / 2)
                            else:
                                blue_to = ((blue_start1[0] + blue_end2[0]) / 2,
                                           (blue_start1[1] + blue_end2[1]) / 2)
                            cv.Line(dst, blue_from, blue_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst, blue_from, 1, cv.CV_RGB(255, 0, 0),
                                      2, 8, 0)
                            cv.Circle(dst, blue_to, 3, cv.CV_RGB(0, 0, 0), 2,
                                      8, 0)
                            cv.Circle(dst, blue_from, 5,
                                      cv.CV_RGB(0, 255, 255), 3, 8, 0)

                            #######---------------------------Auto Ajusting
                            print "blue area %f" % area
                            #							print "blue hue: %f" %hue[blue_from[1],blue_from[0]]
                            break
                    else:
                        seq = seq.h_next()
                        continue
            if (count > bluecount or seq == None):
                bluemiss = bluemiss + 1
                blue_from = (0, 0)
                blue_to = (0, 0)
                print "# error: blue not found  "

        #find yellow
        seq = cv.FindContours(yellow, storage, cv.CV_RETR_LIST,
                              cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            while (seq != None and count <= yellowcount):
                count = count + 1
                area = cv.ContourArea(seq)
                if (area < yellowArea - yellowAreaRange
                        or area > yellowArea + yellowAreaRange):
                    seq = seq.h_next()
                    continue
                else:
                    hull = None
                    convex = None
                    hull = cv.ConvexHull2(seq, storage)
                    convex = cv.ConvexityDefects(seq, hull, storage)
                    if (len(convex) > 1):
                        convex = sorted(convex,
                                        key=lambda (k1, k2, k3, k4): k4
                                        )  #sort by depth of the convex defect
                        if (convex[len(convex) - 1][3] < yellowminidepth
                                or convex[len(convex) - 2][3] < yellowminidepth
                                or convex[len(convex) - 1][3] > yellowmaxdepth
                                or
                                convex[len(convex) - 2][3] > yellowmaxdepth):
                            seq = seq.h_next()
                            continue
                        else:
                            #find the T
                            yellow_start1 = convex[len(convex) - 1][0]
                            yellow_end1 = convex[len(convex) - 1][1]
                            yellow_depth1 = convex[len(convex) - 1][2]

                            #draw the side line of T

                            yellow_start2 = convex[len(convex) - 2][0]
                            yellow_end2 = convex[len(convex) - 2][1]
                            yellow_depth2 = convex[len(convex) - 2][2]

                            yellow_from = (
                                (yellow_depth1[0] + yellow_depth2[0]) / 2,
                                (yellow_depth1[1] + yellow_depth2[1]) / 2
                            )  #calculate the center of robot

                            #calculate the end of direction vector, the two end point of the smaller distans
                            if math.hypot(
                                    yellow_start1[0] - yellow_end2[0],
                                    yellow_start1[1] -
                                    yellow_end2[1]) > math.hypot(
                                        yellow_end1[0] - yellow_start2[0],
                                        yellow_end1[1] - yellow_start2[1]):
                                yellow_to = (
                                    (yellow_end1[0] + yellow_start2[0]) / 2,
                                    (yellow_end1[1] + yellow_start2[1]) / 2)
                            else:
                                yellow_to = (
                                    (yellow_start1[0] + yellow_end2[0]) / 2,
                                    (yellow_start1[1] + yellow_end2[1]) / 2)


###########------------------------------Auto Ajusting
#							print cv.ContourArea(seq)
#							print "yellow area %f" %area
#							print "yellow hue: %f" %hue[yellow_from[1],yellow_from[0]]
                            cv.Line(dst, yellow_from, yellow_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst, yellow_from, 1,
                                      cv.CV_RGB(255, 0, 0), 2, 8, 0)
                            cv.Circle(dst, yellow_to, 3, cv.CV_RGB(0, 0, 0), 2,
                                      8, 0)
                            cv.Circle(dst, yellow_from, 5,
                                      cv.CV_RGB(255, 255, 0), 3, 8, 0)
                            break
                    else:
                        seq = seq.h_next()
                        continue
            if (count > yellowcount or seq == None):
                yellowmiss = yellowmiss + 1
                yellow_from = (0, 0)
                yellow_to = (0, 0)
                print "# error: yellow not found"

        ballpos = (ballx, bally)
        ShowImage("camera", dst)
        if (found_goals == False):
            if (us == "yellow"):
                goals = find_goals(size, yellow_from)
                stewies_goal = goals[0]
                loiss_goal = goals[1]
                found_goals = True
            elif (us == "blue"):
                goals = find_goals(size, blue_from)
                stewies_goal = goals[0]
                loiss_goal = goals[1]
                found_goals = True
        #if (ballx >= 0):
        output(ballpos, blue_from, blue_to, yellow_from, yellow_to,
               stewies_goal, loiss_goal)
        time_passed = time.time() - aa
        countf += 1
        if (time_passed >= 1):
            print "frame per second: " + str(countf)
            countf = 0
            aa = time.time()
        keyPress = cv.WaitKey(2)
        if (keyPress == 1048608):
            break
        elif (keyPress >= 0 and keyPress != 1048608):
            bb = time.clock()
            print "frame rate: %f" % (timecount / (bb - aa))
            print "ball miss rate: %f" % (ballmiss)
            print "blue miss rate: %f" % (bluemiss)
            print "yellow miss rate: %f" % (yellowmiss)
コード例 #8
0
def track(bgr_image, threshold=100):
    '''Track the dominant green object in a BGR image and mark its centroid.

    Accepts a BGR image and an optional object threshold between 0 and 255
    (default = 100).
    Returns: (x, y) coordinates of centroid if found
             (-1, -1) if no centroid was found
             None if user hit ESC
    '''

    # Extract raw interleaved pixel bytes, width, and height
    bgr_bytes = bgr_image.tostring()
    width = bgr_image.width
    height = bgr_image.height

    # Create separate red, green, and blue image matrices from bytes
    # (byte offsets within a BGR triple: 0 = blue, 1 = green, 2 = red)
    r_image = _create_grayscale_mat(bgr_bytes, width, height, 2)
    b_image = _create_grayscale_mat(bgr_bytes, width, height, 0)
    g_image = _create_grayscale_mat(bgr_bytes, width, height, 1)

    # Constant-3 divisor image used by _div_and_sub below
    threes_image = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
    cv.Set(threes_image, 3)

    # Remove 1/3 of red and blue components from green, so only pixels
    # that are predominantly green survive the threshold.
    # (To track red or blue instead, apply the same pair of _div_and_sub
    # calls to r_image or b_image and use that channel below.)
    _div_and_sub(g_image, r_image, threes_image)
    _div_and_sub(g_image, b_image, threes_image)

    # Threshold and erode green image to a clean binary blob
    cv.Threshold(g_image, g_image, threshold, 255, cv.CV_THRESH_BINARY)
    cv.Erode(g_image, g_image)

    # Find centroid of eroded image via image moments (binary flag set)
    moments = cv.Moments(cv.GetMat(g_image), 1)

    centroid_x = _centroid(moments, 1, 0)
    centroid_y = _centroid(moments, 0, 1)

    # Assume no centroid until both coordinates are available
    ctr = (-1, -1)

    # Use centroid if it exists (identity comparison with None, not !=)
    if centroid_x is not None and centroid_y is not None:

        ctr = (centroid_x, centroid_y)

        # Put black circle in at centroid in image
        cv.Circle(bgr_image, ctr, 4, (0, 0, 0))

    # Display full-color image
    cv.NamedWindow(WINDOW_NAME)
    cv.ShowImage(WINDOW_NAME, bgr_image)

    # Force image display, setting centroid to None on ESC key input
    if cv.WaitKey(5) == 27:
        ctr = None

    # Return coordinates of centroid
    return ctr
コード例 #9
0
    def display_video(self):
        """Replay a recorded tracking session with live velocity/acceleration overlays.

        Loads the saved background frame, pickled position data, and (in full
        video mode) the per-frame images from self.video_folder, then loops the
        replay in four OpenCV windows ("Replay", "Velocity", "Acceleration",
        "Overall") until the video is paused, finishes, or the user presses
        'q'/ESC.  Returns None.
        """
        if not self.upload_file():
            return
        self.video_active = True
        # Window placement is hand-tuned, apologies
        cv.NamedWindow("Velocity", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("Velocity", 0, 590)
        cv.NamedWindow("Acceleration", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("Acceleration", 415, 590)
        cv.NamedWindow("Replay", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("Replay", 610, 20)
        cv.NamedWindow("Overall", cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow("Overall", 880, 590)

        # Load background frame
        input_background = str(self.video_folder) + "/background.png"
        background = cv.LoadImage(input_background)
        if not background:
            QMessageBox.information(self, "Open video", "No such video")
            return
        cv.ShowImage("Replay", background)
        size = cv.GetSize(background)
        screen_width = size[0]
        screen_height = size[1]

        # Load pickled position data (a Speed object)
        input_data = str(self.video_folder) + "/Data"
        f_in = open(input_data, 'r')
        data = pickle.load(f_in)
        f_in.close()
        if not data:
            QMessageBox.information(self, "Loading video", "Unable to load data")
        num_frames = data.num_frames()

        top_speed = 0
        dist = 0.0
        # Per-frame images (only populated in full video mode)
        imgArr = []
        img_name = str(self.video_folder) + "/frame_"

        # If we saved the full video (as opposed to just the position info)
        if self.full_video_mode:
            for frame in range(1, num_frames):
                full_img = str(img_name) + str(frame) + ".png"
                img = cv.LoadImage(full_img)
                imgArr.append(img)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 1, cv.CV_AA)
        # All parameters we want to track
        params = ["x_pos", "y_pos", "v_x", "v_y", "a_x", "a_y", "distance", "v_net", "a_net"]
        # Values for which we scale display color with relative magnitude
        fields = ["v_x", "v_y", "a_x", "a_y", "v_net", "a_net"]

        # For velocity and acceleration there is a min/max for pos and neg;
        # min_max returns (min, max)
        neg_outliers = {}
        pos_outliers = {}
        pos_color = cv.CV_RGB(0, 255, 0)  # green
        neg_color = cv.CV_RGB(255, 0, 0)  # red

        for f in fields:
            # BUG FIX: original compared string literals with "is not",
            # which tests identity, not equality
            if f != "v_net" and f != "a_net":
                neg_outliers[f] = min_max(data.metrics[f], f, which_vals="neg")
            pos_outliers[f] = min_max(data.metrics[f], f, which_vals="pos")

        next_image = cv.CloneImage(background)
        # Ignore the first values for everything
        # (since velocity/acceleration will not be accurate)
        img_index = 1

        line_list = []
        color_list = []
        while not self.busy_updating and self.video_active:
            # Enables pause button functionality
            if not self.video_active:
                break
            qr = cv.WaitKey(10)
            # If we're done with the video
            if img_index >= num_frames:
                break

            if self.show_video:
                # Loop around when video done
                if img_index == num_frames - 1:
                    img_index = 0
                    dist = 0.0
                    top_speed = 0.0
                    line_list = []
                    color_list = []
                if img_index < num_frames - 1:
                    # Advance to next image
                    if self.full_video_mode:
                        next_image = imgArr[img_index]
                    # Values are one ahead of the frames
                    img_index += 1
                    # Make white canvases for writing values
                    speed_img = cv.CreateImage((400, 140), 8, 3)
                    cv.Set(speed_img, cv.CV_RGB(255, 255, 255))
                    accl_img = cv.CreateImage((450, 140), 8, 3)
                    cv.Set(accl_img, cv.CV_RGB(255, 255, 255))
                    overall_img = cv.CreateImage((390, 140), 8, 3)
                    cv.Set(overall_img, cv.CV_RGB(255, 255, 255))

                    x_coord = data.metrics["x_pos"][img_index]
                    y_coord = data.metrics["y_pos"][img_index]
                    # Will hold [x, y, v_x, v_y, a_x, a_y, distance, v_net, a_net]
                    data_for_step = []
                    colors_for_step = []
                    # Convert all data to real units
                    # and determine red/green display color
                    for p in params:
                        raw_pixel_val = data.metrics[p][img_index]
                        val = self.to_real_units(raw_pixel_val)
                        if val < 0:
                            if p == "x_pos" or p == "y_pos" or p == "distance":
                                colors_for_step.append(neg_color)
                            else:
                                colors_for_step.append(scale_color(raw_pixel_val, neg_outliers[p][0], neg_outliers[p][1], "R"))
                        else:
                            if p == "x_pos" or p == "y_pos" or p == "distance":
                                colors_for_step.append(pos_color)
                            else:
                                colors_for_step.append(scale_color(raw_pixel_val, pos_outliers[p][0], pos_outliers[p][1], "G"))
                        data_for_step.append(val)

                    # Track top speed after first three steps (since these are less precise)
                    v_net = data_for_step[7]
                    if abs(v_net) > abs(top_speed) and img_index > 3:
                        top_speed = v_net

                    # Display all velocities/accelerations
                    x_speed = "Horizontal: " + str(round(data_for_step[2], 1))
                    y_speed = "Vertical: " + str(round(data_for_step[3], 1))
                    total_speed = "Net: " + str(round(data_for_step[7], 1))
                    x_accl = "Horizontal: " + str(round(data_for_step[4], 1))
                    y_accl = "Vertical: " + str(round(data_for_step[5], 1))
                    total_accl = "Net: " + str(round(data_for_step[8], 1))
                    if img_index > 1:
                        dist += data_for_step[6]
                    dist_traveled = "Distance: " + str(round(dist, 1))
                    top_speed_so_far = "Top speed: " + str(round(top_speed, 1))

                    # Add to speed window
                    cv.PutText(speed_img, x_speed, (10, 40), font, colors_for_step[2])
                    cv.PutText(speed_img, y_speed, (10, 80), font, colors_for_step[3])
                    cv.PutText(speed_img, total_speed, (10, 120), font, colors_for_step[7])
                    # Add to accl window
                    cv.PutText(accl_img, x_accl, (10, 40), font, colors_for_step[4])
                    cv.PutText(accl_img, y_accl, (10, 80), font, colors_for_step[5])
                    cv.PutText(accl_img, total_accl, (10, 120), font, colors_for_step[8])
                    # Add to overall window
                    cv.PutText(overall_img, dist_traveled, (10, 60), font, cv.Scalar(0, 255, 0))
                    cv.PutText(overall_img, top_speed_so_far, (10, 120), font, cv.Scalar(0, 255, 0))
                    # If the object fits on the screen, display it per draw_mode
                    if x_coord < screen_width and y_coord < screen_height:
                        if self.draw_mode == "circle":
                            cv.Circle(next_image, (int(x_coord), int(y_coord)), self.marker_rad, self.object_color, thickness=-1)
                        elif self.draw_mode == "line":
                            if img_index > 1:
                                x_0 = data.metrics["x_pos"][img_index - 1]
                                y_0 = data.metrics["y_pos"][img_index - 1]
                                line_list.append([int(x_0), int(y_0), int(x_coord), int(y_coord)])
                                for l in line_list:
                                    cv.Line(next_image, (l[0], l[1]), (l[2], l[3]), self.object_color, thickness=self.marker_rad)
                        elif self.draw_mode == "v_path":
                            # Velocity-dependent path color (colors_for_step[7] = v_net)
                            if img_index > 1:
                                x_0 = data.metrics["x_pos"][img_index - 1]
                                y_0 = data.metrics["y_pos"][img_index - 1]
                                line_list.append([int(x_0), int(y_0), int(x_coord), int(y_coord)])
                                color_list.append(colors_for_step[7])
                                for index, l in enumerate(line_list):
                                    cv.Line(next_image, (l[0], l[1]), (l[2], l[3]), color_list[index], thickness=self.marker_rad)
                        else:
                            # Acceleration-dependent path color (colors_for_step[8] = a_net)
                            if img_index > 1:
                                x_0 = data.metrics["x_pos"][img_index - 1]
                                y_0 = data.metrics["y_pos"][img_index - 1]
                                line_list.append([int(x_0), int(y_0), int(x_coord), int(y_coord)])
                                color_list.append(colors_for_step[8])
                                for index, l in enumerate(line_list):
                                    cv.Line(next_image, (l[0], l[1]), (l[2], l[3]), color_list[index], thickness=self.marker_rad)
                        cv.ShowImage("Replay", next_image)
                        cv.ShowImage("Velocity", speed_img)
                        cv.ShowImage("Acceleration", accl_img)
                        cv.ShowImage("Overall", overall_img)
                        k = cv.WaitKey(self.playback_speed)
                        # Press q or escape to quit
                        if k == 113 or k == 27:
                            self.show_video = False
                            cv.DestroyAllWindows()
                            break
        self.show_video = False
コード例 #10
0
    def run(self):
        # Initialize
        #log_file_name = "tracker_output.log"
        #log_file = file( log_file_name, 'a' )

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 0  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        haar_cascade = cv.Load('haarcascades/haarcascade_frontalface_alt.xml')
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 3

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            points = [
            ]  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)

                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][0] - box[top][0]
                box_areas.append(box_width * box_height)

                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][0] - box[top][0]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #	cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count,
                                 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                     center_point[1] < box[bottom][1] and center_point[1] > box[top][1] :

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    x_list = y_list = []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (
                        not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            #for center_point in trimmed_center_points:
            #	center_point = ( int(center_point[0]), int(center_point[1]) )
            #	cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #	cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #	cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #	cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[
                        2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[
                        3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if entity_found == False:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show
            if chr(c) == 'd':
                image_index = (image_index + 1) % len(image_list)

            image_name = image_list[image_index]

            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText(image, "Camera (Normal)", text_coord, text_font,
                           text_color)
            elif image_name == "difference":
                image = difference
                cv.PutText(image, "Difference Image", text_coord, text_font,
                           text_color)
            elif image_name == "display":
                image = display_image
                cv.PutText(image, "Targets (w/AABBs and contours)", text_coord,
                           text_font, text_color)
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor(grey_image, display_image, cv.CV_GRAY2RGB)
                image = display_image  # Re-use display image here
                cv.PutText(image, "Motion Mask", text_coord, text_font,
                           text_color)
            elif image_name == "faces":
                # Do face detection
                detect_faces(camera_image, haar_cascade, mem_storage)
                image = camera_image  # Re-use camera image here
                cv.PutText(image, "Face Detection", text_coord, text_font,
                           text_color)

            cv.ShowImage("Target", image)

            if self.writer:
                cv.WriteFrame(self.writer, image)

            #log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
コード例 #11
0
def drawPoint(image, point, color):
    """Mark *point* on *image* with a radius-2 circle drawn in *color*."""
    px, py = point
    cv.Circle(image, (int(px), int(py)), 2, color)
コード例 #12
0
                              cv.CV_CHAIN_APPROX_SIMPLE)
    points = []

    #	This is the new part here. ie Use of cv.BoundingRect()
    while contour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()

        # for more details about cv.BoundingRect,see documentation
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
        lastx = posx
        lasty = posy
        posx = cv.Round((pt1[0] + pt2[0]) / 2)
        posy = cv.Round((pt1[1] + pt2[1]) / 2)
        if lastx != 0 and lasty != 0:
            cv.Line(imdraw, (posx, posy), (lastx, lasty), (0, 255, 255))
            cv.Circle(imdraw, (posx, posy), 5, (0, 255, 255), -1)
    cv.Add(test, imdraw, test)

    cv.ShowImage("Real", color_image)
    cv.ShowImage("Threshold", test)
    if cv.WaitKey(33) == 1048603:
        cv.DestroyWindow("Real")
        cv.DestroyWindow("Threshold")
        break
コード例 #13
0
ファイル: dist1.py プロジェクト: nebuladream/kod
            small_img = cv.CreateImage((cv.Round(
                frame.width / __scale__), cv.Round(frame.height / __scale__)),
                                       8, 1)
            cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
            cv.EqualizeHist(small_img, small_img)
            pi = Image.fromstring("L", cv.GetSize(small_img),
                                  small_img.tostring())
            s_res = sift(ravel(PIL2NumPy(pi)))
            n_res = array(s_res)

            for item in n_res:
                xx = item[0] * __scale__
                yy = item[1] * __scale__
                pt = (int(xx), int(yy))
                if is_point_in_region(pt):
                    cv.Circle(frame, pt, 8, cv.CV_RGB(100, 100, 255), 0,
                              cv.CV_AA, 0)

            # display webcam image
            cv.ShowImage('Camera', frame)

            frame_no += 1

        # handle events
        k = cv.WaitKey(40)
        #k = cvWaitKey()
        if k == "t":
            cv.SaveImage('snap-' + str(snap_no) + '.jpg', frame)
            snap_no += 1
        if k == 27:  # ESC
            print 'ESC pressed. Exiting ...'
            break
コード例 #14
0
    def find_places(self, c):
        """Locate the 12 egg places inside the ball tray from its four corners.

        c -- the four tray corner points in pixel coordinates, in order
             around the tray quadrilateral.

        Side effects: fills self.ball_tray_corner and self.ball_tray_place
        with Baxter coordinates (via self.pixel_to_baxter), draws and labels
        the places on the image, shows the window and saves the image(s).
        """
        # Convert the numpy image to an old-style cv image so the legacy
        # drawing/saving calls below can operate on it.
        cv_image_trans = cv.fromarray(self.cv_image)

        # Find the long side of the ball tray by comparing the squared
        # lengths of the first two edges of the corner quadrilateral.
        l1_sq = ((c[1][0] - c[0][0]) * (c[1][0] - c[0][0])) +           \
                ((c[1][1] - c[0][1]) * (c[1][1] - c[0][1]))
        l2_sq = ((c[2][0] - c[1][0]) * (c[2][0] - c[1][0])) +           \
                ((c[2][1] - c[1][1]) * (c[2][1] - c[1][1]))

        if l1_sq > l2_sq:  # c[0] to c[1] is a long side
            cc = [c[0], c[1], c[2], c[3]]
        else:  # c[1] to c[2] is a long side
            cc = [c[1], c[2], c[3], c[0]]

        # Ball tray corners converted to Baxter coordinates.
        for i in range(4):
            self.ball_tray_corner[i] = self.pixel_to_baxter(
                cc[i], self.tray_distance)

        # Half-steps along the long (dl) and short (ds) sides of the tray,
        # in pixel coordinates: the long side is split into 8 half-steps
        # and the short side into 6 (a 4 x 3 grid of places).
        ref_x = cc[0][0]
        ref_y = cc[0][1]
        dl_x = (cc[1][0] - cc[0][0]) / 8
        dl_y = (cc[1][1] - cc[0][1]) / 8
        ds_x = (cc[2][0] - cc[1][0]) / 6
        ds_y = (cc[2][1] - cc[1][1]) / 6

        # Compute the 12 place centres: odd multiples (1, 3, 5, 7) of the
        # long half-step crossed with odd multiples (1, 3, 5) of the short
        # half-step.  (Replaces 12 copy-pasted assignments.)
        p = {}
        for row in range(4):
            for col in range(3):
                ml = 2 * row + 1  # 1, 3, 5, 7
                ms = 2 * col + 1  # 1, 3, 5
                p[row * 3 + col] = (ref_x + (ml * dl_x) + (ms * ds_x),
                                    ref_y + (ml * dl_y) + (ms * ds_y))

        # The font is loop-invariant; create it once instead of per place.
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 1)

        for i in range(12):
            # Mark the position of the ball tray place ...
            cv.Circle(cv_image_trans, (int(p[i][0]), int(p[i][1])), 5,
                      (0, 250, 0), -1)
            # ... and label it with its 1-based number.
            cv.PutText(cv_image_trans, str(i + 1),
                       (int(p[i][0]), int(p[i][1])), font, (0, 255, 0))
            # Ball tray places converted to Baxter coordinates.
            self.ball_tray_place[i] = self.pixel_to_baxter(
                p[i], self.tray_distance)

        # Display the ball tray places.
        file_name = self.image_dir + "eggtray.jpg"
        cv.SaveImage(file_name, cv_image_trans)
        cv.ShowImage("Egg tray", cv_image_trans)

        if self.save_images:
            # Save ball tray image with overlay of ball tray and ball positions.
            file_name = self.image_dir + "ball_tray.jpg"
            cv.SaveImage(file_name, cv_image_trans)

        # 3ms wait so the HighGUI window gets a chance to repaint.
        cv.WaitKey(3)
コード例 #15
0
ファイル: main.py プロジェクト: ahmetech/breakout
 def translate(self, points, img):
     """Draw the detected segment on *img* and feed its polar position to the game."""
     if len(points) < 2:
         return
     start, end = points[0], points[1]
     cv.Line(img, start, end, im.color.BLUE, 3)
     polar = self.__calc_new_position(points)
     self.update_game(polar)
     cv.Circle(img, polar[0], 10, im.color.BLUE, 3)
コード例 #16
0
ファイル: ogr.py プロジェクト: millerjs/OGR
    HIGH = 50
    LOW = 140

    try: 
        # extract circles
        cv.HoughCircles(processed, storage, cv.CV_HOUGH_GRADIENT, 2, 32.0, HIGH, LOW)

        for i in range(0, len(np.asarray(storage))):
            print "circle #%d" %i
            Radius = int(np.asarray(storage)[i][0][2])
            x = int(np.asarray(storage)[i][0][0])
            y = int(np.asarray(storage)[i][0][1])
            center = (x, y)

            # green dot on center and red circle around
            cv.Circle(orig, center, 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
            cv.Circle(orig, center, Radius, cv.CV_RGB(255, 0, 0), 3, 8, 0)

            cv.Circle(processed, center, 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
            cv.Circle(processed, center, Radius, cv.CV_RGB(255, 0, 0), 3, 8, 0)

    except:
        print "nothing found"
        pass

    # show images
    cv.ShowImage("image - press 'q' to quit", orig)
    cv.ShowImage("post-process", processed)

    cv_key = cv.WaitKey(WAITKEY_DELAY_MS)
    key_pressed = chr(cv_key & 255)
コード例 #17
0
    def track_lk(self, cv_image, face):
        """Track a face/patch across frames with pyramidal Lucas-Kanade optical flow.

        cv_image -- the current camera frame (legacy cv image, converted to
                    grey internally with CV_BGR2GRAY).
        face     -- tracking-state object; this method lazily creates and
                    swaps its grey/pyramid scratch images, updates its
                    feature-point list, and uses face.track_box to (re)seed
                    features when none are being tracked yet.

        Returns the CvBox2D ellipse fitted around the tracked feature points,
        or None when there are too few features (<= 6) to fit one.
        """
        feature_box = None
        """ Initialize intermediate images if necessary """
        if not face.pyramid:
            face.grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_grey = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.prev_pyramid = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            face.features = []
        """ Create a grey version of the image """
        cv.CvtColor(cv_image, face.grey, cv.CV_BGR2GRAY)
        """ Equalize the histogram to reduce lighting effects """
        cv.EqualizeHist(face.grey, face.grey)

        if face.track_box and face.features != []:
            """ We have feature points, so track and display them """
            """ Calculate the optical flow """
            face.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                face.prev_grey, face.grey, face.prev_pyramid, face.pyramid,
                face.features, (self.win_size, self.win_size), 3,
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.01),
                self.flags)
            """ Keep only high status points """
            face.features = [p for (st, p) in zip(status, face.features) if st]

        elif face.track_box and self.is_rect_nonzero(face.track_box):
            """ Get the initial features to track """
            """ Create a mask image to be used to select the tracked points """
            mask = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
            """ Begin with all black pixels """
            cv.Zero(mask)
            """ Get the coordinates and dimensions of the track box """
            try:
                x, y, w, h = face.track_box
            except:
                # NOTE(review): bare except silently gives up whenever
                # track_box is not a 4-tuple (e.g. a CvBox2D); consider
                # narrowing to (TypeError, ValueError).
                return None

            if self.auto_face_tracking:
                #                """ For faces, the detect box tends to extend beyond the actual object so shrink it slightly """
                #                x = int(0.97 * x)
                #                y = int(0.97 * y)
                #                w = int(1 * w)
                #                h = int(1 * h)
                """ Get the center of the track box (type CvRect) so we can create the
                    equivalent CvBox2D (rotated rectangle) required by EllipseBox below. """
                center_x = int(x + w / 2)
                center_y = int(y + h / 2)
                roi_box = ((center_x, center_y), (w, h), 0)
                """ Create a filled white ellipse within the track_box to define the ROI. """
                cv.EllipseBox(mask, roi_box, cv.CV_RGB(255, 255, 255),
                              cv.CV_FILLED)
            else:
                """ For manually selected regions, just use a rectangle """
                pt1 = (x, y)
                pt2 = (x + w, y + h)
                cv.Rectangle(mask, pt1, pt2, cv.CV_RGB(255, 255, 255),
                             cv.CV_FILLED)
            """ Create the temporary scratchpad images """
            eig = cv.CreateImage(cv.GetSize(self.grey), 32, 1)
            temp = cv.CreateImage(cv.GetSize(self.grey), 32, 1)

            if self.feature_type == 0:
                """ Find keypoints to track using Good Features to Track """
                face.features = cv.GoodFeaturesToTrack(
                    face.grey,
                    eig,
                    temp,
                    self.max_count,
                    self.quality,
                    self.good_feature_distance,
                    mask=mask,
                    blockSize=self.block_size,
                    useHarris=self.use_harris,
                    k=0.04)

            elif self.feature_type == 1:
                """ Get the new features using SURF """
                (surf_features, descriptors) = cv.ExtractSURF(
                    face.grey, mask, cv.CreateMemStorage(0),
                    (0, self.surf_hessian_quality, 3, 1))
                for feature in surf_features:
                    face.features.append(feature[0])
            #
            if self.auto_min_features:
                """ Since the detect box is larger than the actual face
                    or desired patch, shrink the number of features by 10% """
                face.min_features = int(len(face.features) * 0.9)
                face.abs_min_features = int(0.5 * face.min_features)
        """ Swapping the images """
        face.prev_grey, face.grey = face.grey, face.prev_grey
        face.prev_pyramid, face.pyramid = face.pyramid, face.prev_pyramid
        """ If we have some features... """
        if len(face.features) > 0:
            """ The FitEllipse2 function below requires us to convert the feature array
                into a CvMat matrix """
            try:
                self.feature_matrix = cv.CreateMat(1, len(face.features),
                                                   cv.CV_32SC2)
            except:
                # NOTE(review): bare except — a failed matrix allocation is
                # silently ignored and a stale self.feature_matrix is reused.
                pass
            """ Draw the points as green circles and add them to the features matrix """
            i = 0
            for the_point in face.features:
                if self.show_features:
                    cv.Circle(self.marker_image,
                              (int(the_point[0]), int(the_point[1])), 2,
                              (0, 255, 0, 0), cv.CV_FILLED, 8, 0)
                try:
                    cv.Set2D(self.feature_matrix, 0, i,
                             (int(the_point[0]), int(the_point[1])))
                except:
                    # NOTE(review): bare except — points beyond the matrix
                    # bounds are dropped without any warning.
                    pass
                i = i + 1
            """ Draw the best fit ellipse around the feature points """
            if len(face.features) > 6:
                feature_box = cv.FitEllipse2(self.feature_matrix)
            else:
                feature_box = None
            """ Publish the ROI for the tracked object """
            # try:
            #     (roi_center, roi_size, roi_angle) = feature_box
            # except:
            #     logger.info("Patch box has shrunk to zeros...")
            #     feature_box = None

            # if feature_box and not self.drag_start and self.is_rect_nonzero(face.track_box):
            #     self.ROI = RegionOfInterest()
            #     self.ROI.x_offset = min(self.image_size[0], max(0, int(roi_center[0] - roi_size[0] / 2)))
            #     self.ROI.y_offset = min(self.image_size[1], max(0, int(roi_center[1] - roi_size[1] / 2)))
            #     self.ROI.width = min(self.image_size[0], int(roi_size[0]))
            #     self.ROI.height = min(self.image_size[1], int(roi_size[1]))

            # self.pubROI.publish(self.ROI)

        if feature_box is not None and len(face.features) > 0:
            return feature_box
        else:
            return None
コード例 #18
0
import cv
import numpy

IMAGE="data/fish_ss.png"

img = cv.LoadImageM(IMAGE, cv.CV_LOAD_IMAGE_GRAYSCALE)
eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
for (x,y) in cv.GoodFeaturesToTrack(img, eig_image, temp_image, 15, 0.54, 1.0, useHarris = False):
    print "good feature at", x,y
    cv.Circle(img, (int(x),int(y)), 7, cv.RGB(250, 7, 10), 2)

cv.ShowImage("foo", img)
cv.WaitKey()
コード例 #19
0
    H_blm = K_blm * delete(E_blm, 2, 1)
    inv_H_blm = inv(H_blm)
    H_alm = K_alm * delete(E_alm, 2, 1)
    inv_H_alm = inv(H_alm)

    corners = readcorners(filename + str(loopVar0) + '_corners.txt')
    proj_to_fixed_blm = []
    proj_to_fixed_alm = []
    for loopVar1 in range(len(corners)):
        corners[loopVar1] = [corners[loopVar1][0], corners[loopVar1][1], 1]

        proj_to_world = inv_H_blm * transpose(matrix(corners[loopVar1]))
        temp_point = asarray(H_blm_ground * proj_to_world)
        temp_point = temp_point / float(temp_point[2])
        proj_to_fixed_blm.append([temp_point[0], temp_point[1]])
        cv.Circle(fixed_img, (int(temp_point[1]), int(temp_point[0])), 2,
                  (0, 0, 255), -1)  # Draw a point

        proj_to_world = inv_H_alm * transpose(matrix(corners[loopVar1]))
        temp_point = asarray(H_alm_ground * proj_to_world)
        temp_point = temp_point / float(temp_point[2])
        proj_to_fixed_alm.append([temp_point[0], temp_point[1]])
        cv.Circle(fixed_img, (int(temp_point[1]), int(temp_point[0])), 2,
                  (0, 255, 0), -1)  # Draw a point

    #save_corners('Reprojected_'+str(loopVar0)+'_Without_LM.txt', proj_to_fixed_blm)
    #save_corners('Reprojected_'+str(loopVar0)+'_with_LM.txt', proj_to_fixed_alm)
    cv.SaveImage('Reprojected_' + str(loopVar0) + '.jpg',
                 fixed_img)  #Save the result

    error_blm = calc_error(orig_points, proj_to_fixed_blm)
    error_alm = calc_error(orig_points, proj_to_fixed_alm)
コード例 #20
0
ファイル: annotator.py プロジェクト: caz2114/visual_feedback
 def temp_highlight(self, pt, landmark=True):
     """Flash a cyan dot at *pt* on a copy of the image (the original stays clean)."""
     preview = cv.CloneImage(self.img)
     cv.Circle(preview, pt, 5, cv.CV_RGB(0, 255, 255), -1)
     cv.ShowImage("Annotator", preview)
コード例 #21
0
ファイル: cvslamshow.py プロジェクト: zlite/RPLidar-SLAMbot
    def displayScan(self, scan, offset_mm=(0, 0), color=SCANPOINT_COLOR_BGR):
        """Plot every scan point (mm coordinates shifted by *offset_mm*) on self.image."""
        dx, dy = offset_mm
        for scanpoint in scan:
            center = (self.mm2pix(scanpoint[0] + dx),
                      self.mm2pix(scanpoint[1] + dy))
            cv.Circle(self.image, center, SCANPOINT_RADIUS, color)
コード例 #22
0
ファイル: annotator.py プロジェクト: caz2114/visual_feedback
 def highlight(self, pt, landmark=True):
     """Permanently mark *pt* on the image: red for landmarks, cyan otherwise."""
     color = cv.CV_RGB(255, 0, 0) if landmark else cv.CV_RGB(0, 255, 255)
     cv.Circle(self.img, pt, 5, color, -1)
コード例 #23
0
ファイル: doublepath2.py プロジェクト: tarora2/seawolf
    def process_frame(self, frame):

        self.output.found = False

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Use RGB color finder
        binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(
            frame, 250, 125, 0, 1500, 500, .3)
        color_filtered = cv.CloneImage(binary)

        blob_map = cv.CloneImage(binary)
        blobs = libvision.blob.find_blobs(binary,
                                          blob_map,
                                          min_blob_size=50,
                                          max_blobs=10)

        if not blobs:
            return

        binary = cv.CloneImage(blob_map)
        mapping = [0] * 256
        for blob in blobs:
            mapping[blob.id] = 255
        libvision.greymap.greymap(blob_map, binary, mapping)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        lines = cv.HoughLines2(binary,
                               line_storage,
                               cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0)
        print "hough transform found", len(lines), " lines"
        lines = lines[:self.lines_to_consider]  # Limit number of lines

        # if not lines:
        #    return

        paths = self.path_manager.process(lines, blobs)

        if paths and not self.path:
            # If path[1] is clockwise of paths[0]
            distance = circular_distance(paths[0].angle, paths[1].angle)

            if distance > 0:
                self.path = paths[self.which_path]
            else:
                self.path = paths[1 - self.which_path]

        if paths and self.path in paths and self.path.blobs:

            temp_map = cv.CloneImage(blob_map)

            mapping = [0] * 256
            for blob in self.path.blobs:
                mapping[blob.id] = 255
            libvision.greymap.greymap(blob_map, temp_map, mapping)
            center = self.find_centroid(temp_map)

            svr.debug("map", temp_map)

            self.path.center = (center[0] - (frame.width / 2),
                                -center[1] + (frame.height / 2))

        random = 0
        if random == 0:
            # Show color filtered
            color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
            cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
            cv.Sub(frame, color_filtered_rgb, frame)

            # Show edges
            binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
            cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
            cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
            cv.Sub(frame, binary_rgb, frame)  # Remove all but Red
            test_lines = []
            new_path = None

            for line in lines[:]:
                if self.candidates == []:
                    new_path = Path(line[0], line[1])
                    new_path.id = self.path_id
                    self.path_id += 1
                    new_path.last_seen += 1
                    self.candidates.append(new_path)
                    print "got a candidate"
            for candidate in self.candidates:
                if len(self.confirmed) == 0:
                    self.confirmed.append(candidate)

            for line in lines[:]:
                for candidate in self.candidates:
                    if math.fabs(line[0] - candidate.loc) < self.distance_threshold and \
                       math.fabs(line[1] - candidate.angle) < self.angle_threshold:
                        candidate.loc = (candidate.loc + line[0]) / 2
                        candidate.angle = (candidate.angle + line[1]) / 2
                        if candidate.last_seen < self.max_lastseen:
                            candidate.last_seen += 1
                        # print line1

                        if line in lines:
                            lines.remove(line)
                    else:
                        new_path = Path(line[0], line[1])
                        new_path.id = self.path_id
                        self.path_id += 1
                        new_path.last_seen += 1
                        new_path.seencount += 5
                        self.candidates.append(new_path)

            for candidate in self.candidates[:]:
                candidate.last_seen -= 1
                if candidate.seencount > self.min_seencount:
                    self.confirmed.append(candidate)
                    self.candidates.remove(candidate)
                if candidate.last_seen == -1:
                    self.candidates.remove(candidate)

            for confirmed in self.confirmed:
                for line in lines[:]:
                    if math.fabs(line[0] - confirmed.loc) < self.distance_trans and \
                       math.fabs(line[1] - confirmed.angle) < self.angle_trans:
                        confirmed.loc = line[0]
                        confirmed.angle = line[1]
                        if confirmed.last_seen < self.max_lastseen:
                            confirmed.last_seen += 2

                        if line in lines:
                            self.lines.remove(line)
                            print "line removed"

            for confirmed in self.confirmed:
                for candidate in self.candidates[:]:
                    if math.fabs(candidate.loc - confirmed.loc) < self.distance_trans and \
                       math.fabs(candidate.angle - confirmed.angle) < self.angle_trans:
                        confirmed.loc = candidate.loc
                        confirmed.angle = candidate.angle
                        if confirmed.last_seen < self.max_lastseen:
                            confirmed.last_seen += 2

                        print "lines"
                        if candidate in self.candidates:
                            self.candidates.remove(candidate)
                            print "line removed"

            for confirmed1 in self.confirmed[:]:
                for confirmed2 in self.confirmed[:]:
                    if math.fabs(confirmed1.loc - confirmed2.loc) < self.distance_threshold and \
                       math.fabs(confirmed1.angle - confirmed2.angle) < self.angle_threshold:
                        if confirmed1.id > confirmed2.id and confirmed1 in self.confirmed:
                            confirmed2.loc == (confirmed2.loc +
                                               confirmed1.loc) / 2
                            confirmed2.angle == (confirmed2.angle +
                                                 confirmed1.angle) / 2
                            self.confirmed.remove(confirmed1)
                            if confirmed2.last_seen < self.max_lastseen:
                                confirmed2.last_seen += 2
                        if confirmed2.id > confirmed1.id and confirmed2 in self.confirmed:
                            confirmed2.loc == (confirmed2.loc +
                                               confirmed1.loc) / 2
                            confirmed2.angle == (confirmed2.angle +
                                                 confirmed1.angle) / 2
                            self.confirmed.remove(confirmed2)
                            if confirmed1.last_seen < self.max_lastseen:
                                confirmed1.last_seen += 2

            for confirmed in self.confirmed[:]:
                confirmed.last_seen -= 1
                if confirmed.last_seen < -10:
                    self.confirmed.remove(confirmed)

            final_lines = []
            for confirmed in self.confirmed:
                final_line = [confirmed.loc, confirmed.angle]
                final_lines.append(final_line)
                print confirmed.id
            candidate_ids = []
            for candidate in self.candidates:
                new_id = candidate.id
                candidate_ids.append(new_id)
            print candidate_ids
            print len(self.candidates)

            libvision.misc.draw_lines(frame, final_lines)
            #libvision.misc.draw_lines2(frame, lines)
            print "Number of Paths:", len(self.confirmed)
            print "Number of Candidates:", len(self.candidates)
            # type -s after the command to run vision for this to work and not produce errors.
            # if len(self.confirmed)>1:
            #    raw_input()

            self.output.paths = []
            center_x = 0
            center_y = 0
            self.output.paths = self.confirmed

            for path in self.output.paths:
                path.theta = path.angle
                center_x = frame.width / 2
                path.x = center_x
                center_y = (-math.cos(path.angle) /
                            (math.sin(path.angle) + .001)) * center_x + (
                                path.loc / ((math.sin(path.angle) + .001)))
                path.y = center_y
                if center_y > frame.height or center_y < 0 or \
                   center_y < self.min_center_distance or \
                   frame.height - center_y < self.min_center_distance:
                    center_y2 = frame.height / 2
                    center_x2 = (center_y2 -
                                 (path.loc /
                                  (math.sin(path.angle) + .0001))) / (
                                      -math.cos(path.angle) /
                                      (math.sin(path.angle) + .0001))

                    if center_x2 > frame.width or center_x2 < 0:
                        path.center = [center_x, center_y]
                    else:
                        path.center = [center_x2, center_y2]
                else:
                    path.center = [center_x, center_y]

                cv.Circle(frame, (int(path.center[0]), int(path.center[1])),
                          15, (255, 255, 255), 2, 8, 0)

            self.return_output()
            svr.debug("Path", frame)
コード例 #24
0
def camera():
    """Vision main loop for the robot-football camera feed.

    Captures frames from camera 0, undistorts them with hard-coded
    calibration intrinsics, crops to the pitch region and segments three
    targets in HSV space: the ball and the blue/yellow robot "T"
    markers.  The two deepest convexity defects of each marker contour
    give the side lines of the T, from which the robot centre
    (``*_from``) and heading endpoint (``*_to``) are derived.  Results
    are drawn into ``dst``/``dst2`` and shown; on the first detection
    the goal positions are computed once via ``find_goals``.

    Runs until the exit key is pressed.  Relies on module-level globals
    ``connected``, ``s``, ``debug``, ``pitchSet``, ``us`` and on bare
    helpers (``GetSize``, ``GetImage``, ``Set2D``, ``CvtColor``, ``Add``,
    ``Sub``, ``ShowImage``, ``ArcLength``) -- presumably star-imported
    from ``cv`` or defined elsewhere in this module; TODO confirm.
    """
    found_goals = False
    print "# Starting initialization..."
    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Zero(intrinsics)

    #camera data: calibrated focal lengths (diagonal) and principal point
    intrinsics[0, 0] = 850.850708957251072
    intrinsics[1, 1] = 778.955239997982062
    intrinsics[2, 2] = 1
    intrinsics[0, 2] = 320.898495232253822
    intrinsics[1, 2] = 380.213734835526282
    dist_coeffs = cv.CreateMat(1, 4, cv.CV_64FC1)
    cv.Zero(dist_coeffs)
    dist_coeffs[0, 0] = -0.226795877008420
    dist_coeffs[0, 1] = 0.139445565548056
    dist_coeffs[0, 2] = 0.001245710462327
    dist_coeffs[0, 3] = -0.001396618726445
    print "# intrinsics loaded!"

    #prepare memory: one full-size undistortion target plus
    #single-channel work planes sized to the cropped pitch ROI
    capture = cv.CaptureFromCAM(0)
    src = cv.QueryFrame(capture)
    size = GetSize(src)
    dst0 = cv.CreateImage(size, src.depth, src.nChannels)
    image_ROI = (0, 60, 640, 340)
    size = (640, 340)
    hue = cv.CreateImage(size, 8, 1)
    sat = cv.CreateImage(size, 8, 1)
    val = cv.CreateImage(size, 8, 1)
    ball = cv.CreateImage(size, 8, 1)
    yellow = cv.CreateImage(size, 8, 1)
    blue = cv.CreateImage(size, 8, 1)
    # NOTE(review): pixel (4, 4) is forced to white in every plane --
    # presumably to guarantee each image is non-empty for later
    # processing/display; confirm before removing.
    Set2D(hue, 4, 4, 255)
    Set2D(sat, 4, 4, 255)
    Set2D(val, 4, 4, 255)
    Set2D(ball, 4, 4, 255)
    Set2D(yellow, 4, 4, 255)
    Set2D(blue, 4, 4, 255)

    # Last known ball position (pixels in the cropped ROI).
    ballx = 0
    bally = 0

    print "# base images created..."
    #####------------------adjustment data---------------------###############
    #shadow
    high = 40
    low = 300

    #threshold
    thresred = 160
    thresgreen = 220
    thresblue = 254

    #dilate: structuring elements reused by the morphology calls below
    ex = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
    ex2 = cv.CreateStructuringElementEx(2, 2, 1, 1, cv.CV_SHAPE_RECT)
    ex5 = cv.CreateStructuringElementEx(5, 5, 1, 1, cv.CV_SHAPE_RECT)
    tHack = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_CROSS)

    #ball: contour-count cap, area window and roundness limit
    ballcount = 15
    ballmaxarea = 200
    ballminiarea = 45
    ballcompact = 1.3

    #blue: contour-count cap, area window, convexity-defect depth window
    bluecount = 30
    bluemaxarea = 1500
    blueminiarea = 50
    bluemaxdepth = 10
    blueminidepth = 2

    #yellow: same tuning knobs as blue
    yellowcount = 30
    yellowmaxarea = 1000
    yellowminiarea = 50
    yellowmaxdepth = 10
    yellowminidepth = 3.2

    #####----------------------------------------

    # aa/countf implement the once-per-second FPS printout at the bottom
    # of the main loop.
    aa = time.time()
    storage = cv.CreateMemStorage()
    first = True
    pitch = 0  # 0 for main pitch, 1 for alt pitch
    countf = 0
    print "# starting capture..."
    print ''
    capture = cv.CaptureFromCAM(0)
    while (True):
        # Best-effort (re)connection to the Java side; the actual socket
        # code is commented out, so this currently just flips the flag.
        global connected
        if (not connected):
            global s
            #s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                #	s.connect((hostname,port))
                connected = True
            except:
                print "java down, waiting"
        src = cv.QueryFrame(capture)
        #ShowImage('src',src)
        cv.SetImageROI(dst0, (0, 0, 640, 480))
        average = cv.CreateImage(size, 8, 3)
        #barrel undistortion
        cv.Undistort2(src, dst0, intrinsics, dist_coeffs)
        #ROI = Region of Interests, crop the image
        cv.SetImageROI(dst0, image_ROI)
        dst = GetImage(dst0)
        dst2 = cv.CreateImage(size, 8, 3)
        Set2D(dst2, 4, 4, 255)
        hsv = cv.CreateImage(size, 8, 3)
        CvtColor(dst, hsv, CV_RGB2HSV)
        cv.Split(hsv, hue, sat, val, None)
        if (first):
            # One-time setup: pick the pitch profile and, for the alt
            # pitch, load the precomputed wall mask from disk.
            #hist = cv.CreateHist([32,64], CV_HIST_ARRAY, [[0,180], [0,256]], 1)
            #cv.CalcHist([hue, sat], hist, 0, None)
            #values = cv.GetMinMaxHistValue(hist)

            #print values
            #tweak = values[3][0]
            #if tweak >= 12:
            #	pitch = 1
            #print ">>> tweak=",tweak,"pitch selected =",pitch

            pitch = pitchSet
            if pitch == 1:
                base = cv.LoadImage("base.jpg", cv.CV_LOAD_IMAGE_UNCHANGED)
                baseInv = cv.CreateImage(size, 8, 1)
                cv.Not(base, baseInv)
                #huecorr = cv.LoadImage("huecorr.jpg",cv.CV_LOAD_IMAGE_UNCHANGED)
                #cv.Smooth(huecorr,huecorr)
                #ShowImage("base",base)
            #base = cv.CreateImage(size,8,1)
            #base = GetImage(val)
            #cv.Threshold(hue,hue,75,255,cv.CV_THRESH_BINARY_INV)
            #cv.SaveImage("huecorr.jpg", hue)
            #cv.Threshold(base,base,110,255,cv.CV_THRESH_BINARY)
            #cv.SaveImage("base.jpg", base)

            #cv.WaitKey(-1)
            first = False
        if (debug):
            ShowImage("hue", hue)
            ShowImage("sat", sat)
            ShowImage("val", val)

        # --- Segmentation, alt pitch: dark pixels are walls; ball from
        # sat+hue, yellow from bright val, blue from walls+hue. ---
        if pitch == 1:
            walls = cv.CreateImage(size, 8, 1)
            cv.Threshold(val, walls, 50, 255, cv.CV_THRESH_BINARY_INV)
            Set2D(walls, 4, 4, 255)

            # BALL
            # fixed this cause with another robot it was finding the ball on it. seems to work
            Add(sat, hue, ball)
            Sub(ball, walls, ball)
            #cv.SubS(ball,10,ball,baseInv)
            cv.Threshold(ball, ball, 170, 255, cv.CV_THRESH_BINARY)
            cv.Erode(ball, ball, ex5, 1)
            cv.Dilate(ball, ball, ex2, 1)
            Set2D(ball, 4, 4, 255)

            # YELLOW
            # cv.Threshold(hue,yellow,80,255,cv.CV_THRESH_BINARY)
            cv.Threshold(val, yellow, 250, 255, cv.CV_THRESH_BINARY)
            Sub(yellow, walls, yellow)
            cv.Erode(yellow, yellow, ex, 1)
            Set2D(yellow, 4, 4, 255)

            # blue
            cv.Add(walls, hue, blue)
            cv.Threshold(blue, blue, 40, 255, cv.CV_THRESH_BINARY_INV)
            cv.Erode(blue, blue, ex2, 2)
            Set2D(blue, 4, 4, 255)
            cv.Dilate(blue, blue, tHack, 2)

        # --- Segmentation, main pitch: note the second colour-space
        # conversion (BGR) before extracting blue. ---
        if pitch == 0:
            ballcompact = 2.0
            walls = cv.CreateImage(size, 8, 1)
            cv.Threshold(val, walls, 50, 255, cv.CV_THRESH_BINARY_INV)
            Set2D(walls, 4, 4, 255)

            # BALL
            #cv.Add(sat,val,ball)
            #ShowImage("rawB",ball)
            cv.Threshold(hue, ball, 110, 255, cv.CV_THRESH_BINARY)
            cv.Erode(ball, ball, ex2, 1)
            cv.Dilate(ball, ball, ex, 1)

            # YELLOW
            cv.Threshold(val, yellow, 240, 255, cv.CV_THRESH_BINARY)
            # cv.Threshold(hue,yellow,80,255,cv.CV_THRESH_TOZERO)
            # cv.Threshold(yellow,yellow,105,255,cv.CV_THRESH_TOZERO_INV)
            # cv.Threshold(yellow,yellow,50,255,cv.CV_THRESH_BINARY)
            cv.Erode(yellow, yellow, ex, 1)
            cv.Dilate(yellow, yellow, tHack, 1)

            # BLUE
            CvtColor(dst, hsv, CV_BGR2HSV)
            cv.Split(hsv, hue, sat, val, None)
            cv.Threshold(hue, blue, 80, 255, cv.CV_THRESH_BINARY)
            cv.Threshold(val, val, 80, 255, cv.CV_THRESH_BINARY_INV)

            # Removes the walls
            Sub(blue, val, blue)
            Sub(yellow, val, yellow)
            Sub(ball, val, ball)
            cv.Erode(blue, blue, ex, 1)

            Set2D(ball, 4, 4, 255)
            Set2D(yellow, 4, 4, 255)
            Set2D(blue, 4, 4, 255)

        if (debug):
            ShowImage("blue", blue)
            ShowImage("yellow", yellow)
            ShowImage("ball", ball)
        #find ball: walk the contour list, reject by roundness
        #(perimeter^2 / 4*pi*area) and by area window, then take the
        #centroid of the first surviving contour's points.
        #seq = None
        seq = cv.FindContours(ball, storage, cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            #print seq
            while seq != None:
                compact = 0
                count = count + 1
                if (count > ballcount):
                    break
                #removed and pitch==0 no idea why it was there
                if (cv.ContourArea(seq) != 0):
                    compact = ArcLength(seq) * ArcLength(seq) / (
                        4 * cv.ContourArea(seq) * math.pi)
                    if compact >= ballcompact:
                        print ">> compact: ", compact, ballcompact
                        seq = seq.h_next()
                        continue
                area = cv.ContourArea(seq)
                if (area == 0 or area > ballmaxarea
                        or area < ballminiarea):  # or compact > ballcompact):

                    print ">> area: ", area, ballmaxarea, ballminiarea
                    seq = seq.h_next()
                    continue
                else:
                    ballx = 0
                    bally = 0
                    for p in seq:
                        ballx = ballx + p[0]
                        bally = bally + p[1]
                    ballx = int(float(ballx) / len(seq))
                    bally = int(float(bally) / len(seq))
                    #	print "compact=%f,area=%f" %(compact,area)
                    cv.Circle(dst, (ballx, bally), 4, cv.CV_RGB(255, 255, 255),
                              2, 8, 0)
                    cv.Circle(dst2, (ballx, bally), 4,
                              cv.CV_RGB(255, 255, 255), 2, 8, 0)
                    break
            # NOTE(review): literal 15 duplicates ballcount (also 15);
            # they would drift apart if ballcount were retuned.
            if (count > 15 or seq == None):
                ballx = -1
                bally = -1
                print "# error: ball not found  "

        #find blue: locate the T marker via the two deepest convexity
        #defects of the contour's hull.
        seq = None
        seq = cv.FindContours(blue, storage, cv.CV_RETR_LIST, cv.CV_LINK_RUNS)
        if seq != None:
            count = 0
            while seq != None:
                count = count + 1
                if (count > bluecount):
                    break
                if (cv.ContourArea(seq) < blueminiarea
                        or cv.ContourArea(seq) > bluemaxarea):
                    seq = seq.h_next()
                    continue
                else:
                    hull = None
                    convex = None
                    #
                    hull = cv.ConvexHull2(seq, storage)
                    convex = cv.ConvexityDefects(seq, hull, storage)
                    if (len(convex) > 1):
                        convex = sorted(convex,
                                        key=lambda (k1, k2, k3, k4): k4
                                        )  #sort by depth of the convex defect
                        # Reject if either of the two deepest defects is
                        # outside the tuned depth window (draw them for
                        # debugging before moving on).
                        if (convex[len(convex) - 1][3] < blueminidepth
                                or convex[len(convex) - 2][3] < blueminidepth
                                or convex[len(convex) - 1][3] > bluemaxdepth
                                or convex[len(convex) - 2][3] > bluemaxdepth):
                            cv.Line(dst, convex[len(convex) - 1][0],
                                    convex[len(convex) - 1][2],
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, convex[len(convex) - 1][2],
                                    convex[len(convex) - 1][1],
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)
                            cv.Line(dst, convex[len(convex) - 2][0],
                                    convex[len(convex) - 2][2],
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, convex[len(convex) - 2][2],
                                    convex[len(convex) - 2][1],
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)
                            seq = seq.h_next()
                            continue
                        else:
                            #find the T
                            blue_start1 = convex[len(convex) - 1][0]
                            blue_end1 = convex[len(convex) - 1][1]
                            blue_depth1 = convex[len(convex) - 1][2]

                            #draw the side line of T
                            cv.Line(dst, blue_start1, blue_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, blue_depth1, blue_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, blue_start1, blue_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, blue_depth1, blue_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            blue_start2 = convex[len(convex) - 2][0]
                            blue_end2 = convex[len(convex) - 2][1]
                            blue_depth2 = convex[len(convex) - 2][2]
                            cv.Line(dst, blue_start2, blue_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, blue_depth2, blue_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, blue_start2, blue_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, blue_depth2, blue_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            blue_from = ((blue_depth1[0] + blue_depth2[0]) / 2,
                                         (blue_depth1[1] + blue_depth2[1]) / 2
                                         )  #calculate the center of robot

                            #calculate the end of direction vector, the two end point of the smaller distans
                            if math.hypot(blue_start1[0] - blue_end2[0],
                                          blue_start1[1] -
                                          blue_end2[1]) > math.hypot(
                                              blue_end1[0] - blue_start2[0],
                                              blue_end1[1] - blue_start2[1]):
                                blue_to = ((blue_end1[0] + blue_start2[0]) / 2,
                                           (blue_end1[1] + blue_start2[1]) / 2)
                            else:
                                blue_to = ((blue_start1[0] + blue_end2[0]) / 2,
                                           (blue_start1[1] + blue_end2[1]) / 2)
                            cv.Line(dst, blue_from, blue_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst, blue_from, 1, cv.CV_RGB(255, 0, 0),
                                      2, 8, 0)
                            cv.Circle(dst, blue_to, 1, cv.CV_RGB(0, 0, 0), 2,
                                      8, 0)

                            cv.Line(dst2, blue_from, blue_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst2, blue_from, 1, cv.CV_RGB(255, 0, 0),
                                      2, 8, 0)
                            cv.Circle(dst2, blue_to, 1,
                                      cv.CV_RGB(255, 255, 255), 2, 8, 0)
                            break
                    else:
                        seq = seq.h_next()
                        continue
            if (count > bluecount or seq == None):
                blue_from = (0, 0)
                blue_to = (0, 0)
                print "# error: blue not found  "
        #find yellow: identical T-marker detection with the yellow tuning
        seq = None
        seq = cv.FindContours(yellow, storage, cv.CV_RETR_LIST,
                              cv.CV_LINK_RUNS)

        if seq != None:
            count = 0
            while seq != None:
                count = count + 1
                if (count > yellowcount):
                    break
                area = cv.ContourArea(seq)
                if (area < yellowminiarea or area > yellowmaxarea):
                    seq = seq.h_next()
                    continue
                else:
                    hull = None
                    convex = None
                    #
                    hull = cv.ConvexHull2(seq, storage)
                    convex = cv.ConvexityDefects(seq, hull, storage)
                    if (len(convex) > 1):
                        convex = sorted(convex,
                                        key=lambda (k1, k2, k3, k4): k4
                                        )  #sort by depth of the convex defect
                        if (convex[len(convex) - 1][3] < yellowminidepth
                                or convex[len(convex) - 2][3] < yellowminidepth
                                or convex[len(convex) - 1][3] > yellowmaxdepth
                                or
                                convex[len(convex) - 2][3] > yellowmaxdepth):
                            seq = seq.h_next()
                            continue
                        else:
                            #find the T
                            yellow_start1 = convex[len(convex) - 1][0]
                            yellow_end1 = convex[len(convex) - 1][1]
                            yellow_depth1 = convex[len(convex) - 1][2]

                            #draw the side line of T
                            cv.Line(dst, yellow_start1, yellow_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, yellow_depth1, yellow_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, yellow_start1, yellow_depth1,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, yellow_depth1, yellow_end1,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            yellow_start2 = convex[len(convex) - 2][0]
                            yellow_end2 = convex[len(convex) - 2][1]
                            yellow_depth2 = convex[len(convex) - 2][2]
                            cv.Line(dst, yellow_start2, yellow_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst, yellow_depth2, yellow_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            cv.Line(dst2, yellow_start2, yellow_depth2,
                                    cv.CV_RGB(0, 0, 255), 2, 8, 0)
                            cv.Line(dst2, yellow_depth2, yellow_end2,
                                    cv.CV_RGB(0, 255, 255), 2, 8, 0)

                            yellow_from = (
                                (yellow_depth1[0] + yellow_depth2[0]) / 2,
                                (yellow_depth1[1] + yellow_depth2[1]) / 2
                            )  #calculate the center of robot

                            #calculate the end of direction vector, the two end point of the smaller distans
                            if math.hypot(
                                    yellow_start1[0] - yellow_end2[0],
                                    yellow_start1[1] -
                                    yellow_end2[1]) > math.hypot(
                                        yellow_end1[0] - yellow_start2[0],
                                        yellow_end1[1] - yellow_start2[1]):
                                yellow_to = (
                                    (yellow_end1[0] + yellow_start2[0]) / 2,
                                    (yellow_end1[1] + yellow_start2[1]) / 2)
                            else:
                                yellow_to = (
                                    (yellow_start1[0] + yellow_end2[0]) / 2,
                                    (yellow_start1[1] + yellow_end2[1]) / 2)
                            # print cv.ContourArea(seq)
                            cv.Line(dst, yellow_from, yellow_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst, yellow_from, 1,
                                      cv.CV_RGB(255, 0, 0), 2, 8, 0)
                            cv.Circle(dst, yellow_to, 1, cv.CV_RGB(0, 0, 0), 2,
                                      8, 0)

                            cv.Line(dst2, yellow_from, yellow_to,
                                    cv.CV_RGB(255, 0, 255), 2, 8, 0)
                            cv.Circle(dst2, yellow_from, 1,
                                      cv.CV_RGB(255, 0, 0), 2, 8, 0)
                            cv.Circle(dst2, yellow_to, 1,
                                      cv.CV_RGB(255, 255, 255), 2, 8, 0)
                            break
                    else:
                        seq = seq.h_next()
                        continue
            if (count > yellowcount or seq == None):
                yellow_from = (0, 0)
                yellow_to = (0, 0)
                print "# error: yellow not found"
        ballpos = (ballx, bally)
        ShowImage("camera", dst)
        # One-time goal localisation from our own robot's position;
        # ``us`` selects which marker colour is ours.
        if (found_goals == False):
            if (us == "yellow"):
                goals = find_goals(size, yellow_from)
                stewies_goal = goals[0]
                loiss_goal = goals[1]
                found_goals = True
            elif (us == "blue"):
                goals = find_goals(size, blue_from)
                stewies_goal = goals[0]
                loiss_goal = goals[1]
                found_goals = True
        #if (ballx >= 0):


#		output(ballpos,blue_from,blue_to,yellow_from,yellow_to,stewies_goal,loiss_goal)
        # Once-per-second FPS report.
        time_passed = time.time() - aa
        countf += 1
        if (time_passed >= 1):
            print "frame per second: " + str(countf),
            countf = 0
            aa = time.time()
        keyPress = cv.WaitKey(2)
        # NOTE(review): 1048608 = 0x100020; low byte 32 is the space key
        # (high bits are a GTK modifier mask on some builds) -- confirm.
        if (keyPress == 1048608):
            break
コード例 #25
0
ファイル: demo.py プロジェクト: oceanz216/MLDM-demo
        k = cv.WaitKey(3)
        k = chr(k) if k > 0 else 0
        if k == 'q':
            break
        if gTracking:
            # motion tracking
            iplimage = cv.QueryFrame(cam)
            iplimage = im.resize(iplimage, gImageSize)
            cv.Flip(iplimage, None, 1)
            last_imgobj = ImageObject(None, last_iplimage, gImageSize)
            curr_imgobj = ImageObject(None, iplimage, gImageSize)
            matches = match(last_imgobj, curr_imgobj)

            last_iplimage = iplimage
            for kpt in curr_imgobj.keypoints:
                cv.Circle(iplimage, tuple(map(int, kpt.pt)), 1, im.color.green)

            # draw lines between matches
            for m in matches:
                kpt1 = last_imgobj.keypoints[m.queryIdx]
                kpt2 = curr_imgobj.keypoints[m.trainIdx]
                pt1 = tuple(map(int, kpt1.pt))
                pt2 = map(int, kpt2.pt)
                pt2 = tuple(pt2)
                xabs = abs(pt1[0]-pt2[0])
                yabs = abs(pt1[1]-pt2[1])
                if xabs < 20 and yabs < 20:
                    cv.Line(iplimage, pt1, pt2, im.color.green, thickness=1)

            cv.ShowImage(gMainWindowName, iplimage)
コード例 #26
0
#!/usr/bin/env python

import cv

# Open the default camera and give the driver a moment to settle.
capture = cv.CaptureFromCAM(0)
cv.WaitKey(200)

# Prime the capture and prepare an overlay font (currently unused).
frame = cv.QueryFrame(capture)
font = cv.InitFont(cv.CV_FONT_HERSHEY_DUPLEX, 1, 1, 0, 2, 8)

# Live preview: overlay a centred crosshair and a 100 px circle on each
# 640x480 frame until the user presses Esc.
while True:
    frame = cv.QueryFrame(capture)

    cv.Line(frame, (320, 0), (320, 480), 255)
    cv.Line(frame, (0, 240), (640, 240), 255)
    cv.Circle(frame, (320, 240), 100, 255)

    cv.ShowImage("Window", frame)
    key = cv.WaitKey(16) & 255

    if key == 27:  # Esc quits the preview
        break
コード例 #27
0
ファイル: nao_1.29.py プロジェクト: stayrealff/nao-lib
def FindObject(frame):
    global old_frame
    global gftt_list
    global weights
    global existence

    if not MovingHead():
        try:
            mask = FrameMask(old_frame, frame)
        except:
            old_frame = cv.CloneImage(frame)
            gftt_list = list()
            return None, None, False
    else:
        old_frame = cv.CloneImage(frame)
        gftt_list = list()
        return None, None, False

    if mask == None:
        gftt_list = list()
        print "2"
        return None, None, False

    ## Find Good Features to track
    if len(gftt_list) < 300:
        #gftt_list.append((GoodFeaturesToTrack(old_frame, mask),1))
        gftt_new, weights_new, existence_new = GoodFeaturesToTrack(
            old_frame, mask)

        if gftt_new != None:
            gftt_list = gftt_list + gftt_new
            weights = weights + weights_new
            existence = existence + existence_new

    gftt_list_new, weights, existence = OpticalFlow(frame, old_frame,
                                                    gftt_list, weights,
                                                    existence)
    weights, existence = UpdatePointWeights(gftt_list_new, gftt_list, weights,
                                            existence)

    gftt_list = gftt_list_new

    gftt_list, weights, existence = DropPoints(gftt_list, weights, existence)
    gftt_img = DrawPoints(frame, gftt_list)

    if len(gftt_list) > 30:
        loc_obj = list()
        loc_obj = AvgPoint(gftt_list, 1)
        cv.Circle(gftt_img, loc_obj, 4, 255, 4, 8, 0)
        convrad = 0.55 / (frame.width / 2)
        loc_obj = list(loc_obj)
        loc_obj[0] = (loc_obj[0] - (frame.width / 2)) * convrad
        loc_obj[1] = (loc_obj[1] - (frame.height / 2)) * convrad
    else:
        loc_obj = (None, None)
    cv.ShowImage("Good Features", gftt_img)
    cv.ShowImage("Difference", mask)
    cv.Copy(frame, old_frame)
    if MovingHead():
        print "Object Location = 0"
        loc_obj[0] = 0
        loc_obj[1] = 0
        gftt_list = list()
        old_frame = 0
    return loc_obj[0], loc_obj[1], True
コード例 #28
0
def affichage_corners(dcorners, img, diametre):
    """Draw each corner of *dcorners* on *img* as a filled disc.

    Uses the module-level colour ``rouge``; *diametre* is the circle
    radius passed straight to ``cv.Circle``.
    """
    for corner_x, corner_y in dcorners:
        cv.Circle(img, (corner_x, corner_y), diametre, rouge, -1)
コード例 #29
0
    def find_corners(self, centre):
        """Locate the four corners of the tray from the Canny edge image.

        Scans ``self.canny`` for the lowest edge point (bottom corner)
        and the leftmost edge point (left corner), marks the scanned
        pixels on ``self.cv_image``, then mirrors both found corners
        through the known *centre* to obtain the remaining two.  Shows
        (and optionally saves) the annotated images and draws the tray
        boundary.

        Returns ``(True, (corner_1, corner_2, corner_3, corner_4))``.
        The success flag is unconditionally True here.
        """
        # find bottom corner
        max_x = 0
        max_y = 0

        # For each column, walk upward from near the bottom while the
        # Canny response stays high (> 100); the column whose walk ends
        # lowest gives the bottom corner.  NOTE(review): presumably the
        # tray border is a thick bright band in self.canny -- confirm.
        for x in range(100, self.width - 100):
            y = self.height - 20
            while y > 0 and cv.Get2D(self.canny, y, x)[0] > 100:
                y = y - 1
            if y > 20:
                # Mark the stopping pixel in red for the debug view.
                cv.Set2D(cv.fromarray(self.cv_image), y, x, (0, 0, 255))
                if y > max_y:
                    max_x = x
                    max_y = y

        corner_1 = (max_x, max_y)

        # find left corner
        min_x = self.width
        min_y = 0

        # Same scan rotated 90 degrees: walk rightward per row; the row
        # whose walk ends leftmost gives the left corner.
        for y in range(100, self.height - 100):
            x = 20
            while x < self.width - 1 and cv.Get2D(self.canny, y, x)[0] > 100:
                x = x + 1
            if x < self.width - 20:
                cv.Set2D(cv.fromarray(self.cv_image), y, x, (0, 255, 0, 0))
                if x < min_x:
                    min_x = x
                    min_y = y

        corner_2 = (min_x, min_y)

        # display corner image
        cv.ShowImage("Corner", cv.fromarray(self.cv_image))

        if self.save_images:
            # save Canny image
            file_name = self.image_dir + "egg_tray_canny.jpg"
            cv.SaveImage(file_name, self.canny)

            # mark corners and save corner image
            cv.Circle(cv.fromarray(self.cv_image), corner_1, 9, (0, 250, 0),
                      -1)
            cv.Circle(cv.fromarray(self.cv_image), corner_2, 9, (0, 250, 0),
                      -1)
            file_name = self.image_dir + "corner.jpg"
            cv.SaveImage(file_name, cv.fromarray(self.cv_image))

        # 3ms wait
        cv.WaitKey(3)

        # two corners found and centre known: reflect each through the
        # centre to find the two opposite corners
        corner_3 = ((2 * centre[0]) - corner_1[0],
                    (2 * centre[1]) - corner_1[1])
        corner_4 = ((2 * centre[0]) - corner_2[0],
                    (2 * centre[1]) - corner_2[1])

        # draw ball tray boundary
        c1 = (int(corner_1[0]), int(corner_1[1]))
        c2 = (int(corner_2[0]), int(corner_2[1]))
        c3 = (int(corner_3[0]), int(corner_3[1]))
        c4 = (int(corner_4[0]), int(corner_4[1]))

        cv.Line(cv.fromarray(self.cv_image), c1, c2, (255, 0, 0), thickness=3)
        cv.Line(cv.fromarray(self.cv_image), c2, c3, (255, 0, 0), thickness=3)
        cv.Line(cv.fromarray(self.cv_image), c3, c4, (255, 0, 0), thickness=3)
        cv.Line(cv.fromarray(self.cv_image), c4, c1, (255, 0, 0), thickness=3)

        return True, (corner_1, corner_2, corner_3, corner_4)
コード例 #30
0
def main():
    """Collect kinect<->tracker calibration samples and fit a rigid transform.

    Repeatedly detects a checkerboard in the kinect RGB stream; once the first
    detected corner is stable across several frames, converts it to a 3D point
    via the /pixel_2_3d service and pairs it with the TF pose of the board
    frame. After shutdown, estimates the transform between the two 3D point
    sets with the Umeyama closed-form method and again with a RANSAC variant,
    printing both results as "x y z qx qy qz qw" lines.
    """
    gray = (100,100,100)          # pygame background fill color when no image yet
    corner_len = 5                # ring-buffer size used for the corner-stability test
    chessboard = ChessboardInfo()
    chessboard.n_cols = 6
    chessboard.n_rows = 7
    chessboard.dim = 0.02273      # square size in meters — TODO confirm against the physical board
    cboard_frame = "kinect_cb_corner"
#kinect_tracker_frame = "kinect"
#TODO
    use_pygame = False
    kinect_tracker_frame = "pr2_antenna"

    rospy.init_node("kinect_calib_test")
    img_list = ImageListener("/kinect_head/rgb/image_color")
    # Persistent service proxy: pixel (u, v) -> 3D point in the camera cloud.
    pix3d_srv = rospy.ServiceProxy("/pixel_2_3d", Pixel23d, True)
    tf_list = tf.TransformListener()
    if use_pygame:
        pygame.init()
        clock = pygame.time.Clock()
        screen = pygame.display.set_mode((640, 480))
    calib = Calibrator([chessboard])
    done = False
    # Ring buffer of the last corner_len 2D corner positions; initialized far
    # off-screen so the variance test below fails until it is fully populated.
    corner_list = np.ones((2, corner_len)) * -1000.0
    corner_i = 0
    saved_corners_2d, saved_corners_3d, cb_locs = [], [], []
    while not rospy.is_shutdown():
        try:
            cb_pos, cb_quat = tf_list.lookupTransform(kinect_tracker_frame, 
                                                      cboard_frame, 
                                                      rospy.Time())
        # NOTE(review): bare except — presumably meant to catch the tf lookup
        # exceptions (LookupException etc.) while TF data is unavailable, but
        # it silently swallows every error; consider narrowing to tf.Exception.
        except:
            rospy.sleep(0.001)
            continue
        cv_img = img_list.get_cv_img()
        if cv_img is not None:
            has_corners, corners, chess = calib.get_corners(cv_img)
            # Re-draw all previously accepted samples (yellow) each frame.
            for corner2d in saved_corners_2d:
                cv.Circle(cv_img, corner2d, 4, [0, 255, 255])
            if has_corners:
                corner_i += 1
                corner = corners[0]   # only the first checkerboard corner is tracked
                if use_pygame:
                    for event in pygame.event.get():
                        if event.type == pygame.KEYDOWN:
                            print event.dict['key'], pygame.K_d
                            if event.dict['key'] == pygame.K_d:
                                done = True   # 'd' = done collecting, go fit
                            if event.dict['key'] == pygame.K_q:
                                return        # 'q' = quit without fitting
                    if done:
                        break
                corner_list[:, corner_i % corner_len] = corner
                # Accept the corner only when it has been stable (low variance
                # over the last corner_len frames).
                if np.linalg.norm(np.var(corner_list, 1)) < 1.0:
                    corner_avg = np.mean(corner_list, 1)
                    corner_avg_tuple = tuple(corner_avg.round().astype(int).tolist())
                    cv.Circle(cv_img, corner_avg_tuple, 4, [0, 255, 0])  # green = stable
                    pix3d_resp = pix3d_srv(*corner_avg_tuple)
                    if pix3d_resp.error_flag == pix3d_resp.SUCCESS:
                        corner_3d_tuple = (pix3d_resp.pixel3d.pose.position.x,
                                           pix3d_resp.pixel3d.pose.position.y,
                                           pix3d_resp.pixel3d.pose.position.z)
                        if len(saved_corners_3d) == 0:
                            cb_locs.append(cb_pos)
                            saved_corners_2d.append(corner_avg_tuple)
                            saved_corners_3d.append(corner_3d_tuple)
                        else:
                            # Only keep samples at least 0.03 m (3 cm) away
                            # from every previously saved 3D corner, so the
                            # fit gets spatially spread-out correspondences.
                            diff_arr = np.array(np.mat(saved_corners_3d) - np.mat(corner_3d_tuple))
                            if np.min(np.sqrt(np.sum(diff_arr ** 2, 1))) >= 0.03:
                                cb_locs.append(cb_pos)
                                saved_corners_2d.append(corner_avg_tuple)
                                saved_corners_3d.append(corner_3d_tuple)
                                print "Added sample", len(saved_corners_2d) - 1
                else:
                    cv.Circle(cv_img, corner, 4, [255, 0, 0])  # blue/red = not yet stable
            else:
                # Board lost: reset the ring buffer so stale positions cannot
                # pass the stability test when the board reappears.
                corner_list = np.ones((2, corner_len)) * -1000.0
        if use_pygame:
            if cv_img is None:
                screen.fill(gray)
            else:
                screen.blit(img_list.get_pg_img(cv_img), (0, 0))
            pygame.display.flip()
        rospy.sleep(0.001)
    # Fit: columns of A are camera-frame 3D corners, columns of B are the
    # corresponding TF board positions.
    A = np.mat(saved_corners_3d).T
    B = np.mat(cb_locs).T
    print A, B
    t, R = umeyama_method(A, B)
    print A, B, R, t
    print "-" * 60
    print "Transformation Parameters:"
    pos, quat = PoseConverter.to_pos_quat(t, R)
    print '%f %f %f %f %f %f %f' % tuple(pos + quat)
    # Robust re-fit; thresholds/ratios here are RANSAC tuning parameters.
    t_r, R_r = ransac(A, B, 0.02, percent_set_train=0.5, percent_set_fit=0.6)
    print t_r, R_r
    pos, quat = PoseConverter.to_pos_quat(t_r, R_r)
    print '%f %f %f %f %f %f %f' % tuple(pos + quat)