Example #1
    def process_frame(self, frame):

        self.output.found = False

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Use RGB color finder
        binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(
            frame, 250, 125, 0, 1500, 500, .3)
        color_filtered = cv.CloneImage(binary)

        blob_map = cv.CloneImage(binary)
        blobs = libvision.blob.find_blobs(binary,
                                          blob_map,
                                          min_blob_size=50,
                                          max_blobs=10)

        if not blobs:
            return

        binary = cv.CloneImage(blob_map)
        mapping = [0] * 256
        for blob in blobs:
            mapping[blob.id] = 255
        libvision.greymap.greymap(blob_map, binary, mapping)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        lines = cv.HoughLines2(binary,
                               line_storage,
                               cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0)
        print "hough transform found", len(lines), " lines"
        lines = lines[:self.lines_to_consider]  # Limit number of lines

        # if not lines:
        #    return

        paths = self.path_manager.process(lines, blobs)

        if paths and not self.path:
            # If paths[1] is clockwise of paths[0]
            distance = circular_distance(paths[0].angle, paths[1].angle)

            if distance > 0:
                self.path = paths[self.which_path]
            else:
                self.path = paths[1 - self.which_path]

        if paths and self.path in paths and self.path.blobs:

            temp_map = cv.CloneImage(blob_map)

            mapping = [0] * 256
            for blob in self.path.blobs:
                mapping[blob.id] = 255
            libvision.greymap.greymap(blob_map, temp_map, mapping)
            center = self.find_centroid(temp_map)

            svr.debug("map", temp_map)

            self.path.center = (center[0] - (frame.width / 2),
                                -center[1] + (frame.height / 2))

        if True:
            # Show color filtered
            color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
            cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
            cv.Sub(frame, color_filtered_rgb, frame)

            # Show edges
            binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
            cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
            cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
            cv.Sub(frame, binary_rgb, frame)  # Remove all but Red
            test_lines = []
            new_path = None

            for line in lines[:]:
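                # Seed the candidate list: only while it is empty does a raw Hough line become a new candidate Path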
                if self.candidates == []:
                    new_path = Path(line[0], line[1])
                    new_path.id = self.path_id
                    self.path_id += 1
                    new_path.last_seen += 1
                    self.candidates.append(new_path)
                    print "got a candidate"
            for candidate in self.candidates:
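                # Bootstrap the confirmed list with the first candidate when nothing has been confirmed yet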
                if len(self.confirmed) == 0:
                    self.confirmed.append(candidate)

            for line in lines[:]:
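                # Merge each remaining line into a nearby candidate (averaging loc and angle) or spawn a new candidate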
                for candidate in self.candidates:
                    if math.fabs(line[0] - candidate.loc) < self.distance_threshold and \
                       math.fabs(line[1] - candidate.angle) < self.angle_threshold:
                        candidate.loc = (candidate.loc + line[0]) / 2
                        candidate.angle = (candidate.angle + line[1]) / 2
                        if candidate.last_seen < self.max_lastseen:
                            candidate.last_seen += 1
                        # print line1

                        if line in lines:
                            lines.remove(line)
                    else:
                        new_path = Path(line[0], line[1])
                        new_path.id = self.path_id
                        self.path_id += 1
                        new_path.last_seen += 1
                        new_path.seencount += 5
                        self.candidates.append(new_path)

            for candidate in self.candidates[:]:
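                # Age every candidate; promote frequently seen candidates to confirmed and drop stale ones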
                candidate.last_seen -= 1
                if candidate.seencount > self.min_seencount:
                    self.confirmed.append(candidate)
                    self.candidates.remove(candidate)
                if candidate.last_seen == -1:
                    self.candidates.remove(candidate)

            for confirmed in self.confirmed:
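                # Snap each confirmed path onto a matching Hough line and refresh its last_seen counter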
                for line in lines[:]:
                    if math.fabs(line[0] - confirmed.loc) < self.distance_trans and \
                       math.fabs(line[1] - confirmed.angle) < self.angle_trans:
                        confirmed.loc = line[0]
                        confirmed.angle = line[1]
                        if confirmed.last_seen < self.max_lastseen:
                            confirmed.last_seen += 2

                        if line in lines:
                            lines.remove(line)
                            print "line removed"

            for confirmed in self.confirmed:
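                # Absorb candidates that coincide with an already confirmed path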
                for candidate in self.candidates[:]:
                    if math.fabs(candidate.loc - confirmed.loc) < self.distance_trans and \
                       math.fabs(candidate.angle - confirmed.angle) < self.angle_trans:
                        confirmed.loc = candidate.loc
                        confirmed.angle = candidate.angle
                        if confirmed.last_seen < self.max_lastseen:
                            confirmed.last_seen += 2

                        print "lines"
                        if candidate in self.candidates:
                            self.candidates.remove(candidate)
                            print "line removed"

            for confirmed1 in self.confirmed[:]:
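                # Merge confirmed paths that duplicate each other, keeping the one with the lower (older) id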
                for confirmed2 in self.confirmed[:]:
                    if math.fabs(confirmed1.loc - confirmed2.loc) < self.distance_threshold and \
                       math.fabs(confirmed1.angle - confirmed2.angle) < self.angle_threshold:
                        if confirmed1.id > confirmed2.id and confirmed1 in self.confirmed:
                            confirmed2.loc = (confirmed2.loc +
                                              confirmed1.loc) / 2
                            confirmed2.angle = (confirmed2.angle +
                                                confirmed1.angle) / 2
                            self.confirmed.remove(confirmed1)
                            if confirmed2.last_seen < self.max_lastseen:
                                confirmed2.last_seen += 2
                        if confirmed2.id > confirmed1.id and confirmed2 in self.confirmed:
                            confirmed1.loc = (confirmed2.loc +
                                              confirmed1.loc) / 2
                            confirmed1.angle = (confirmed2.angle +
                                                confirmed1.angle) / 2
                            self.confirmed.remove(confirmed2)
                            if confirmed1.last_seen < self.max_lastseen:
                                confirmed1.last_seen += 2

            for confirmed in self.confirmed[:]:
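                # Age confirmed paths and drop any that have not been matched recently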
                confirmed.last_seen -= 1
                if confirmed.last_seen < -10:
                    self.confirmed.remove(confirmed)

            final_lines = []
            for confirmed in self.confirmed:
                final_line = [confirmed.loc, confirmed.angle]
                final_lines.append(final_line)
                print confirmed.id
            candidate_ids = []
            for candidate in self.candidates:
                new_id = candidate.id
                candidate_ids.append(new_id)
            print candidate_ids
            print len(self.candidates)

            libvision.misc.draw_lines(frame, final_lines)
            #libvision.misc.draw_lines2(frame, lines)
            print "Number of Paths:", len(self.confirmed)
            print "Number of Candidates:", len(self.candidates)
            # type -s after the command to run vision for this to work and not produce errors.
            # if len(self.confirmed)>1:
            #    raw_input()

            self.output.paths = []
            center_x = 0
            center_y = 0
            self.output.paths = self.confirmed

            for path in self.output.paths:
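                # Convert each confirmed (rho, theta) line into an approximate on-screen center point for the output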
                path.theta = path.angle
                center_x = frame.width / 2
                path.x = center_x
                center_y = (-math.cos(path.angle) /
                            (math.sin(path.angle) + .001)) * center_x + (
                                path.loc / ((math.sin(path.angle) + .001)))
                path.y = center_y
                if center_y > frame.height or center_y < 0 or \
                   center_y < self.min_center_distance or \
                   frame.height - center_y < self.min_center_distance:
                    center_y2 = frame.height / 2
                    center_x2 = (center_y2 -
                                 (path.loc /
                                  (math.sin(path.angle) + .0001))) / (
                                      -math.cos(path.angle) /
                                      (math.sin(path.angle) + .0001))

                    if center_x2 > frame.width or center_x2 < 0:
                        path.center = [center_x, center_y]
                    else:
                        path.center = [center_x2, center_y2]
                else:
                    path.center = [center_x, center_y]

                cv.Circle(frame, (int(path.center[0]), int(path.center[1])),
                          15, (255, 255, 255), 2, 8, 0)

            self.return_output()
            svr.debug("Path", frame)
Example #2
    def process_frame(self, frame):
        debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, debug_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 2)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        cv.CvtColor(binary, debug_frame, cv.CV_GRAY2RGB)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        line_groups = []  # A list of line groups which are each a line list

        for line in raw_lines:
            group_found = False
            for line_group in line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                line_groups.append([line])

        # Average line groups into lines
        lines = []
        for line_group in line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos),
                    circular_average(angles, math.pi))
            lines.append(line)

        libvision.misc.draw_lines(debug_frame, raw_lines)
        # cv.CvtColor(color_filtered,debug_frame, cv.CV_GRAY2RGB)
        svr.debug("Bins", debug_frame)
Example #3
    def process_frame(self, frame):

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)

        found_hedge = False

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have value channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        '''
        kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)
        '''
        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        # Get vertical lines
        vertical_lines = []
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
                    line[1] > math.pi - self.vertical_threshold:

                vertical_lines.append((abs(line[0]), line[1]))

        # Group vertical lines
        vertical_line_groups = [
        ]  # A list of line groups which are each a line list
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        # Get horizontal lines
        horizontal_lines = []
        for line in raw_lines:
            dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
                    dist_from_horizontal > math.pi - self.horizontal_threshold:

                horizontal_lines.append((abs(line[0]), line[1]))

        # Group horizontal lines
        horizontal_line_groups = [
        ]  # A list of line groups which are each a line list
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) == 1:
            self.seen_crossbar = True
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        self.left_pole = None
        self.right_pole = None
        if len(vertical_lines) == 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
            self.right_pole = round(
                max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
        # TODO: If one pole is seen, is it left or right pole?

        # Calculate planar distance r (assuming we are moving perpendicular to
        # the hedge)
        if self.left_pole and self.right_pole:
            theta = abs(self.left_pole - self.right_pole)
            self.r = 3 / tan(radians(theta / 2))
        else:
            self.r = None

        if self.r and self.seen_crossbar:
            bar_phi = (-1 * horizontal_lines[0][0] +
                       frame.height / 2) / (frame.height / 2) * 32
            self.crossbar_depth = self.r * atan(radians(bar_phi))
        else:
            self.crossbar_depth = None

        if self.debug:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
            libvision.misc.draw_lines(frame, vertical_lines)
            libvision.misc.draw_lines(frame, horizontal_lines)

            #cv.ShowImage("Hedge", cv.CloneImage(frame))
            svr.debug("Hedge", cv.CloneImage(frame))

        # populate self.output with infos
        self.output.seen_crossbar = self.seen_crossbar
        self.output.left_pole = self.left_pole
        self.output.right_pole = self.right_pole
        self.output.r = self.r
        self.output.crossbar_depth = self.crossbar_depth

        self.return_output()
        print self
Example #4
File: pizza.py  Project: tarora2/seawolf
    def process_frame(self, frame):
        self.debug_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        og_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, self.debug_frame)
        cv.Copy(self.debug_frame, og_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have saturation channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        cv.CvtColor(binary, self.debug_frame, cv.CV_GRAY2RGB)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_PROBABILISTIC,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=self.min_length,
                                   param2=self.max_gap)

        lines = []
        corners = []

        for line in raw_lines:
            lines.append(line)

        # Grouping lines depending on endpoint similarities

        for line1 in lines[:]:
            for line2 in lines[:]:
                if line1 in lines and line2 in lines and line1 != line2:
                    if math.fabs(line1[0][0] - line2[0][0]) < self.max_corner_range and \
                       math.fabs(line1[0][1] - line2[0][1]) < self.max_corner_range and \
                       math.fabs(line1[1][0] - line2[1][0]) < self.max_corner_range and \
                       math.fabs(line1[1][1] - line2[1][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(
                                line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)
                    elif math.fabs(line1[0][0] - line2[1][0]) < self.max_corner_range and \
                            math.fabs(line1[0][1] - line2[1][1]) < self.max_corner_range and \
                            math.fabs(line1[1][0] - line2[0][0]) < self.max_corner_range and \
                            math.fabs(line1[1][1] - line2[0][1]) < self.max_corner_range:
                        if line_distance(line1[0], line1[1]) > line_distance(
                                line2[0], line2[1]):
                            lines.remove(line2)
                        else:
                            lines.remove(line1)

        for line in lines:
            corners.append(line[0])
            corners.append(line[1])

        for corner1 in corners:
            for corner2 in corners:
                for corner3 in corners:
                    for corner4 in corners:
                        # Checks that corners are not the same and are in the proper orientation
                        if corner4[0] != corner3[0] and corner4[0] != corner2[0] and corner4[0] != corner1[0] and \
                           corner3[0] != corner2[0] and corner3[0] != corner1[0] and corner2[0] != corner1[0] and \
                           corner4[1] != corner3[1] and corner4[1] != corner2[1] and corner4[1] != corner1[1] and \
                           corner3[1] != corner2[1] and corner3[1] != corner1[1] and corner2[1] != corner1[1] and \
                           corner2[0] >= corner3[0] and corner1[1] >= corner4[1] and corner2[0] >= corner1[0]:
                            # Checks that the side ratios are correct
                            if math.fabs(line_distance(corner1, corner3) - line_distance(corner2, corner4)) < self.size_threshold and \
                               math.fabs(line_distance(corner1, corner2) - line_distance(corner3, corner4)) < self.size_threshold and \
                               math.fabs(line_distance(corner1, corner3) / line_distance(corner1, corner2)) < self.ratio_threshold and \
                               math.fabs(line_distance(corner1, corner2) / line_distance(corner1, corner3)) < self.ratio_threshold:
                                #^^^ CHANGED OR TO AND --> DID MUCH BETTER. CONSIDER CHANGING ON BINSCORNER

                                # Checks that angles are roughly 90 degrees
                                angle_cnr_2 = math.fabs(
                                    angle_between_lines(
                                        line_slope(corner1, corner2),
                                        line_slope(corner2, corner4)))
                                if self.angle_min < angle_cnr_2 < self.angle_max:
                                    angle_cnr_3 = math.fabs(
                                        angle_between_lines(
                                            line_slope(corner1, corner3),
                                            line_slope(corner3, corner4)))
                                    if self.angle_min2 < angle_cnr_3 < self.angle_max2:
                                        new_box = Pizza(
                                            corner1, corner2, corner3, corner4)
                                        self.match_Boxes(new_box)

        for Box in self.Boxes[:]:
            Box.lastseen -= 1
            if Box.lastseen < 0:
                self.Boxes.remove(Box)

        self.draw_pizza()

        for line in lines:
            cv.Line(self.debug_frame, line[0], line[1], (255, 255, 0), 10,
                    cv.CV_AA, 0)
            cv.Circle(self.debug_frame, line[0], 15, (255, 0, 0), 2, 8, 0)
            cv.Circle(self.debug_frame, line[1], 15, (255, 0, 0), 2, 8, 0)

        self.output.pizza = self.Boxes
        anglesum = 0
        for Box in self.Boxes:
            Box.theta = (Box.center[0] - frame.width / 2) * 37 / (frame.width /
                                                                  2)
            Box.phi = -1 * (Box.center[1] -
                            frame.height / 2) * 36 / (frame.height / 2)
            anglesum += Box.angle
        if len(self.output.pizza) > 0:
            self.output.orientation = anglesum / len(self.output.pizza)
        else:
            self.output.orientation = None
        self.return_output()

        svr.debug("Pizza", self.debug_frame)
        svr.debug("Original", og_frame)
Example #5

#---------------------------------------------------------------------------------------------#
# Main code
# Load input image

for loopVar1 in range(1, 41):
    orig_img = cv.LoadImage(filename + str(loopVar1) + '.jpg', 1)
    detected_edges = cv.CreateImage((orig_img.width, orig_img.height),
                                    orig_img.depth, 1)
    cv.Canny(orig_img, detected_edges, canny_lowThreshold,
             canny_lowThreshold * canny_threshold_ratio,
             canny_kernel_size)  # Apply Canny detector
    cv.SaveImage(filename + str(loopVar1) + '_edge.jpg', detected_edges)
    lines = cv.HoughLines2(detected_edges, cv.CreateMemStorage(),
                           cv.CV_HOUGH_PROBABILISTIC, 1, cv.CV_PI / 180,
                           HT_VOTES_THRESHOLD, HT_MIN_LINE_LENGTH,
                           HT_MAX_DIST_BW_LINES)
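    # Intersect every pair of detected segments (in homogeneous coordinates) and keep intersections inside the image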
    corners = []
    filtered_lines = []
    for loopVar2 in range(len(lines)):
        for loopVar3 in range(len(lines)):
            corner = find_corner(lines[loopVar2], lines[loopVar3])
            if corner[2] != 0:
                corner = corner / corner[2]
            else:
                corner = [0, 0, 0]
            if (corner[0] > 0 and corner[0] < orig_img.width) and (
                    corner[1] > 0 and corner[1] < orig_img.height):
                flag = 0
                for item in corners:
                    if sqrt(sum(square(subtract([corner[0], corner[1]],
Example #6
 bi = vop2iplimage(b)
 # a is red
 # b is green/blue
 cv.Merge(bi, bi, ai, None, img)
 cv.ShowImage("merged", img)
 di = vop2iplimage(abs(a - b))
 cv.Threshold(di, di, 20.0, 255.0, cv.CV_THRESH_BINARY)
 cv.Erode(di, di)
 if 0:
     cv.Dilate(di, di)
 if 0:
     rgbdi = cv.CreateImage((640, 480), 8, 3)
     cv.Merge(di, di, di, None, rgbdi)
     if 0:
         li = cv.HoughLines2(di, cv.CreateMemStorage(),
                             cv.CV_HOUGH_PROBABILISTIC, 1,
                             math.pi / 180, 50, 200, 200)
         for a, b in li:
             cv.Line(rgbdi, a, b, (255, 0, 0), 3, 8)
     else:
         li = cv.HoughLines2(di, cv.CreateMemStorage(),
                             cv.CV_HOUGH_STANDARD, 1, math.pi / 180,
                             100, 0, 0)
         for rho, theta in li:
             a = math.cos(theta)
             b = math.sin(theta)
             x0 = a * rho
             y0 = b * rho
             cv.Line(rgbdi, (x0 + 1000 * -b, y0 + 1000 * a),
                     (x0 - 1000 * -b, y0 - 1000 * a), (255, 0, 0), 3, 8)
     cv.Merge(None, None, di, None, rgbdi)
Example #7
    def detect(self):
        self.detected = 0
        cv.Smooth(self.grey, self.dst2, cv.CV_GAUSSIAN, 3)
        cv.Laplace(self.dst2, self.d)
        cv.CmpS(self.d, 8, self.d2, cv.CV_CMP_GT)

        if self.onlyBlackCubes:
            # can also detect on black lines for improved robustness
            cv.CmpS(self.grey, 100, self.b, cv.CV_CMP_LT)
            cv.And(self.b, self.d2, self.d2)

        # these weights should be adaptive. We should always detect 100 lines
        if self.lastdetected > self.dects:
            self.THR = self.THR + 1

        if self.lastdetected < self.dects:
            self.THR = max(2, self.THR - 1)

        self.li = cv.HoughLines2(self.d2, cv.CreateMemStorage(),
                                 cv.CV_HOUGH_PROBABILISTIC, 1, 3.1415926 / 45,
                                 self.THR, 10, 5)

        # store angles for later
        angs = []
        for (p1, p2) in self.li:
            # cv.Line(sg,p1,p2,(0,255,0))
            a = atan2(p2[1] - p1[1], p2[0] - p1[0])
            if a < 0:
                a += pi
            angs.append(a)

        # log.info("THR %d, lastdetected %d, dects %d, houghlines %d, angles: %s" % (self.THR, self.lastdetected, self.dects, len(self.li), pformat(angs)))

        # lets look for lines that share a common end point
        t = 10
        totry = []

        for i in range(len(self.li)):
            p1, p2 = self.li[i]

            for j in range(i + 1, len(self.li)):
                q1, q2 = self.li[j]

                # test lengths are approximately consistent
                dd1 = sqrt((p2[0] - p1[0]) * (p2[0] - p1[0]) +
                           (p2[1] - p1[1]) * (p2[1] - p1[1]))
                dd2 = sqrt((q2[0] - q1[0]) * (q2[0] - q1[0]) +
                           (q2[1] - q1[1]) * (q2[1] - q1[1]))

                if max(dd1, dd2) / min(dd1, dd2) > 1.3:
                    continue

                matched = 0
                if areclose(p1, q2, t):
                    IT = (avg(p1, q2), p2, q1, dd1)
                    matched = matched + 1

                if areclose(p2, q2, t):
                    IT = (avg(p2, q2), p1, q1, dd1)
                    matched = matched + 1

                if areclose(p1, q1, t):
                    IT = (avg(p1, q1), p2, q2, dd1)
                    matched = matched + 1

                if areclose(p2, q1, t):
                    IT = (avg(p2, q1), q2, p1, dd1)
                    matched = matched + 1

                if matched == 0:
                    # not touching at corner... try also inner grid segments hypothesis?
                    self.p1 = (float(p1[0]), float(p1[1]))
                    self.p2 = (float(p2[0]), float(p2[1]))
                    self.q1 = (float(q1[0]), float(q1[1]))
                    self.q2 = (float(q2[0]), float(q2[1]))
                    success, (ua, ub), (x, y) = intersect_seg(
                        self.p1[0], self.p2[0], self.q1[0], self.q2[0],
                        self.p1[1], self.p2[1], self.q1[1], self.q2[1])

                    if success and ua > 0 and ua < 1 and ub > 0 and ub < 1:
                        # if they intersect
                        # cv.Line(sg, p1, p2, (255,255,255))
                        ok1 = 0
                        ok2 = 0

                        if abs(ua - 1.0 / 3) < 0.05:
                            ok1 = 1

                        if abs(ua - 2.0 / 3) < 0.05:
                            ok1 = 2

                        if abs(ub - 1.0 / 3) < 0.05:
                            ok2 = 1

                        if abs(ub - 2.0 / 3) < 0.05:
                            ok2 = 2

                        if ok1 > 0 and ok2 > 0:
                            # ok these are inner lines of grid
                            # flip if necessary
                            if ok1 == 2:
                                self.p1, self.p2 = self.p2, self.p1

                            if ok2 == 2:
                                self.q1, self.q2 = self.q2, self.q1

                            # both lines now go from p1->p2, q1->q2 and
                            # intersect at 1/3
                            # calculate IT
                            z1 = (self.q1[0] + 2.0 / 3 *
                                  (self.p2[0] - self.p1[0]), self.q1[1] +
                                  2.0 / 3 * (self.p2[1] - self.p1[1]))
                            z2 = (self.p1[0] + 2.0 / 3 *
                                  (self.q2[0] - self.q1[0]), self.p1[1] +
                                  2.0 / 3 * (self.q2[1] - self.q1[1]))
                            z = (self.p1[0] - 1.0 / 3 *
                                 (self.q2[0] - self.q1[0]), self.p1[1] -
                                 1.0 / 3 * (self.q2[1] - self.q1[1]))
                            IT = (z, z1, z2, dd1)
                            matched = 1

                # only single one matched!! Could be corner
                if matched == 1:

                    # also test angle
                    a1 = atan2(p2[1] - p1[1], p2[0] - p1[0])
                    a2 = atan2(q2[1] - q1[1], q2[0] - q1[0])

                    if a1 < 0:
                        a1 += pi

                    if a2 < 0:
                        a2 += pi

                    ang = abs(abs(a2 - a1) - pi / 2)

                    if ang < 0.5:
                        totry.append(IT)
                        # cv.Circle(sg, IT[0], 5, (255,255,255))

        # now check if any points in totry are consistent!
        # t=4
        res = []
        for i in range(len(totry)):

            p, p1, p2, dd = totry[i]
            a1 = atan2(p1[1] - p[1], p1[0] - p[0])
            a2 = atan2(p2[1] - p[1], p2[0] - p[0])

            if a1 < 0:
                a1 += pi

            if a2 < 0:
                a2 += pi

            dd = 1.7 * dd
            evidence = 0

            # cv.Line(sg,p,p2,(0,255,0))
            # cv.Line(sg,p,p1,(0,255,0))

            # affine transform to local coords
            A = matrix([[p2[0] - p[0], p1[0] - p[0], p[0]],
                        [p2[1] - p[1], p1[1] - p[1], p[1]], [0, 0, 1]])
            Ainv = A.I

            v = matrix([[p1[0]], [p1[1]], [1]])

            # check likelihood of this coordinate system. iterate all lines
            # and see how many align with grid
            for j in range(len(self.li)):

                # test angle consistency with either one of the two angles
                a = angs[j]
                ang1 = abs(abs(a - a1) - pi / 2)
                ang2 = abs(abs(a - a2) - pi / 2)

                if ang1 > 0.1 and ang2 > 0.1:
                    continue

                # test position consistency.
                q1, q2 = self.li[j]
                qwe = 0.06

                # test one endpoint
                v = matrix([[q1[0]], [q1[1]], [1]])
                vp = Ainv * v

                # project it
                if vp[0, 0] > 1.1 or vp[0, 0] < -0.1:
                    continue

                if vp[1, 0] > 1.1 or vp[1, 0] < -0.1:
                    continue

                if abs(vp[0, 0] - 1 / 3.0) > qwe and abs(vp[0, 0] - 2 / 3.0) > qwe and \
                        abs(vp[1, 0] - 1 / 3.0) > qwe and abs(vp[1, 0] - 2 / 3.0) > qwe:
                    continue

                # the other end point
                v = matrix([[q2[0]], [q2[1]], [1]])
                vp = Ainv * v

                if vp[0, 0] > 1.1 or vp[0, 0] < -0.1:
                    continue

                if vp[1, 0] > 1.1 or vp[1, 0] < -0.1:
                    continue

                if abs(vp[0, 0] - 1 / 3.0) > qwe and abs(vp[0, 0] - 2 / 3.0) > qwe and \
                        abs(vp[1, 0] - 1 / 3.0) > qwe and abs(vp[1, 0] - 2 / 3.0) > qwe:
                    continue

                # cv.Circle(sg, q1, 3, (255,255,0))
                # cv.Circle(sg, q2, 3, (255,255,0))
                # cv.Line(sg,q1,q2,(0,255,255))
                evidence += 1

            res.append((evidence, (p, p1, p2)))

        minch = 10000
        res.sort(reverse=True)
        # log.info("dects %s, res:\n%s" % (self.dects, pformat(res)))

        if len(res) > 0:
            minps = []
            pt = []

            # among good observations find best one that fits with last one
            for i in range(len(res)):

                if res[i][0] > 0.05 * self.dects:
                    # OK WE HAVE GRID
                    p, p1, p2 = res[i][1]
                    p3 = (p2[0] + p1[0] - p[0], p2[1] + p1[1] - p[1])

                    # cv.Line(sg,p,p1,(0,255,0),2)
                    # cv.Line(sg,p,p2,(0,255,0),2)
                    # cv.Line(sg,p2,p3,(0,255,0),2)
                    # cv.Line(sg,p3,p1,(0,255,0),2)
                    # cen=(0.5*p2[0]+0.5*p1[0],0.5*p2[1]+0.5*p1[1])
                    # cv.Circle(sg, cen, 20, (0,0,255),5)
                    # cv.Line(sg, (0,cen[1]), (320,cen[1]),(0,255,0),2)
                    # cv.Line(sg, (cen[0],0), (cen[0],240), (0,255,0),2)

                    w = [p, p1, p2, p3]
                    p3 = (self.prevface[2][0] + self.prevface[1][0] -
                          self.prevface[0][0], self.prevface[2][1] +
                          self.prevface[1][1] - self.prevface[0][1])
                    tc = (self.prevface[0], self.prevface[1], self.prevface[2],
                          p3)
                    ch = compfaces(w, tc)

                    # log.info("ch %s, minch %s" % (ch, minch))
                    if ch < minch:
                        minch = ch
                        minps = (p, p1, p2)

            # log.info("minch %d, minps:\n%s" % (minch, pformat(minps)))

            if len(minps) > 0:
                self.prevface = minps

                if minch < 10:
                    # good enough!
                    self.succ += 1
                    self.pt = self.prevface
                    self.detected = 1
                    # log.info("detected %d, succ %d" % (self.detected, self.succ))

            else:
                self.succ = 0

            # log.info("succ %d\n\n" % self.succ)

            # we matched a few times same grid
            # coincidence? I think NOT!!! Init LK tracker
            if self.succ > 2:

                # initialize features for LK
                pt = []
                for i in [1.0 / 3, 2.0 / 3]:
                    for j in [1.0 / 3, 2.0 / 3]:
                        pt.append(
                            (self.p0[0] + i * self.v1[0] + j * self.v2[0],
                             self.p0[1] + i * self.v1[1] + j * self.v2[1]))

                self.features = pt
                self.tracking = True
                self.succ = 0
                log.info("non-tracking -> tracking: succ %d" % self.succ)
Example #8
        src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)


    cv.NamedWindow("Source", 1)
    cv.NamedWindow("Hough", 1)

    while True:
        dst = cv.CreateImage(cv.GetSize(src), 8, 1)
        color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
        storage = cv.CreateMemStorage(0)
        lines = 0
        cv.Canny(src, dst, 50, 200, 3)
        cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)

        if USE_STANDARD:
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
            for (rho, theta) in lines[:100]:
                a = cos(theta)
                b = sin(theta)
                x0 = a * rho 
                y0 = b * rho
                pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
                pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
                cv.Line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)
        else:
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10)
            for line in lines:
                cv.Line(color_dst, line[0], line[1], cv.CV_RGB(255, 0, 0), 3, 8)

        cv.ShowImage("Source", src)
        cv.ShowImage("Hough", color_dst)
Example #9
    def process_frame(self, frame):

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)

        found_hedge = False

        test_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)

        cv.Copy(frame, test_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to have value channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 2)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology

        kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
        #cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 4)

        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        #cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        '''
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
            rho=1,
            theta=math.pi/180,
            threshold=self.hough_threshold,
            param1=0,
            param2=0
        )
        '''
        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_PROBABILISTIC,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=self.min_length,
                                   param2=self.max_gap)

        self.hor_lines = []

        for line in raw_lines:
            slope = line_slope(line[0], line[1])
            if slope is None:
                continue
            if math.fabs(line_slope(line[0], line[1])) < self.hor_threshold:
                self.hor_lines.append(line)

        max_length = 0

        for line in self.hor_lines:
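            # Keep the longest near-horizontal segment as the crossbar candidate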
            print line
            if math.fabs(line_distance(line[0], line[1])) > max_length:
                max_length = math.fabs(line_distance(line[0], line[1]))
                crossbar_seg = line
        '''
        # Get vertical lines
        vertical_lines = []
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
                line[1] > math.pi-self.vertical_threshold:

                vertical_lines.append( (abs(line[0]), line[1]) )

        # Group vertical lines
        vertical_line_groups = []  # A list of line groups which are each a line list
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos)/len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        # Get horizontal lines
        horizontal_lines = []
        for line in raw_lines:
            dist_from_horizontal = (math.pi/2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
                dist_from_horizontal > math.pi-self.horizontal_threshold:

                horizontal_lines.append( (abs(line[0]), line[1]) )

        # Group horizontal lines
        horizontal_line_groups = []  # A list of line groups which are each a line list
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) is 1:
            self.seen_crossbar = True
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos)/len(rhos), circular_average(angles, math.pi))
            horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        self.left_pole = None
        self.right_pole = None
        if len(vertical_lines) is 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2
            self.right_pole = round(max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width/2
        #TODO: If one pole is seen, is it left or right pole?

        # Calculate planar distance r (assuming we are moving perpendicular to
        # the hedge)
        if self.left_pole and self.right_pole:
            theta = abs(self.left_pole - self.right_pole)
            self.r = 3 / tan(radians(theta/2))
        else:
            self.r = None

        if self.r and self.seen_crossbar:
            bar_phi = (-1*horizontal_lines[0][0] + frame.height/2) / (frame.height/2) * 32
            self.crossbar_depth = self.r * atan(radians(bar_phi))
        else:
            self.crossbar_depth = None
        '''
        self.left_pole = None
        self.right_pole = None
        self.seen_crossbar = False
        self.crossbar_depth = None

        if self.debug and max_length != 0:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)

            #libvision.misc.draw_lines(frame, vertical_lines)
            #libvision.misc.draw_lines(frame, horizontal_lines)
            # for line in raw_lines:
            #    cv.Line(frame,line[0],line[1], (255,255,0), 10, cv.CV_AA, 0)
            #    cv.Circle(frame, line[1], 15, (255,0,0), 2,8,0)
            # print len(raw_lines)
            #cv.ShowImage("Hedge", cv.CloneImage(frame))
            if (crossbar_seg[0][0] - frame.width / 2) * 37 / (
                    frame.width / 2) < (crossbar_seg[1][0] - frame.width /
                                        2) * 37 / (frame.width / 2):
                self.left_pole = round((crossbar_seg[0][0] - frame.width / 2) *
                                       37 / (frame.width / 2))
                self.right_pole = round(
                    (crossbar_seg[1][0] - frame.width / 2) * 37 /
                    (frame.width / 2))
            else:
                self.left_pole = round((crossbar_seg[1][0] - frame.width / 2) *
                                       37 / (frame.width / 2))
                self.right_pole = round(
                    (crossbar_seg[0][0] - frame.width / 2) * 37 /
                    (frame.width / 2))
            self.crossbar_depth = round(
                -1 * (crossbar_seg[1][1] - frame.height / 2) * 36 /
                (frame.height / 2))

            if math.fabs(self.left_pole) <= 37 and math.fabs(
                    self.left_pole) >= self.frame_boundary_thresh:
                self.left_pole = None
            if math.fabs(self.right_pole) <= 37 and math.fabs(
                    self.right_pole) >= self.frame_boundary_thresh:
                self.right_pole = None

            self.seen_crossbar = True

            if self.left_pole and self.right_pole:

                self.returning = (self.left_pole + self.right_pole) / 2
                print "Returning ", self.returning

                if self.last_seen < 0:
                    self.last_center = None
                    self.last_seen = 0
                if self.last_center is None:
                    self.last_center = self.returning
                    self.seen_count = 1
                elif math.fabs(self.last_center -
                               self.returning) < self.center_trans_thresh:
                    self.seen_count += 1
                    self.last_seen += 2
                else:
                    self.last_seen -= 1

                if self.seen_count < self.seen_count_thresh:
                    self.left_pole = None
                    self.right_pole = None
                else:
                    print "FOUND CENTER AND RETURNED IT"
                    self.found = True
            else:
                self.returning = 0
                if self.last_seen < 0:
                    self.last_center = None
                    self.last_seen = 0
                self.last_seen -= 1
                self.left_pole = None
                self.right_pole = None

            cv.Line(frame, crossbar_seg[0], crossbar_seg[1], (255, 255, 0), 10,
                    cv.CV_AA, 0)
            if self.left_pole and crossbar_seg[0][0] < crossbar_seg[1][0]:

                cv.Line(frame, crossbar_seg[0],
                        (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            elif self.left_pole:
                cv.Line(frame, crossbar_seg[1],
                        (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)

            if self.right_pole and crossbar_seg[0][0] > crossbar_seg[1][0]:

                cv.Line(frame, crossbar_seg[0],
                        (crossbar_seg[0][0], crossbar_seg[0][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)
            elif self.right_pole:
                cv.Line(frame, crossbar_seg[1],
                        (crossbar_seg[1][0], crossbar_seg[1][1] - 500),
                        (255, 0, 0), 10, cv.CV_AA, 0)

            # populate self.output with infos
            self.output.seen_crossbar = self.seen_crossbar
            self.output.left_pole = self.left_pole
            self.output.right_pole = self.right_pole
            #self.output.r = self.r
            self.output.crossbar_depth = self.crossbar_depth

            self.return_output()
            print self
        else:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)

        svr.debug("Hedge", cv.CloneImage(frame))
        svr.debug("Hedge2", test_frame)
Example #10
def findHoughLines():
    """ Uses the Hough transformation to find lines from the sensor
        readings and displays them
    """
    global D

    # initialize lists
    D.lines = []
    D.theta = []
    D.distance = []
    D.midpoint = []

    # sensitivity options for Hough transformation
    threshold = 20
    min_line_len = 10
    max_gap_len = 30
    line_width = 1

    # source for Hough transformation has dots instead of lines
    src = D.hough

    # prepare image and destination for Hough transformation
    dst = cv.CreateImage(cv.GetSize(src), 8, 1)
    D.color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
    storage = cv.CreateMemStorage(0)
    lines = 0
    cv.Canny(src, dst, 50, 200, 3)
    cv.CvtColor(dst, D.color_dst, cv.CV_GRAY2BGR)

    # apply Hough transformation to find walls
    # For more information, see:
    # http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.html
    lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, line_width, \
                           pi / 180, threshold, min_line_len, max_gap_len)

    # draw the danger zone
    cv.Rectangle(D.image, (CENTER - 30, CENTER - 90),
                 (CENTER + 30, CENTER - 30), cv.RGB(25, 25, 112), 2, 8)

    for line in lines:
        cv.Line(D.color_dst, line[0], line[1], cv.CV_RGB(0, 255, 0), 1, 8)

        # storing the lines and their distances
        D.lines.append((line[0], line[1]))
        x1 = float(line[0][0])
        y1 = float(line[0][1])
        x2 = float(line[1][0])
        y2 = float(line[1][1])
        x3 = float(CENTER)
        y3 = float(CENTER)

        # find the midpoint, the angle, and the distance to center
        midpoint = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        theta = atan2((y2 - y1), (x2 - x1)) / pi * 180

        if (x2 - x1) != 0:
            slope = (y2 - y1) / (x2 - x1)
            intercept = (x2 * y1 - x1 * y2) / (x2 - x1)
            distance = abs(y3 - slope * x3 - intercept) / sqrt(slope**2 + 1)
        else:
            distance = abs(x2 - x3)

        cv.Line(D.image, line[0], line[1], cv.CV_RGB(0, 255, 0), 1, 8)
        cv.Line(D.image, midpoint, midpoint, cv.RGB(255, 255, 255), 4, 8)

        # add data to the list
        D.theta.append(theta)
        D.distance.append(distance)
        D.midpoint.append(midpoint)
Example #11
    def process_frame(self, frame):
        found_path = False
        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # use RGB color finder
        binary = libvision.cmodules.target_color_rgb.find_target_color_rgb(frame, 250, 125, 0, 1500, 500, .3)

        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        lines = cv.HoughLines2(binary, line_storage, cv.CV_HOUGH_STANDARD,
                               rho=1,
                               theta=math.pi / 180,
                               threshold=self.hough_threshold,
                               param1=0,
                               param2=0
                               )
        lines = lines[:self.lines_to_consider]  # Limit number of lines

        # If there are at least 2 lines and they are close to parallel...
        # There's a path!
        if len(lines) >= 2:

            # Find: min, max, average
            theta_max = lines[0][1]
            theta_min = lines[0][1]
            total_theta = 0
            for rho, theta in lines:
                total_theta += theta
                if theta_max < theta:
                    theta_max = theta
                if theta_min > theta:
                    theta_min = theta

            theta_range = theta_max - theta_min
            # Near vertical angles will wrap around from pi to 0.  If the range
            # crosses this vertical line, the range will be way too large.  To
            # correct for this, we always take the smallest angle between the
            # min and max.
            if theta_range > math.pi / 2:
                theta_range = math.pi - theta_range

            if theta_range < self.theta_threshold:
                found_path = True

                angles = map(lambda line: line[1], lines)
                self.theta = circular_average(angles, math.pi)

        if found_path:
            self.seen_in_a_row += 1
        else:
            self.seen_in_a_row = 0

        # stores whether or not we are confident about the path's presence
        object_present = False

        if self.seen_in_a_row >= self.seen_in_a_row_threshold:
            object_present = True
            self.image_coordinate_center = self.find_centroid(binary)
            # Move the origin to the center of the image
            self.center = (
                self.image_coordinate_center[0] - frame.width / 2,
                self.image_coordinate_center[1] * -1 + frame.height / 2
            )

        if self.debug:

            # Show color filtered
            color_filtered_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(color_filtered, color_filtered_rgb, cv.CV_GRAY2RGB)
            cv.SubS(color_filtered_rgb, (255, 0, 0), color_filtered_rgb)
            cv.Sub(frame, color_filtered_rgb, frame)

            # Show edges
            binary_rgb = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(binary, binary_rgb, cv.CV_GRAY2RGB)
            cv.Add(frame, binary_rgb, frame)  # Add white to edge pixels
            cv.SubS(binary_rgb, (0, 0, 255), binary_rgb)
            cv.Sub(frame, binary_rgb, frame)  # Remove all but Red

            # Show lines
            if self.seen_in_a_row >= self.seen_in_a_row_threshold:
                rounded_center = (
                    int(round(self.image_coordinate_center[0])),
                    int(round(self.image_coordinate_center[1])),
                )
                cv.Circle(frame, rounded_center, 5, (0, 255, 0))
                libvision.misc.draw_lines(frame, [(frame.width / 2, self.theta)])
            else:
                libvision.misc.draw_lines(frame, lines)

            #cv.ShowImage("Path", frame)
            svr.debug("Path", frame)

        # populate self.output with info
        self.output.found = object_present
        self.output.theta = self.theta

        if self.center:
            # scale center coordinates of path based on frame size
            self.output.x = self.center[0] / (frame.width / 2)
            self.output.y = self.center[1] / (frame.height / 2)
            libvision.misc.draw_linesC(frame, [(frame.width / 2, self.output.theta)], [255, 0, 255])
            print "Output Returned!!! ", self.output.theta
        else:
            self.output.x = None
            self.output.y = None
	    print "No output..."

        if self.output.found and self.center:
            print self.output

        self.return_output()
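
Example #11 relies on a circular_average(angles, period) helper that is not shown in these snippets. A minimal sketch of how such a helper might look (a hypothetical implementation, not necessarily the one libvision ships) averages the angles as unit vectors so that values wrapping around at the period are handled correctly:

    import math

    def circular_average(angles, period=math.pi):
        # Hough angles wrap around at `period` (pi for undirected lines), so a
        # plain arithmetic mean misbehaves near the wrap point. Mapping each
        # angle onto the unit circle and averaging the vectors avoids that.
        scale = 2 * math.pi / period
        sin_sum = sum(math.sin(a * scale) for a in angles)
        cos_sum = sum(math.cos(a * scale) for a in angles)
        return (math.atan2(sin_sum, cos_sum) / scale) % period

For instance, circular_average([0.05, math.pi - 0.05]) comes out near 0 (a vertical line), whereas the plain arithmetic mean would misleadingly report roughly pi / 2 (horizontal).
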
Example #12
    def process_frame(self, frame):
        self.numpy_frame = libvision.cv_to_cv2(frame)
        self.debug_frame = self.numpy_frame.copy()
        self.test_frame = self.numpy_frame.copy()

        self.numpy_frame = cv2.medianBlur(self.numpy_frame, 7)
        self.numpy_frame = cv2.cvtColor(self.numpy_frame, cv2.COLOR_BGR2HSV)

        (rf1, rf2, rf3) = cv2.split(self.numpy_frame)

        Rbinary = rf3
        Gbinary = rf1

        # Adaptive Threshold
        Rbinary = cv2.adaptiveThreshold(Rbinary, 255,
                                        cv2.ADAPTIVE_THRESH_MEAN_C,
                                        cv2.THRESH_BINARY_INV,
                                        self.adaptive_thresh_blocksize,
                                        self.adaptive_thresh)

        Gbinary = cv2.adaptiveThreshold(Gbinary, 255,
                                        cv2.ADAPTIVE_THRESH_MEAN_C,
                                        cv2.THRESH_BINARY_INV,
                                        self.Gadaptive_thresh_blocksize,
                                        self.Gadaptive_thresh)

        # Morphology
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

        Rbinary = cv2.erode(Rbinary, kernel)
        Rbinary = cv2.dilate(Rbinary, kernel)
        Gbinary = cv2.erode(Gbinary, kernel)
        Gbinary = cv2.dilate(Gbinary, kernel)

        Rframe = cv2.cvtColor(Rbinary, cv2.COLOR_GRAY2RGB)
        Gframe = cv2.cvtColor(Gbinary, cv2.COLOR_GRAY2RGB)

        # Hough Transform
        raw_linesG = cv2.HoughLines(Gbinary,
                                    rho=1,
                                    theta=math.pi / 180,
                                    threshold=self.hough_thresholdG)

        # Get vertical lines
        vertical_linesG = []
        for line in (raw_linesG[0] if raw_linesG is not None else []):
            rho = line[0]
            theta = line[1]
            if theta < self.vertical_thresholdG or \
                    theta > math.pi - self.vertical_thresholdG:

                vertical_linesG.append((abs(rho), theta))

        # Group vertical lines
        # A list of line groups, each of which is a list of lines
        vertical_line_groupsG = []
        for line in vertical_linesG:
            group_found = False
            for line_group in vertical_line_groupsG:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groupsG.append([line])

        # Average line groups into lines
        vertical_linesG = []
        for line_group in vertical_line_groupsG:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_linesG.append(line)

        # Get horizontal lines
        horizontal_lines = []
        for line in (raw_linesG[0] if raw_linesG is not None else []):
            rho = line[0]
            theta = line[1]
            dist_from_horizontal = (math.pi / 2 + theta) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
                    dist_from_horizontal > math.pi - self.horizontal_threshold:

                horizontal_lines.append((abs(rho), theta))

        # Group horizontal lines
        # A list of line groups, each of which is a list of lines
        horizontal_line_groups = []
        print "Horizontal lines: ",
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) == 1:
            self.seen_crossbar = True
            rhos = map(lambda line: line[0], horizontal_line_groups[0])
            angles = map(lambda line: line[1], horizontal_line_groups[0])
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        self.left_pole = None
        self.right_pole = None

        Rframe = libvision.cv2_to_cv(Rframe)
        Gframe = libvision.cv2_to_cv(self.debug_frame)
        Rbinary = libvision.cv2_to_cv(Rbinary)
        self.debug_frame = libvision.cv2_to_cv(self.debug_frame)
        self.test_frame = libvision.cv2_to_cv(self.test_frame)
        Gbinary = libvision.cv2_to_cv(Gbinary)

        if len(vertical_linesG) == 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_linesG[0][0], vertical_linesG[1][0]),
                2) - width / 2
            self.right_pole = round(
                max(vertical_linesG[0][0], vertical_linesG[1][0]),
                2) - width / 2
        # TODO: If one pole is seen, is it left or right pole?

        # Calculate planar distance r (assuming we are moving perpendicular to
        # the hedge)
        if self.left_pole is not None and self.right_pole is not None:
            theta = abs(self.left_pole - self.right_pole)
            self.r = 3 / math.tan(math.radians(theta / 2))
        else:
            self.r = None

        if self.r and self.seen_crossbar:
            bar_phi = (-1 * horizontal_lines[0][0] +
                       Gframe.height / 2) / (Gframe.height / 2) * 32
            self.crossbar_depth = self.r * math.atan(math.radians(bar_phi))
        else:
            self.crossbar_depth = None

        # Line Finding on Red pvc
        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_linesR = cv.HoughLines2(Rbinary,
                                    line_storage,
                                    cv.CV_HOUGH_STANDARD,
                                    rho=1,
                                    theta=math.pi / 180,
                                    threshold=self.hough_thresholdR,
                                    param1=0,
                                    param2=0)

        # Get vertical lines
        vertical_linesR = []
        for line in raw_linesR:
            if line[1] < self.vertical_thresholdR or \
               line[1] > math.pi - self.vertical_thresholdR:

                vertical_linesR.append((abs(line[0]), line[1]))

        # Group vertical lines
        # A list of line groups, each of which is a list of lines
        vertical_line_groupsR = []
        for line in vertical_linesR:
            group_found = False
            for line_group in vertical_line_groupsR:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groupsR.append([line])

        # Average line groups into lines
        vertical_linesR = []
        for line_group in vertical_line_groupsR:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_linesR.append(line)
        '''
        for red_line in vertical_linesR:
            print "Red Line:", red_line[0],", ",red_line[1]
        for green_line in vertical_linesG:
            print "Green Line:", green_line[0],", ",green_line[1]
        '''
        for red_line in vertical_linesR:
            for green_line in vertical_linesG[:]:
                if math.fabs(green_line[0] - red_line[0]) < self.GR_Threshold0 and \
                   math.fabs(green_line[1] - red_line[1]) < self.GR_Threshold1:
                    vertical_linesG.remove(green_line)

        for red_line in vertical_linesR:
            print "New Red Line:", red_line[0], ", ", red_line[1]
        for green_line in vertical_linesG:
            print "New Green Line:", green_line[0], ", ", green_line[1]

        if len(vertical_linesR) == 0:
            print "No Red Found"

        self.left_pole = None
        self.right_pole = None
        if len(vertical_linesR) == 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_linesR[0][0], vertical_linesR[1][0]),
                2) - width / 2
            self.right_pole = round(
                max(vertical_linesR[0][0], vertical_linesR[1][0]),
                2) - width / 2
        # TODO: If one pole is seen, is it left or right pole?

        # Calculate planar distance r (assuming we are moving perpendicular to
        # the hedge)
        if self.left_pole is not None and self.right_pole is not None:
            theta = abs(self.left_pole - self.right_pole)
            self.r = 3 / math.tan(math.radians(theta / 2))
        else:
            self.r = None

        for i in range(len(vertical_linesR)):
            if vertical_linesR[i][1] > math.pi / 2:
                vertical_linesR[i] = (vertical_linesR[i][0],
                                      -(math.pi - vertical_linesR[i][1]))
                print "Line changed to ", vertical_linesR[i]
        for line in vertical_linesR:
            print line
            if line[1] > math.pi / 2:
                line = (line[0], math.pi - line[1])
                print "Line changed to ", line

        libvision.misc.draw_lines(Gframe, vertical_linesG)
        libvision.misc.draw_lines(Gframe, horizontal_lines)
        libvision.misc.draw_lines(Rframe, vertical_linesR)

        # there was a merge error, these 3 lines conflicted b/c your copy out of date

        for line in vertical_linesR:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            x = line[0] * math.cos(line[1])
            y = line[0] * math.sin(line[1])
            cv.Circle(Rframe, (int(x), int(y)), 5, (0, 255, 0), -1, 8, 0)
            if x > width or y > height or x < 0 or y < 0:
                print "Lost point  ", x

        svr.debug("Original", self.test_frame)
        svr.debug("Red", Rframe)
        svr.debug("Green", Gframe)
Example #13
    def process_frame(self, frame):
        (w, h) = cv.GetSize(frame)

        #generate hue selection frames

        #create locations for a pair of test frames
        frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

        #use the red channel for the binary frame (just for debugging purposes)
        cv.Copy(frame, frametest)
        cv.SetImageCOI(frametest, 3)
        cv.Copy(frametest, binarytest)
        cv.SetImageCOI(frametest, 0)  #reset COI
        #svr.debug("R?",binarytest)

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)

        found_gate = False

        #create a new frame just for comparison purposes
        unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, unchanged_frame)

        #apply a coarse noise filter
        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to the hue channel (COI 1 = first HSV plane)
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)  #reset COI

        #shift hue of image such that orange->red are at top of spectrum
        binary = libvision.misc.cv_to_cv2(binary)
        binary = libvision.misc.shift_hueCV2(binary, self.target_shift)
        binary = libvision.misc.cv2_to_cv(binary)

        #correct for wraparound on red spectrum
        #cv.InRange(binary,a_array,b_array,binarytest) #generate mask
        #cv.Add(binary,cv.fromarray(ones*180),binary,mask=binarytest) #use mask to selectively add values
        #svr.debug("R2?",binary)
        svr.debug("R2?", binary)

        #run adaptive threshold for edge detection and more noise filtering
        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)
        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        # Get vertical lines
        vertical_lines = []
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
               line[1] > math.pi-self.vertical_threshold:

                #absolute value does better grouping currently
                vertical_lines.append((abs(line[0]), line[1]))

        #log how many raw Hough lines survive the vertical filter
        logging.debug("{} possibilities reduced to {} lines".format(
            len(raw_lines), len(vertical_lines)))

        # Group vertical lines
        # A list of line groups, each of which is a list of lines
        vertical_line_groups = []
        i = 0
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:
                i += 1
                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        #quick debugging statement
        logging.debug("{} internal iterations for {} groups".format(
            i, len(vertical_line_groups)))

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        ####################################################
        #vvvv Horizontal line code isn't used for anything

        # Get horizontal lines
        horizontal_lines = []
        for line in raw_lines:
            dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
               dist_from_horizontal > math.pi-self.horizontal_threshold:

                horizontal_lines.append((abs(line[0]), line[1]))

        # Group horizontal lines
        # A list of line groups, each of which is a list of lines
        horizontal_line_groups = []
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) == 1:
            self.seen_crossbar = True
            if self.debug:
                rhos = map(lambda line: line[0], horizontal_line_groups[0])
                angles = map(lambda line: line[1], horizontal_line_groups[0])
                line = (sum(rhos) / len(rhos),
                        circular_average(angles, math.pi))
                horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        #^^^ Horizontal line code isn't used for anything
        ###################################################

        self.left_pole = None
        self.right_pole = None
        #print vertical_lines
        self.returning = 0
        self.found = False

        if len(vertical_lines) == 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
            self.right_pole = round(
                max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2

            self.returning = (self.left_pole + self.right_pole) / 2
            logging.info("Returning {} as gate center delta.".format(
                self.returning))

            #initialize first iteration with 2 known poles
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0

            #increment a counter if result is good.
            if self.last_center is None:
                self.last_center = self.returning
                self.seen_count = 1
            elif math.fabs(self.last_center -
                           self.returning) < self.center_trans_thresh:
                self.seen_count += 1
                self.last_seen += 2
            else:
                self.last_seen -= 1

            #if not convinced, forget left/right pole; else proclaim success.
            if self.seen_count < self.seen_count_thresh:
                self.left_pole = None
                self.right_pole = None
            else:
                print "FOUND CENTER AND RETURNED IT"
                self.found = True
        else:
            self.returning = 0
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            self.last_seen -= 1
            self.left_pole = None
            self.right_pole = None

        #TODO: If one pole is seen, is it left or right pole?

        if self.debug:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
            libvision.misc.draw_lines(frame, vertical_lines)
            libvision.misc.draw_lines(frame, horizontal_lines)

            if self.found:
                cv.Circle(frame, (int(frame.width / 2 + self.returning),
                                  int(frame.height / 2)), 15, (0, 255, 0), 2,
                          8, 0)
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
                cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                           font, (255, 255, 0))
                #print frame.width

            #cv.ShowImage("Gate", cv.CloneImage(frame))
            svr.debug("Gate", cv.CloneImage(frame))
            svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

        #populate self.output with info
        self.output.seen_crossbar = self.seen_crossbar
        self.output.left_pole = self.left_pole
        self.output.right_pole = self.right_pole

        self.return_output()
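
The seen_count / last_seen / last_center bookkeeping in this example is a temporal debounce: the gate center is only reported once it has stayed roughly in place for several consecutive frames. Pulled out on its own (the names and structure below are assumptions for illustration, not part of the original class), the logic looks roughly like this:

    class CenterDebouncer(object):
        # Hypothetical standalone version of the stability check above.

        def __init__(self, trans_thresh, count_thresh):
            self.center_trans_thresh = trans_thresh
            self.seen_count_thresh = count_thresh
            self.last_center = None
            self.last_seen = 0
            self.seen_count = 0

        def update(self, center):
            # Feed one center estimate per frame (None when nothing was seen);
            # returns True once the estimate has been stable long enough.
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            if center is None:
                self.last_seen -= 1
                return False
            if self.last_center is None:
                self.last_center = center
                self.seen_count = 1
            elif abs(self.last_center - center) < self.center_trans_thresh:
                self.seen_count += 1
                self.last_seen += 2
            else:
                self.last_seen -= 1
            return self.seen_count >= self.seen_count_thresh
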
Example #14
    # threshold trackbar
    cv.CreateTrackbar(trackbar_threshold_name, window_threshold, 0, 255,
                      threshold_trackbar)
    threshold_trackbar(100)

    # edge-detect trackbar
    cv.CreateTrackbar(trackbar_edge_name, window_edge, 0, 255, edge_trackbar)
    edge_trackbar(100)

    cv.Threshold(src, dst, 200, 255, cv.CV_THRESH_BINARY)
    cv.ShowImage(window_output, dst)
    color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
    storage = cv.CreateMemStorage(0)
    cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)
    #lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, math.pi / 180, 100, 0, 0)
    lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 0.5,
                           math.pi / 180, 50, 50, 10)
    #lines=[]

    for (rho, theta) in lines[:100]:
        a = math.cos(theta)
        b = math.sin(theta)
        x0 = a * rho
        y0 = b * rho
        pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
        pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
        cv.Line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)

    cv.ShowImage('image', color_dst)

    if cv2.waitKey(0) & 0xFF == 27:
        cv2.destroyAllWindows()
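
Example #14 mixes the legacy cv module with cv2. The same standard-transform drawing loop can be written against the cv2 API alone; the sketch below assumes a single-channel edge image named edges (for example from cv2.Canny) and a BGR image named color_dst to draw on:

    import math
    import cv2

    def draw_standard_hough(edges, color_dst, threshold=100):
        # cv2.HoughLines returns None when no line clears the threshold.
        lines = cv2.HoughLines(edges, 1, math.pi / 180, threshold)
        if lines is None:
            return color_dst
        # reshape(-1, 2) tolerates both the (1, N, 2) and (N, 1, 2) layouts
        # returned by different OpenCV releases.
        for rho, theta in lines.reshape(-1, 2)[:100]:
            a, b = math.cos(theta), math.sin(theta)
            x0, y0 = a * rho, b * rho
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
            cv2.line(color_dst, pt1, pt2, (0, 0, 255), 3)
        return color_dst
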
Example #15
    def process_frame(self, frame):
        frametest = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binarytest = cv.CreateImage(cv.GetSize(frame), 8, 1)

        cv.Copy(frame, frametest)
        cv.SetImageCOI(frametest, 3)
        cv.Copy(frametest, binarytest)
        cv.SetImageCOI(frametest, 0)
        svr.debug("R?", binarytest)

        # Resize image to 320x240
        #copy = cv.CreateImage(cv.GetSize(frame), 8, 3)
        #cv.Copy(frame, copy)
        #cv.SetImageROI(frame, (0, 0, 320, 240))
        #cv.Resize(copy, frame, cv.CV_INTER_NN)

        found_gate = False

        unchanged_frame = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.Copy(frame, unchanged_frame)

        cv.Smooth(frame, frame, cv.CV_MEDIAN, 7, 7)

        # Set binary image to the hue channel (COI 1 = first HSV plane)
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        binary = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        cv.SetImageCOI(hsv, 1)
        cv.Copy(hsv, binary)
        cv.SetImageCOI(hsv, 0)

        cv.AdaptiveThreshold(
            binary,
            binary,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.adaptive_thresh_blocksize,
            self.adaptive_thresh,
        )

        # Morphology
        kernel = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(binary, binary, kernel, 1)
        cv.Dilate(binary, binary, kernel, 1)
        if self.debug:
            color_filtered = cv.CloneImage(binary)

        # Get Edges
        cv.Canny(binary, binary, 30, 40)

        # Hough Transform
        line_storage = cv.CreateMemStorage()
        raw_lines = cv.HoughLines2(binary,
                                   line_storage,
                                   cv.CV_HOUGH_STANDARD,
                                   rho=1,
                                   theta=math.pi / 180,
                                   threshold=self.hough_threshold,
                                   param1=0,
                                   param2=0)

        # Get vertical lines
        vertical_lines = []
        for line in raw_lines:
            if line[1] < self.vertical_threshold or \
               line[1] > math.pi-self.vertical_threshold:

                #absolute value does better grouping currently
                vertical_lines.append((abs(line[0]), line[1]))

        # Group vertical lines
        # A list of line groups, each of which is a list of lines
        vertical_line_groups = []
        for line in vertical_lines:
            group_found = False
            for line_group in vertical_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                vertical_line_groups.append([line])

        # Average line groups into lines
        vertical_lines = []
        for line_group in vertical_line_groups:
            rhos = map(lambda line: line[0], line_group)
            angles = map(lambda line: line[1], line_group)
            line = (sum(rhos) / len(rhos), circular_average(angles, math.pi))
            vertical_lines.append(line)

        # Get horizontal lines
        horizontal_lines = []
        for line in raw_lines:
            dist_from_horizontal = (math.pi / 2 + line[1]) % math.pi
            if dist_from_horizontal < self.horizontal_threshold or \
               dist_from_horizontal > math.pi-self.horizontal_threshold:

                horizontal_lines.append((abs(line[0]), line[1]))

        # Group horizontal lines
        # A list of line groups, each of which is a list of lines
        horizontal_line_groups = []
        for line in horizontal_lines:
            group_found = False
            for line_group in horizontal_line_groups:

                if line_group_accept_test(line_group, line, self.max_range):
                    line_group.append(line)
                    group_found = True

            if not group_found:
                horizontal_line_groups.append([line])

        if len(horizontal_line_groups) == 1:
            self.seen_crossbar = True
            if self.debug:
                rhos = map(lambda line: line[0], horizontal_line_groups[0])
                angles = map(lambda line: line[1], horizontal_line_groups[0])
                line = (sum(rhos) / len(rhos),
                        circular_average(angles, math.pi))
                horizontal_lines = [line]
        else:
            self.seen_crossbar = False
            horizontal_lines = []

        self.left_pole = None
        self.right_pole = None
        print vertical_lines
        self.returning = 0
        self.found = False
        if len(vertical_lines) == 2:
            roi = cv.GetImageROI(frame)
            width = roi[2]
            height = roi[3]
            self.left_pole = round(
                min(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2
            self.right_pole = round(
                max(vertical_lines[0][0], vertical_lines[1][0]), 2) - width / 2

            self.returning = (self.left_pole + self.right_pole) / 2
            print "Returning ", self.returning

            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            if self.last_center is None:
                self.last_center = self.returning
                self.seen_count = 1
            elif math.fabs(self.last_center -
                           self.returning) < self.center_trans_thresh:
                self.seen_count += 1
                self.last_seen += 2
            else:
                self.last_seen -= 1

            if self.seen_count < self.seen_count_thresh:
                self.left_pole = None
                self.right_pole = None
            else:
                print "FOUND CENTER AND RETURNED IT"
                self.found = True
        else:
            self.returning = 0
            if self.last_seen < 0:
                self.last_center = None
                self.last_seen = 0
            self.last_seen -= 1
            self.left_pole = None
            self.right_pole = None

        #TODO: If one pole is seen, is it left or right pole?

        if self.debug:
            cv.CvtColor(color_filtered, frame, cv.CV_GRAY2RGB)
            libvision.misc.draw_lines(frame, vertical_lines)
            libvision.misc.draw_lines(frame, horizontal_lines)

            if self.found:
                cv.Circle(frame, (int(frame.width / 2 + self.returning),
                                  int(frame.height / 2)), 15, (0, 255, 0), 2,
                          8, 0)
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 1, 3)
                cv.PutText(frame, "Gate Sent to Mission Control", (100, 400),
                           font, (255, 255, 0))
                print frame.width

            #cv.ShowImage("Gate", cv.CloneImage(frame))
            svr.debug("Gate", cv.CloneImage(frame))
            svr.debug("Unchanged", cv.CloneImage(unchanged_frame))

        #populate self.output with info
        self.output.seen_crossbar = self.seen_crossbar
        self.output.left_pole = self.left_pole
        self.output.right_pole = self.right_pole

        self.return_output()
        print self
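
Examples #13 and #15 pull a single HSV plane out of the frame through the SetImageCOI / Copy sequence on an IplImage (COI indices are 1-based, so COI 1 selects the first channel). With numpy-backed frames the same step is a one-liner via cv2; a minimal sketch, assuming frame is a BGR numpy array:

    import cv2

    def hsv_channel(frame, channel=0):
        # Mirrors cv.SetImageCOI(hsv, channel + 1) followed by cv.Copy:
        # convert to HSV and keep a single plane (0 = hue, 1 = saturation,
        # 2 = value).
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        return cv2.split(hsv)[channel]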