Exemple #1
0
 def __init__(self, source, type):
     """Open a video capture on *source* and apply camera configuration.

     source: integer device index (live camera) or a filename/URL.
     type:   Mode.HIGH_GOAL or Mode.GEARS — selects which comms state
             channel receives CAMERA_ERROR reports.
     """
     self.type = type
     try:
         # NOTE(review): cv2.VideoCapture rarely raises on a bad source;
         # most failures surface later via isOpened()/read(), which
         # __configure checks.
         self.video = cv2.VideoCapture(source)
         self.rval = True
     except cv2.error:
         self.rval = False
         # report the failure on the channel matching this capture's mode
         if type == Mode.HIGH_GOAL:
             comms.set_high_goal_state(State.CAMERA_ERROR)
         else:
             comms.set_gear_state(State.CAMERA_ERROR)
     self.source = source
     # a live feed is identified by an integer device index
     # (was: `True if isinstance(source, int) else False`)
     self.live = isinstance(source, int)
     self.__configure()
Exemple #2
0
    def __configure(self):
        """Configure the capture: set the resolution, verify a frame can
        actually be read, and on live Linux feeds push the per-mode v4l2
        camera settings.

        On any failure, sets self.rval to False and reports
        State.CAMERA_ERROR on the comms channel matching self.type.
        """
        if self.rval:
            self.video.set(cv.CV_CAP_PROP_FRAME_WIDTH, config.RESOLUTION_X)
            self.video.set(cv.CV_CAP_PROP_FRAME_HEIGHT, config.RESOLUTION_Y)
            log.info("Set resolution to %sx%s", config.RESOLUTION_X,
                     config.RESOLUTION_Y)
            # find out if the camera is actually working
            if self.video.isOpened():
                self.rval, frame = self.video.read()

                # run some configuration if everything is good
                if self.rval:
                    log.info("Read from capture successfully")
                    # run config using v4l2 driver if the platform is linux
                    # and the feed is live
                    if platform.system() == "Linux" and self.live:
                        try:
                            log.info("Running Linux config using v4l2ctl")
                            v4l2ctl.restore_defaults(self.source)
                            if self.type == Mode.HIGH_GOAL:
                                for prop in config.CAMERA_SETTINGS_HIGH_GOAL:
                                    v4l2ctl.set(
                                        self.source, prop,
                                        config.CAMERA_SETTINGS_HIGH_GOAL[prop])
                            else:
                                for prop in config.CAMERA_SETTINGS_GEARS:
                                    v4l2ctl.set(
                                        self.source, prop,
                                        config.CAMERA_SETTINGS_GEARS[prop])
                        except AttributeError as e:
                            log.error('Setting camera properties failed!')
                            # BUG FIX: previously compared the *builtin*
                            # `type` to Mode.HIGH_GOAL (always False), so
                            # HIGH_GOAL errors went to the gear channel.
                            if self.type == Mode.HIGH_GOAL:
                                comms.set_high_goal_state(State.CAMERA_ERROR)
                            else:
                                comms.set_gear_state(State.CAMERA_ERROR)
                            print(e)
                else:
                    self.rval = False
                    log.critical("Problem reading from capture")
                    # BUG FIX: use self.type, not the builtin `type`
                    if self.type == Mode.HIGH_GOAL:
                        comms.set_high_goal_state(State.CAMERA_ERROR)
                    else:
                        comms.set_gear_state(State.CAMERA_ERROR)

            else:
                self.rval = False
                log.critical("Problem opening capture")

                # BUG FIX: use self.type, not the builtin `type`
                if self.type == Mode.HIGH_GOAL:
                    comms.set_high_goal_state(State.CAMERA_ERROR)
                else:
                    comms.set_gear_state(State.CAMERA_ERROR)
Exemple #3
0
def gear_targeting(hsv):
    """Find the gear-peg retroreflective target in an HSV frame and
    publish its pose over comms.

    Thresholds *hsv*, picks the tape-strip contours, estimates the
    target pose via estimate_pose(), and calls comms.set_gear() with
    (rotation deg, horizontal in, forward in) plus a state update.

    Relies on module globals: gear_thresh_low/high, min/max_gears_area,
    rel_split_eps, poly_eps, gears_objp, res_x, res_y, draw, frame.
    """
    # threshold
    mask = cv2.inRange(hsv, gear_thresh_low, gear_thresh_high)

    # remove noise
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # NOTE(review): `res` is never used below — possibly a debugging leftover
    res = mask.copy()

    # get a list of continuous lines in the image
    # (flags 1, 2 == cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE; the 2-value
    # unpack assumes an OpenCV version whose findContours returns 2 values
    # — TODO confirm against the installed cv2)
    contours, _ = cv2.findContours(mask, 1, 2)

    # identify the target in the image
    target = None
    if len(contours) > 0:
        # find the polygon with the largest area
        # find the two biggest contours
        areas = []
        for c in contours:
            area = cv2.contourArea(c)
            if area > min_gears_area and area < max_gears_area:
                areas.append((area, c))
        # sort largest-area first
        areas.sort(key=lambda x: -x[0])

        if len(areas) == 2:
            # both tape strips visible: hull of the combined points
            target = cv2.convexHull(np.concatenate((areas[0][1], areas[1][1])))

        # if one of the sides is cut in half
        elif len(areas) > 2:
            # the two fragments should each be about half the big strip's area
            half_area = areas[0][0] / 2
            upper = half_area * (1 + rel_split_eps)
            lower = half_area * (1 - rel_split_eps)
            area1_within_bounds = areas[1][0] < upper and areas[1][0] > lower
            area2_within_bounds = areas[2][0] < upper and areas[2][0] > lower
            if area1_within_bounds and area2_within_bounds:
                target = cv2.convexHull(np.concatenate((areas[0][1], areas[1][1], areas[2][1])))

        if target is not None:
            # simplify the hull; reject it unless it has exactly as many
            # vertices as the object model and lies away from the frame edge
            e = poly_eps * cv2.arcLength(target, True)
            target = cv2.approxPolyDP(target, e, True)
            correct_number_of_sides = len(target) == len(gears_objp)
            target_within_bounds = True
            for p in target:
                # note, array is double wrapped, that's why accessing x and y values here is weird
                if p[0][0] > res_x - 3 or p[0][0] <= 1 or p[0][1] > res_y - 3 or p[0][1] <= 1:
                    target_within_bounds = False
                    break

            if not correct_number_of_sides or not target_within_bounds:
                target = None

    if target is not None:
        # set state
        comms.set_gear_state(State.TARGET_FOUND)

        # fix origin: rotate the vertex list so the point closest to the
        # image origin (smallest x+y) comes first, matching gears_objp order
        index = None
        best_cost = 1000000
        for i, p in enumerate(target):
            cost = (p[0][0] + p[0][1])
            if cost < best_cost:
                best_cost = cost
                index = i

        target = np.append(target, target[:index], 0)[index:]

        #Calculate the rvecs and tvecs
        rvecs, tvecs = estimate_pose(target)

        #rlen = np.linalg.norm(rvecs)
        #rvecs *= (rlen - 12*math.pi/180) / rlen
        #Rotation matrix derived from rodrigues vector rvec
        R, _ = cv2.Rodrigues(rvecs)
        sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
        y = math.atan2(-R[2, 0], sy) #Rotation of target relative to camera

        #The angle off normal of the camera mounted on the robot (to be 12 degrees)
        # NOTE(review): -0 * anything is 0.0, so cam_angle is currently zero
        cam_angle = -0 * math.pi / 180

        #Alpha is the angle that the target is observed at by the camera
        alpha = math.atan(tvecs[0] / tvecs[2])

        #tlen is the distance from the camera to the center of the target
        tlen = math.sqrt(tvecs[0]*tvecs[0] + tvecs[2]*tvecs[2])

        #calculate the shift that the gear should move to be in line with the peg
        shift = tlen * (math.sin(cam_angle + alpha) + math.cos(cam_angle + alpha) * math.tan(- y - cam_angle))

        #Additionally, shift the center of the robot to be in line with the peg (note that the gear will no longer be in line)
        # (14.5 presumably inches, camera-to-robot-center offset — TODO confirm)
        shift += 14.5 * math.tan( - y - cam_angle)

        rotation = (cam_angle - y) / math.pi * 180
        #horizontal = -float(tvecs[0]) - 7
        #Add a factor for the displacement of the gear from the camera
        horizontal = shift + 7.875
        forward = float(tvecs[2]) #FIX APPROACH

        comms.set_gear(rotation, horizontal, forward)

        # draw debug overlays on the (module-global) frame when enabled
        if draw:
            cv2.drawContours(frame, [target + 12], 0, (0, 255, 0), 3)
            M = cv2.moments(target)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv2.drawContours(frame, [np.array([[cx + 12, cy + 12]])], 0, (0, 0, 255), 10)

            cv2.putText(frame, "rotation: " + str(rotation), (100, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, 9)
            cv2.putText(frame, "alpha: " + str(alpha / math.pi * 180), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, 9)
            cv2.putText(frame, "horizontal: " + str(horizontal), (100, 140), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, 9)
            cv2.putText(frame, "tlen: " + str(tlen), (100, 190), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, 9)
    else:
        comms.set_gear_state(State.TARGET_NOT_FOUND)
Exemple #4
0

# target camera matrix for undistorting frames at full resolution
# (alpha=1 keeps all source pixels; roi marks the valid sub-region)
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx, dist, (res_x, res_y), 1, (res_x, res_y))

# pixels to degrees conversion factor
ptd = config.CAMERA_DIAG_FOV / math.sqrt(math.pow(res_x, 2) + math.pow(res_y, 2))
# focal length in pixels, derived from the diagonal field of view (degrees)
focal_length = math.sqrt(res_x**2 + res_y**2) / 2 / math.tan(0.5 * config.CAMERA_DIAG_FOV * math.pi / 180)

# initialize logging
logging.basicConfig(stream=config.LOG_STREAM, level=config.LOG_LEVEL)
log = logging.getLogger(__name__)
log.info('OpenCV %s', cv2.__version__)

# send confirmation that we're alive
comms.set_gear_state(State.POWERED_ON)

# capture init (opens the gear camera at import time)
gear_cam_server = Capture(config.VIDEO_SOURCE_GEAR, Mode.GEARS)


# estimates the pose of a target, returns rvecs and tvecs
def estimate_pose(target):
    # fix array dimensions (aka unwrap the double wrapped array)
    new = []
    for r in target:
        new.append([r[0][0], r[0][1]])
    imgp = np.array(new, dtype=np.float64)

    # calculate rotation and translation matrices
    _, rvecs, tvecs = cv2.solvePnP(gears_objp, imgp, mtx, dist)