def __init__(self):
    self.bridge = CvBridge()
    self.image_received = False
    self.detector = apriltag.Detector()
    self.objectCoord = objCoord()

    rospy.logwarn("AprilTag Center Node [ONLINE]...")

    # rospy shutdown
    rospy.on_shutdown(self.cbShutdown)

    # Subscribe to Image msg
    self.image_topic = "/cv_camera/image_raw/converted"
    self.image_sub = rospy.Subscriber(self.image_topic, Image, self.cbImage)

    # Subscribe to CameraInfo msg
    self.cameraInfo_topic = "/cv_camera/camera_info_converted"
    self.cameraInfo_sub = rospy.Subscriber(self.cameraInfo_topic, CameraInfo,
                                           self.cbCameraInfo)

    # Publish to objCoord msg
    self.objCoord_topic = "/objCoord"
    self.objCoord_pub = rospy.Publisher(self.objCoord_topic, objCoord,
                                        queue_size=10)

    # Allow up to one second for the connection
    rospy.sleep(1)
def __init__(self, projectDict):
    cam.cam_run()
    self.batch_size = projectDict["BATCH_SIZE"]
    self.confidence = projectDict["CONFIDENCE"]
    self.nms_thesh = projectDict["NMS_THRESH"]
    self.CUDA = torch.cuda.is_available()
    self.num_classes = projectDict["NUM_CLASSES"]
    self.root_path = projectDict["ROOT_PATH"]

    name_file = projectDict["OBJ_NAME_PATH"]
    cfg_file = projectDict["CFG_PATH"]
    weight_file = projectDict["WEIGHTS_PATH"]
    self.classes = load_classes(name_file)

    print("Loading network.....")
    self.model = Darknet(cfg_file)
    self.model.load_weights(weight_file)
    print("Network successfully loaded")

    self.model.net_info["height"] = 416
    self.inp_dim = int(self.model.net_info["height"])
    assert self.inp_dim % 32 == 0
    assert self.inp_dim > 32

    # If there's a GPU available, put the model on GPU
    if self.CUDA:
        self.model.cuda()

    # Set the model in evaluation mode
    self.model.eval()

    self.april_detector = apriltag.Detector()
def __init__(self, server_):
    self.frame = None
    self.server = server_
    self.tag_name = ""
    self.target_name = ""
    self.width = int(self.server.video.width)
    self.height = int(self.server.video.height)
    self.quad_x = int(self.width / 2)
    self.quad_y = int(self.height / 2)
    self.theta = math.radians(62.2)  # width angle of picam
    self.psi = math.radians(48.8)    # height angle of picam
    self.pix_to_meter = 0
    self.altitude = 1
    options = apriltag.DetectorOptions(families='tag36h11',
                                       border=1,
                                       nthreads=1,
                                       quad_decimate=1.0,
                                       quad_blur=0.0,
                                       refine_edges=True,
                                       refine_decode=True,
                                       refine_pose=True,
                                       debug=False,
                                       quad_contours=True)
    self.detector = apriltag.Detector(options)
    self.x = None
    self.y = None
def main():
    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                        help='Movie to load or integer ID of camera device')
    apriltag.add_arguments(parser)
    options = parser.parse_args()

    webcam = cv.VideoCapture(0)
    cv.namedWindow('frame')
    detector = apriltag.Detector(options, searchpath=apriltag._get_demo_searchpath())

    while True:
        _, frame = webcam.read()
        gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))
        if detections:
            print(detections[0])
        else:
            print("not found")

        # blend the annotated detection image over the camera frame
        overlay = frame // 2 + dimg[:, :, None] // 2
        cv.imshow('frame', overlay)
        ikey = cv.waitKey(1)
        if ikey == ord('q'):
            break

if __name__ == '__main__':
    main()
def __init__(self):
    # Initialize apriltag detector
    self.det = apriltag.Detector()
    self.c = 0

    # Load camera data
    with open('cameraParams.json', 'r') as f:
        data = json.load(f)
    self.cameraMatrix = np.array(data['cameraMatrix'], dtype=np.float32)
    self.distCoeffs = np.array(data['distCoeffs'], dtype=np.float32)

    # Load world points, keyed by tag ID
    self.world_points = {}
    with open('worldPoints.json', 'r') as f:
        data = json.load(f)
    for k, v in data.items():
        self.world_points[int(k)] = np.array(v, dtype=np.float32).reshape((4, 3, 1))

    self.pose_buffer = MedianBuffer(10)
    self.last = (False, None, None, None)
    self.pose_buff = []
    self.yaw_buff = []
    self.ret = True
    self.heading = np.array([0, 1, 0])
def get_closest(target_tag):
    global tag_sequence
    global seen
    current_tag = None

    cvQueue1.put(1)
    cvQueue1.join()
    time.sleep(0.1)
    frame = cvQueue2.get()
    cvQueue2.task_done()

    detector = apriltag.Detector()
    atags = detector.detect(frame)
    print(atags)

    delta = 100
    if len(atags) == 0:
        print("bleh")
        return None
    for tag in atags:
        print("Atag: " + str(tag.tag_id))
        if tag.tag_id in tag_sequence and tag.tag_id not in seen and abs(
                tag_sequence.index(tag.tag_id) - tag_sequence.index(target_tag)) < delta:
            print("New Delta " + str(tag.tag_id))
            delta = abs(tag_sequence.index(tag.tag_id) - tag_sequence.index(target_tag))
            current_tag = tag
    return current_tag
def main():
    DIM = (640, 480)
    camera_params = (506.66588415210174, 507.57637424966526,
                     311.03765199523536, 238.60300515336095)
    tag_size = 160
    video_capture = cv2.VideoCapture(0)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())

    while True:
        try:
            _, frame = video_capture.read()
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            result = detector.detect(gray_frame)
            # if result:
            #     frame, angle = draw_tag(frame, result)
            #     print(angle)
            for i, detection in enumerate(result):
                pose, e0, e1 = detector.detection_pose(detection, camera_params, tag_size)
                draw_pose(frame, camera_params, tag_size, pose)
                angles = rotationMatrixToEulerAngles(pose[0:3, 0:3])
                print(angles, pose[0:3, 3])
            cv2.imshow("apriltags", frame)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    video_capture.release()
def detect(self, image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    options = apriltag.DetectorOptions(families="tag36h11")
    detector = apriltag.Detector(options)
    results = detector.detect(gray)

    for r in results:
        # extract the bounding box (x, y)-coordinates for the AprilTag
        # and convert each of the (x, y)-coordinate pairs to integers
        (ptA, ptB, ptC, ptD) = r.corners
        ptA = (int(ptA[0]), int(ptA[1]))
        ptB = (int(ptB[0]), int(ptB[1]))
        ptC = (int(ptC[0]), int(ptC[1]))
        ptD = (int(ptD[0]), int(ptD[1]))

        # draw the bounding box of the AprilTag detection
        cv2.line(image, ptA, ptB, (0, 255, 0), 2)
        cv2.line(image, ptB, ptC, (0, 255, 0), 2)
        cv2.line(image, ptC, ptD, (0, 255, 0), 2)
        cv2.line(image, ptD, ptA, (0, 255, 0), 2)

        # draw the center (x, y)-coordinates of the AprilTag
        (cX, cY) = (int(r.center[0]), int(r.center[1]))
        cv2.circle(image, (cX, cY), 5, (0, 0, 255), -1)

        # draw the estimated corner of the paper
        (cX_up, cY_up) = self.estimatCorner(results)
        cv2.circle(image, (cX_up, cY_up), 9, (255, 0, 0), -1)

    # return the output image after AprilTag detection
    return image
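# A minimal driver sketch for the detect() method above; the class name
# TagAnnotator and the image path are hypothetical stand-ins, not names from
# the snippets in this collection.
import cv2

img = cv2.imread('apriltag_photo.jpg')   # hypothetical input image
annotated = TagAnnotator().detect(img)   # hypothetical host class
cv2.imshow('AprilTag detections', annotated)
cv2.waitKey(0)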
def __init__(self, render=False, debug=False, num_tags=6, simulation=True):
    # debug mode
    self.debug = debug
    # drone speed
    self.speed = 50
    # render
    self.render = render
    # number of april tags
    self.num_tags = num_tags

    # initialize ip address
    self.simulation = simulation
    if self.simulation:
        ip = "10.202.0.1"    # ip address for virtual drone
    else:
        ip = "192.168.42.1"  # ip address for real drone
    self.drone = olympe.Drone(ip, loglevel=TraceLogger.level.error)

    # Initialize the apriltag detector
    self.detector = apriltag.Detector()
    # Initialize cv2frame
    self.cv2frame = None
    # connect to the xbox controller
    self.connect_controller()
def main():
    DIM = (640, 480)
    K = np.array([[333.71994064649397, 0.0, 328.90247793390785],
                  [0.0, 332.3864141021737, 221.06175468227764],
                  [0.0, 0.0, 1.0]])
    D = np.array([[-0.022627097023142008], [-0.045919790866641955],
                  [0.055605531135859636], [-0.02114945903451172]])
    video_capture = cv2.VideoCapture(0)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())

    while True:
        try:
            _, frame = video_capture.read()
            undistorted_img = cv2.remap(frame, map1, map2,
                                        interpolation=cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)
            gray_frame = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2GRAY)
            result = detector.detect(gray_frame)
            if result:
                frame, angle = draw_tag(undistorted_img, result)
                print(angle)
            cv2.imshow("april_tags", undistorted_img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    video_capture.release()
def __init__(self):
    self.bridge = CvBridge()
    self.image_received = False
    self.detector = apriltag.Detector()
    self.apriltag_detection_status = Bool()
    self.apriltagID = Int64()

    rospy.logwarn("AprilTag Detection PC Node [ONLINE]...")

    # rospy shutdown
    rospy.on_shutdown(self.cbShutdown)

    # Subscribe to Image msg
    self.image_topic = "/pc_cv_camera/image_raw"
    self.image_sub = rospy.Subscriber(self.image_topic, Image, self.cbImage)

    # Publish to Bool msg
    self.apriltagStatus_topic = "/pc_apriltag_detection_status"
    self.apriltagStatus_pub = rospy.Publisher(self.apriltagStatus_topic, Bool,
                                              queue_size=10)

    # Publish to Int64 msg
    self.apriltagID_topic = "/pc_apriltag_detection_ID"
    self.apriltagID_pub = rospy.Publisher(self.apriltagID_topic, Int64,
                                          queue_size=10)

    # Allow up to one second for the connection
    rospy.sleep(1)
def get_position():
    global frame
    global curr_x
    global curr_z
    global curr_heading

    detector = apriltag.Detector()
    atags = detector.detect(frame)

    yaw_bar = 0.0
    x_bar = 0.0
    z_bar = 0.0

    atags = [a for a in atags if a.tag_id != 50]  # skip tag 50
    for tag in atags:
        corners = np.array(tag.corners, dtype=np.float32).reshape((4, 2, 1))
        tag_id = tag.tag_id
        retval, rvec, tvec = cv2.solvePnP(world_points[tag_id], corners,
                                          camera_matrix, camera_distortions)
        rot_matrix, _ = cv2.Rodrigues(rvec)
        R = rot_matrix.transpose()
        # camera position in world coordinates: -R^T * tvec
        pose = -R @ tvec
        x_bar += pose[0]
        z_bar += pose[2]
        yaw, pitch, roll = get_orientation(camera_matrix, R, tvec)
        yaw_bar += yaw
        # print("Yaw: {} \n Pitch: {} \n Roll: {}".format(yaw, pitch, roll))

    # average the estimates over all visible tags
    if len(atags) > 1:
        curr_heading = yaw_bar / len(atags)
        curr_x = x_bar / len(atags)
        curr_z = z_bar / len(atags)
    else:
        curr_heading = yaw_bar
        curr_x = x_bar
        curr_z = z_bar
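# Hedged sketch with synthetic numbers (not data from the code above):
# get_position() recovers the camera center via C = -R^T * t, inverting the
# solvePnP model X_cam = R * X_world + t.
import numpy as np
import cv2

rvec = np.array([0.0, np.pi / 2, 0.0])  # 90 degree rotation about the y axis
tvec = np.array([[0.0], [0.0], [2.0]])  # translation as returned by solvePnP
R, _ = cv2.Rodrigues(rvec)
C = -R.T @ tvec                         # camera center in world coordinates
print(C.ravel())                        # -> [2. 0. 0.]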
def main():
    args = get_args()
    cols = args["cols"]
    rows = args["rows"]
    camera = util.get_camera(args["index"])
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    start = time.time()
    frame_num = 0
    cv2.namedWindow("frame")
    while True:
        if PRINT_FPS:
            frame_num += 1
        ret, frame = camera.read()
        if SHOW_IMAGE:
            cv2.imshow("frame", frame)
        if PRINT_FPS:
            fps = frame_num / (time.time() - start)
            print("Showing frame {}".format(frame_num))
            print("Current speed: {} fps".format(fps))
        if APPLY_CHECKERBOARD:
            frame = undistort_image(frame, args["file"])
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detections, det_image = detector.detect(gray, return_image=True)
        print(len(detections))
        if detections and APPLY_TRANSFORM:
            detections = apply_transform(detections, args["file"])
        points = []
        dim_lst = []
        for d in range(len(detections)):
            tag_x, tag_y = detections[d].center
            center_point = (int(tag_x), int(tag_y))
            points.append(center_point)
            cp = get_point(detections[d].center)
            tl = get_point(detections[d].corners[0])
            tr = get_point(detections[d].corners[1])
            br = get_point(detections[d].corners[2])
            x_axis = (tl[0] + tr[0]) // 2, (tl[1] + tr[1]) // 2
            y_axis = (tr[0] + br[0]) // 2, (tr[1] + br[1]) // 2
            # print("Tag {} found at ({},{})".format(d.tag_id, tag_x, tag_y))
            # print("with angle {}".format(util.get_tag_angle(d.corners)))
            # Compute size of each tag as it appears on camera
            width = dist(tr, tl)
            height = dist(tr, br)
            dim_lst.append(np.array([width, height]))
        compute_dx_dy_all(frame, detections, points, dim_lst, rows, cols, CANONICAL_TAG)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
def docks_found_in_cam(self, camera_number):
    docks = []
    try:
        cap = cv.VideoCapture(self.cam_first + str(camera_number) + self.cam_last)
    except IOError:
        print("unable to connect to hik", camera_number)
        return False
    else:
        proceed_framed = 0
        while cap.isOpened() and self.frames_to_be_processed >= proceed_framed:
            ret, frame = cap.read()
            # convert the image to grayscale and rescale it
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            gray = Rs(70).rescaling_image(gray)
            # TODO: suppress the detector's console output
            tags_in_image = aptg.Detector().detect(gray)
            # record the detected tags as docks whenever this frame yields
            # more tags than we currently have (including the empty case)
            if len(tags_in_image) > len(docks):
                docks = []
                for tag in tags_in_image:
                    docks.append({
                        "tag_id": tag.tag_id,
                        "rect_area": [[tag.corners[0][0], tag.corners[0][1]],
                                      [tag.corners[1][0], tag.corners[1][1]],
                                      [tag.corners[2][0], tag.corners[2][1]],
                                      [tag.corners[3][0], tag.corners[3][1]]],
                        "center": [tag.center[0], tag.center[1]],
                        "cam_no": camera_number,
                    })
            proceed_framed += 1
        return docks
def main():
    '''Main function.'''
    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                        help='Movie to load or integer ID of camera device')
    apriltag.add_arguments(parser)
    options = parser.parse_args()

    try:
        cap = cv2.VideoCapture(int(options.device_or_movie))
    except ValueError:
        cap = cv2.VideoCapture(options.device_or_movie)

    window = 'Camera'
    cv2.namedWindow(window)

    # set up a reasonable search path for the apriltag DLL inside the
    # github repo this file lives in;
    #
    # for "real" deployments, either install the DLL in the appropriate
    # system-wide library directory, or specify your own search paths
    # as needed.
    detector = apriltag.Detector(options, searchpath=apriltag._get_demo_searchpath())

    while True:
        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))

        for i, detection in enumerate(detections):
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print()
            print(detection.tostring(indent=2))
            print()

        overlay = frame // 2 + dimg[:, :, None] // 2
        cv2.imshow(window, overlay)
        k = cv2.waitKey(1)
        if k == 27:
            break
def scan_image(i):
    detector = apriltag.Detector()
    dets = detector.detect(i)
    retval = {}
    for d in dets:
        retval[(d.tag_family, d.tag_id)] = (d.center, d.corners)
    return retval
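# A minimal usage sketch for scan_image() above; the detector expects a
# single-channel (grayscale) image, and 'tags.png' is a hypothetical path.
import cv2

gray = cv2.imread('tags.png', cv2.IMREAD_GRAYSCALE)
for (family, tag_id), (center, corners) in scan_image(gray).items():
    print(family, tag_id, center)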
def get_tag(color_frame, depth_frame, params):
    ret = ""
    # initialize the apriltag detector
    fov = 0.466  # half the horizontal field of view, in radians
    detector = apriltag.Detector(searchpath=_get_demo_searchpath())

    # grab the color image and convert it to grayscale
    img = np.asanyarray(color_frame.get_data())
    pil_img = Image.fromarray(img)
    gray = np.array(pil_img.convert('L'))

    result = detector.detect(gray)
    num_detections = len(result)
    if num_detections == 0:
        return "no tags detected"

    i = 0
    for detection in result:
        ret += str(i) + " "
        x, y = detection.center
        tag_id = detection.tag_id
        ret += str(tag_id) + " "
        dist_to_center = depth_frame.get_distance(int(x), int(y))
        # print('Distance to center of apriltag is: ', dist_to_center)

        # The pose of an apriltag in camera view is returned from the apriltag
        # package as a homogeneous transform matrix. The tag position in the H
        # matrix is the (x, y, z) position of the center of the tag with
        # respect to the left center of the camera frame.
        # The third argument to detector.detection_pose() is the size of the
        # apriltags used, in meters. The pose calculations are very sensitive
        # to this value.
        pose, e0, e1 = detector.detection_pose(detection, params, 0.166)
        z_dist = pose[2, 3]
        x_dist = pose[0, 3] - math.tan(fov) * z_dist
        yaw = math.atan2(pose[1, 0], pose[0, 0])
        ret += str(z_dist) + " "
        ret += str(x_dist) + " "
        ret += str(yaw) + " "
        i += 1
    return ret
def callback(data):
    # rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
    detector = apriltag.Detector()
    bridge = CvBridge()
    image = bridge.imgmsg_to_cv2(data, "mono8")
    detections = detector.detect(image)
    if detections:
        print(detections[0])
def detect_tag(image):
    # convert to grayscale for the apriltag library
    img_bw = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # threshold (Otsu picks the level automatically)
    (thresh, img_bw) = cv2.threshold(img_bw, 128, 255,
                                     cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    detector = apriltag.Detector()
    tag_info = detector.detect(img_bw)
    return tag_info
def __init__(self, args):
    # state
    self.input_camera = [None, None]
    self.april_detector = True
    self.rate = 0
    self.kin_joints = JointState()
    self.kin_joints.name = ["tilt", "lift", "yaw", "pitch"]
    self.kin_joints.position = [-0.10471975803375244, 0.9031959176063538,
                                0.06871610879898071, 0.14394205808639526]
    self.kin_joints.effort = [0, 0, 0, 1]

    # init the robot kinematic control
    self.robot = miro.interface.PlatformInterface()

    # handle april
    if "apriltag" not in sys.modules:
        raise ValueError("April Tags library not available")
    options = apriltag.DetectorOptions(families='tag36h11',
                                       border=0,
                                       nthreads=128,
                                       quad_decimate=2.0,
                                       quad_blur=0.0,
                                       refine_edges=False,
                                       refine_decode=False,
                                       refine_pose=False,
                                       debug=False,
                                       quad_contours=True)
    self.april_detector = apriltag.Detector(options)

    # ROS -> OpenCV converter
    self.image_converter = CvBridge()

    # robot name
    topic_base_name = "/" + os.getenv("MIRO_ROBOT_NAME")

    # subscribe
    self.sub_caml = rospy.Subscriber(topic_base_name + "/sensors/caml/compressed",
                                     CompressedImage, self.callback_caml,
                                     queue_size=1, tcp_nodelay=True)
    # self.sub_camr = rospy.Subscriber(topic_base_name + "/sensors/camr/compressed",
    #                                  CompressedImage, self.callback_camr,
    #                                  queue_size=1, tcp_nodelay=True)
    self.sub_kin = rospy.Subscriber(topic_base_name + "/sensors/kinematic_joints",
                                    JointState, self.callback_kin,
                                    queue_size=1, tcp_nodelay=False)

    # publish
    self.kinematic_joints_pub = rospy.Publisher(topic_base_name + "/control/kinematic_joints",
                                                JointState, queue_size=1)
def __init__(self, w, h, cameraMatrix, distortion, cornerDist, video, tag="0",
             exp_factor=4.4526):
    """
    Initialize the tracker object.

    :param w: width of the image in pixels
    :param h: height of the image in pixels
    :param cameraMatrix: intrinsic camera matrix
    :param distortion: distortion coefficients
    :param cornerDist: distance along the edge of the april tag, divided by 2 (in cm)
    :param video: video capture device
    :param tag: unique identifier to be applied to all the output files
    :param exp_factor: experimental factor (see ReadMe)
    """
    self.width = w
    self.height = h
    self.cameraMatrix = cameraMatrix
    self.distortion = distortion
    self.cornerDist = cornerDist
    self.exp_factor = exp_factor
    newMtrx, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix, distortion,
                                                 (w, h), 1, (w, h))
    self.fixedMatrix = newMtrx
    self.camera = video

    # initialize x and y coord so we don't get errors later
    self.xCoord = 0
    self.yCoord = 0
    self.zCoord = 0
    # initialize array for frames to be stored in
    self.frames = []
    # initialize angle relative to apriltag (also just so we don't get errors later)
    self.phi = 0
    self.rho = 0
    self.tag = tag
    self.pixelsFromCenterX = 0
    self.pixelsFromCenterY = 0
    # initialize apriltag detector
    self.detector = apriltag.Detector()

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    self.videoWriter = cv2.VideoWriter('cameraOutput' + self.tag + '.avi',
                                       fourcc, 20.0, (960, 720))
def camera_callback(self, data):
    rospy.loginfo("April tag detector loginfo")
    # We select bgr8 because it is the default OpenCV encoding
    cv_image = self.bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
    frame = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

    # April tag detector
    tag_detected = False
    detector = apriltag.Detector()
    result = detector.detect(frame)
    tagID = 0
    global mission_stage_msg

    if len(result) == 1:
        tag_detected = True
        tag = result[0].tag_family
        tagID = result[0].tag_id
        cx = result[0].center[0]
        cy = result[0].center[1]
        corner_points = result[0].corners
    if len(result) > 1:
        print("Multiple tags detected")

    pub2 = rospy.Publisher("/current_tag", Int64, queue_size=1)
    msg2 = Int64()
    msg2.data = 0

    if tag_detected:
        print("Tag detected")
        print('tag ID = ', tagID)
        # publish the currently detected tag, if any
        msg2.data = tagID
    else:
        print('No tag detected')
        print('\n')

    # advance the mission stage
    if mission_stage_msg == 0 and tagID == 8:
        mission_stage_msg = 1
    if mission_stage_msg == 1 and tagID == 9:
        mission_stage_msg = 2
    if mission_stage_msg == 2 and tagID == 6:
        mission_stage_msg = 3
    if mission_stage_msg == 3 and tagID == 4:
        mission_stage_msg = 4

    pub = rospy.Publisher("/mission_stage", Int64, queue_size=1)
    msg = Int64()
    msg.data = mission_stage_msg
    pub.publish(msg)
    pub2.publish(msg2)
def main():
    tagSize = 0.16
    detector = apriltag.Detector()
    imageUndistorted = cv2.imread("imageUndistorted.png", 0)
    cameraParams = (466.19345, 464.67789, 368.5271, 228.25047)
    detections = detector.detect(imageUndistorted)
    for detection in detections:
        cMm, _, _ = detector.detection_pose(detection, cameraParams, tag_size=tagSize)
        pprint(cMm)
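# detection_pose() returns a 4x4 homogeneous transform (cMm above). A hedged
# helper showing how the snippets in this collection read it; decompose_pose
# is my own name, not from the original code.
import math

def decompose_pose(cMm):
    """Split a tag pose into rotation, translation, and yaw (radians)."""
    R = cMm[0:3, 0:3]  # rotation of the tag in the camera frame
    t = cMm[0:3, 3]    # tag center position, in the same units as tag_size
    yaw = math.atan2(R[1, 0], R[0, 0])
    return R, t, yaw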
def __init__(self):
    self.bridge = CvBridge()
    self.image_received = False
    self.detector = apriltag.Detector()
    self.objectCoord = objCoord()
    self.panErrval = Float32()
    self.telloCmdVel = Twist()

    self.MAX_LIN_VEL = 0.005
    self.MAX_ANG_VEL = 0.005

    # set PID values for panning
    self.panP = 0.5
    self.panI = 0
    self.panD = 0

    # set PID values for tilting
    self.tiltP = 0.25
    self.tiltI = 0
    self.tiltD = 0

    # create the PIDs and initialize them
    self.panPID = PID(self.panP, self.panI, self.panD)
    self.tiltPID = PID(self.tiltP, self.tiltI, self.tiltD)
    self.panPID.initialize()
    self.tiltPID.initialize()

    rospy.logwarn("AprilTag Tracking Node [ONLINE]...")

    # rospy shutdown
    rospy.on_shutdown(self.cbShutdown)

    # Subscribe to CameraInfo msg
    self.cameraInfo_topic = "/cv_camera/camera_info_converted"
    self.cameraInfo_sub = rospy.Subscriber(self.cameraInfo_topic, CameraInfo,
                                           self.cbCameraInfo)

    # Subscribe to objCoord msg
    self.objCoord_topic = "/objCoord"
    self.objCoord_sub = rospy.Subscriber(self.objCoord_topic, objCoord,
                                         self.cbObjCoord)

    # Publish to Twist msg
    self.telloCmdVel_topic = "/cmd_vel"
    self.telloCmdVel_pub = rospy.Publisher(self.telloCmdVel_topic, Twist,
                                           queue_size=10)

    # Allow up to one second for the connection
    rospy.sleep(1)
def main():
    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

    success, frame = cam.read()
    if not success:
        print('failed to read camera')
        return
    display = frame.copy()
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    cv2.imshow('win', frame)
    cv2.waitKey()

    # 3D object points of the tag corners fed to solvePnP, in meters
    aprilsize = 0.08
    opts = np.array([[aprilsize, -aprilsize, 0.],
                     [-aprilsize, -aprilsize, 0.],
                     [-aprilsize, aprilsize, 0.],
                     [aprilsize, aprilsize, 0.]])
    cmat = np.loadtxt('CameraMatrix.txt')
    dmat = np.loadtxt('Distortion.txt')

    # set up the detector
    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                        help='Movie to load or integer ID of camera device')
    apriltag.add_arguments(parser)
    options = parser.parse_args()

    # create an apriltag detector
    detector = apriltag.Detector(options)
    tags = detector.detect(frame, return_image=False)
    print('found {} tags'.format(len(tags)))
    print()

    for tag in tags:
        print('tag has corners \n{} \n'.format(tag.corners))
        print(tag.corners.shape, tag.corners.dtype)
        cv2.polylines(display, [tag.corners.astype(int)], True, (255, 255, 0), 1, 16)
        retval, rvec, tvec = cv2.solvePnP(opts, tag.corners, cmat, dmat)
        print('tag has tvec \n{}'.format(tvec))
def main():
    DIM = (640, 480)
    camera_params = (506.66588415210174, 507.57637424966526,
                     311.03765199523536, 238.60300515336095)
    tag_size = 160
    video_capture = cv2.VideoCapture(0)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())

    # ROS Publisher Init
    pub = rospy.Publisher('tagPose', tagInfoArray, queue_size=5)
    rospy.init_node('talker', anonymous=True)

    while True:
        _, frame = video_capture.read()
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        result = detector.detect(gray_frame)

        # Init fresh tag array to pass for this frame
        tagPoses = tagInfoArray()
        for i, detection in enumerate(result):
            pose, e0, e1 = detector.detection_pose(detection, camera_params, tag_size)
            draw_pose(frame, camera_params, tag_size, pose)
            tagPoses.tags.append(tagDataToPose(pose, result[i].tag_id))

        print(tagPoses)
        pub.publish(tagPoses)
        cv2.imshow("apriltags", frame)
        k = cv2.waitKey(1)
        if k % 256 == 32:  # exit on SPACE
            break
    video_capture.release()
def apriltag_image(input_images=['../media/input/single_tag.jpg',
                                 '../media/input/multiple_tags.jpg'],
                   output_images=False,
                   display_images=True,
                   detection_window_name='AprilTag'):
    '''
    Detect AprilTags from static images.

    Args:
        input_images [list(str)]: List of images to run detection algorithm on
        output_images [bool]: Boolean flag to save/not images annotated with detections
        display_images [bool]: Boolean flag to display/not images annotated with detections
        detection_window_name [str]: Title of displayed (output) tag detection window
    '''
    parser = ArgumentParser(description='Detect AprilTags from static images.')
    apriltag.add_arguments(parser)
    options = parser.parse_args()

    # Set up a reasonable search path for the apriltag DLL. Either install
    # the DLL in the appropriate system-wide location, or specify your own
    # search paths as needed.
    detector = apriltag.Detector(options, searchpath=apriltag._get_dll_path())

    for image in input_images:
        img = cv2.imread(image)
        print('Reading {}...\n'.format(os.path.split(image)[1]))

        result, overlay = apriltag.detect_tags(img,
                                               detector,
                                               camera_params=(3156.71852, 3129.52243,
                                                              359.097908, 239.736909),
                                               tag_size=0.0762,
                                               vizualization=3,
                                               verbose=3,
                                               annotation=True)

        if output_images:
            output_path = '../media/output/' + str(os.path.split(image)[1])
            output_path = output_path.replace(str(os.path.splitext(image)[1]), '.jpg')
            cv2.imwrite(output_path, overlay)

        if display_images:
            cv2.imshow(detection_window_name, overlay)
            while cv2.waitKey(5) < 0:  # Press any key to load the subsequent image
                pass
def getResults(self):
    try:
        self.converter = ImageBinaryConverter(self.threshold)
        self.detector = apriltag.Detector()
        # converts the image into black and white
        self.filteredImage = self.converter.getFilteredImage(self.imageArray)
        results = self.detector.detect(self.filteredImage)
        self.results = results[0].tostring()
        self.StringToList = StringToList(self.results)
        return self.StringToList
    except Exception:
        # any failure (e.g. no detections) yields None
        return None
def detect_april_tag_corners(img, points, vis=False):
    """
    points: numpy array of shape (H, W, 3)  # x y z
    img:    numpy array of shape (H, W, 3)  # b g r
    """
    pts_shape = points.shape
    img_shape = img.shape
    assert len(pts_shape) == 3 and pts_shape == img_shape and pts_shape[-1] == 3

    import apriltag
    detector = apriltag.Detector()
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    detections, dimg = detector.detect(gray, True)
    if len(detections) == 0:
        print("[WARN]: No detections")
        return False, None

    img_to_show = img.copy()
    points_3d_for_all = []
    for detection in detections:
        points_3d = []
        points_2d = np.round(detection.corners).astype(int)
        try:
            for point_2d in points_2d:
                x, y = point_2d
                # TODO: use bilinear interpolation to improve precision.
                point_3d = points[y, x]
                if np.any(np.isnan(point_3d)):
                    raise ValueError
                img_to_show = cv2.circle(img_to_show, (x, y), 4, (255, 0, 0), 2)
                points_3d.append(point_3d)
            points_3d_for_all.append(points_3d)
        except ValueError:
            print("[WARN]: Corner point in image is not registered by camera. "
                  "It has 'nan' value, please change the view")
            return False, None

    if vis:
        cv2.imshow('Detected Apriltag Corners', img_to_show)
        cv2.waitKey(0)
    return True, points_3d_for_all
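# The TODO above mentions bilinear interpolation for sub-pixel corner lookup;
# a hedged sketch of that idea, assuming (u, v) lies at least one pixel
# inside the point map. bilinear_lookup is my name, not from the original.
import numpy as np

def bilinear_lookup(points, u, v):
    """Sample an (H, W, 3) point map at fractional pixel coordinates (u, v)."""
    x0, y0 = int(np.floor(u)), int(np.floor(v))
    dx, dy = u - x0, v - y0
    return (points[y0, x0] * (1 - dx) * (1 - dy)
            + points[y0, x0 + 1] * dx * (1 - dy)
            + points[y0 + 1, x0] * (1 - dx) * dy
            + points[y0 + 1, x0 + 1] * dx * dy)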
def followPerson():
    global stop
    global followFlag

    time.sleep(1.0)
    cvQueue1.put(1)
    cvQueue1.join()
    time.sleep(1.0)
    frame = cvQueue2.get()
    cvQueue2.task_done()

    detector = apriltag.Detector()

    # keep looping while following is enabled and no stop was requested
    while followFlag and (not stop):
        time.sleep(0.1)
        atags = detector.detect(frame)

        # planar object points for the tag corners
        temp_origin = np.matrix([[0, 0, 0], [1, 0, 0], [1, -1, 0], [0, -1, 0]])
        temp_origin = np.array(temp_origin, dtype=np.float32).reshape((4, 3, 1))

        # look for tag 50, the tag carried by the person
        found = 0
        for tag in atags:
            corners = np.array(tag.corners, dtype=np.float32).reshape((4, 2, 1))
            tag_id = tag.tag_id
            if tag.tag_id == 50:
                found = 1
                break

        if found:
            center = tag.center
            x = center[0]
            retval, rvec, tvec = cv2.solvePnP(temp_origin, corners,
                                              camera_matrix, camera_distortions)
            rot_matrix, _ = cv2.Rodrigues(rvec)
            R = rot_matrix.transpose()
            pose = -R @ tvec
            if pose[0] > 10.0:
                # steer based on the tag's horizontal position in the frame
                if x < 150:
                    left()
                    print("left")
                elif x > 410:
                    right()
                    print("right")
                else:
                    forward()
                    print("forward")
            else:
                print("You're close enough. halt")
                halt()
        else:
            print("I can't see you! Turn left")
            left()
    halt()