Example #1
import cv2
import numpy as np
import apriltag


def main():
    DIM = (640, 480)
    K = np.array([[333.71994064649397, 0.0, 328.90247793390785],
                  [0.0, 332.3864141021737, 221.06175468227764],
                  [0.0, 0.0, 1.0]])
    D = np.array([[-0.022627097023142008], [-0.045919790866641955],
                  [0.055605531135859636], [-0.02114945903451172]])
    video_capture = cv2.VideoCapture(0)
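    # Build the fisheye undistortion maps once up front; cv2.remap() then
    # undistorts each captured frame cheaply inside the loop.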
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM,
                                                     cv2.CV_16SC2)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())

    while True:
        try:
            _, frame = video_capture.read()
            undistorted_img = cv2.remap(frame,
                                        map1,
                                        map2,
                                        interpolation=cv2.INTER_LINEAR,
                                        borderMode=cv2.BORDER_CONSTANT)
            gray_frame = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2GRAY)

            result = detector.detect(gray_frame)

            if result:
                frame, angle = draw_tag(undistorted_img, result)
                print(angle)

            cv2.imshow("april_tags", undistorted_img)
            cv2.waitKey(1)

        except KeyboardInterrupt:
            break
    video_capture.release()
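
The draw_tag() helper this example calls is defined elsewhere in its source
file and is not part of the apriltag bindings. A minimal sketch of what such a
helper might look like (hypothetical; assumes cv2, math, and numpy are
imported, and reports the angle of the first tag's top edge):

def draw_tag(frame, detections):
    # Outline each tag and mark its center.
    for det in detections:
        corners = det.corners.astype(int)
        for j in range(4):
            cv2.line(frame, tuple(corners[j]), tuple(corners[(j + 1) % 4]),
                     (0, 255, 0), 2)
        cv2.circle(frame, tuple(det.center.astype(int)), 4, (0, 0, 255), -1)
    # Angle of the first tag's top edge, in degrees.
    dx, dy = detections[0].corners[1] - detections[0].corners[0]
    angle = math.degrees(math.atan2(dy, dx))
    return frame, angle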
Example #2
import cv2 as cv
import apriltag
from argparse import ArgumentParser


def main():
    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?',
                        default=0,
                        help='Movie to load or integer ID of camera device')
    apriltag.add_arguments(parser)
    options = parser.parse_args()

    webcam = cv.VideoCapture(0)
    cv.namedWindow('frame')

    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())

    while True:
        _, frame = webcam.read()
        gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)

        detections, dimg = detector.detect(gray, return_image=True)
        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))
        try:
            print(detections[0])
        except IndexError:
            print("not found")

        # dimg is the detector's single-channel debug image; integer-halving
        # both arrays blends it 50/50 with the camera frame.
        overlay = frame // 2 + dimg[:, :, None] // 2

        cv.imshow('frame', overlay)
        ikey = cv.waitKey(1)
        if ikey == ord('q'):
            break

if __name__ == '__main__':
    main()
Example #3
import cv2
import apriltag


def main():
    DIM = (640, 480)
    camera_params = (506.66588415210174, 507.57637424966526,
                     311.03765199523536, 238.60300515336095)
    tag_size = 160
    video_capture = cv2.VideoCapture(0)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    while True:
        try:
            _, frame = video_capture.read()
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            result = detector.detect(gray_frame)
            '''
            if result:
                frame, angle = draw_tag(frame, result)
                print(angle)
            '''

            for i, detection in enumerate(result):
                pose, e0, e1 = detector.detection_pose(detection,
                                                       camera_params, tag_size)
                draw_pose(frame, camera_params, tag_size, pose)
                angles = rotationMatrixToEulerAngles(pose[0:3, 0:3])
                print(angles, pose[0:3, 3])

            cv2.imshow("apriltags", frame)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    video_capture.release()
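
The draw_pose() and rotationMatrixToEulerAngles() helpers are defined
elsewhere in this example's source file. rotationMatrixToEulerAngles() is
commonly written like the following sketch (assuming math and numpy as np are
imported), recovering ZYX Euler angles from the 3x3 rotation block of the
pose matrix:

def rotationMatrixToEulerAngles(R):
    # Standard ZYX decomposition with a gimbal-lock guard.
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:  # singular: pitch is +/- 90 degrees
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0.0
    return np.array([x, y, z])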
Example #4
def main():
    args = get_args()
    cols = args["cols"]
    rows = args["rows"]
    camera = util.get_camera(args["index"])
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    start = time.time()
    frame_num = 0
    cv2.namedWindow("frame")  # namedWindow returns None, so there is no handle to keep
    while True:
        if PRINT_FPS:
            frame_num += 1
        ret, frame = camera.read()
        if SHOW_IMAGE:
            cv2.imshow("frame", frame)
        if PRINT_FPS:
            fps = frame_num / (time.time() - start)
            print("Showing frame {}".format(frame_num))
            print("Current speed: {} fps".format(fps))
        if APPLY_CHECKERBOARD:
            frame = undistort_image(frame, args["file"])
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detections, det_image = detector.detect(gray, return_image=True)
        print(len(detections))
        if detections and APPLY_TRANSFORM:
            detections = apply_transform(detections, args["file"])
        points = []
        dim_lst = []
        for d in range(len(detections)):
            tag_x, tag_y = detections[d].center
            center_point = (int(tag_x), int(tag_y))
            points.append(center_point)
            cp = get_point(detections[d].center)
            tl = get_point(detections[d].corners[0])
            tr = get_point(detections[d].corners[1])
            br = get_point(detections[d].corners[2])
            x_axis = (tl[0] + tr[0]) // 2, (tl[1] + tr[1]) // 2
            y_axis = (tr[0] + br[0]) // 2, (tr[1] + br[1]) // 2
            # print("Tag {} found at ({},{})".format(d.tag_id, tag_x, tag_y))
            # print("with angle {}".format(util.get_tag_angle(d.corners)))

            # Compute size of each tag as they appear on camera

            width = dist(tr, tl)
            height = dist(tr, br)
            dim = np.array([width, height])
            dim_lst.append(dim)

        compute_dx_dy_all(frame, detections, points, dim_lst, rows, cols,
                          CANONICAL_TAG)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
    pass
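
The get_point() and dist() helpers are defined elsewhere in this project and
are not shown on this page. Plausible one-line sketches (hypothetical,
assuming math is imported):

def get_point(coords):
    # Round an (x, y) array to an integer pixel tuple.
    return int(coords[0]), int(coords[1])


def dist(p, q):
    # Euclidean distance between two pixel points.
    return math.hypot(p[0] - q[0], p[1] - q[1])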
Example #5
import cv2
import apriltag
from argparse import ArgumentParser


def main():
    '''Main function.'''

    parser = ArgumentParser(description='test apriltag Python bindings')

    parser.add_argument('device_or_movie',
                        metavar='INPUT',
                        nargs='?',
                        default=0,
                        help='Movie to load or integer ID of camera device')

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    try:
        cap = cv2.VideoCapture(int(options.device_or_movie))
    except ValueError:
        cap = cv2.VideoCapture(options.device_or_movie)

    window = 'Camera'
    cv2.namedWindow(window)

    # set up a reasonable search path for the apriltag DLL inside the
    # github repo this file lives in;
    #
    # for "real" deployments, either install the DLL in the appropriate
    # system-wide library directory, or specify your own search paths
    # as needed.

    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())

    while True:

        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))

        for i, detection in enumerate(detections):
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print()
            print(detection.tostring(indent=2))
            print()

        overlay = frame // 2 + dimg[:, :, None] // 2

        cv2.imshow(window, overlay)
        k = cv2.waitKey(1)

        if k == 27:  # ESC quits
            break
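
A typical invocation of this demo, assuming it is saved as apriltag_demo.py
(the filename here is hypothetical), passes the camera index plus whatever
detector flags apriltag.add_arguments() registered:

    python apriltag_demo.py 0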
Example #6
def main():

    DIM = (640, 480)
    camera_params = (506.66588415210174, 507.57637424966526,
                     311.03765199523536, 238.60300515336095)
    tag_size = 160
    video_capture = cv2.VideoCapture(0)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())

    # ROS Publisher Init
    pub = rospy.Publisher('tagPose', tagInfoArray, queue_size=5)
    rospy.init_node('talker', anonymous=True)

    # Init array of tag poses
    #tagPoses = tagInfoArray()

    while True:

        _, frame = video_capture.read()
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        result = detector.detect(gray_frame)

        # Init fresh tag arr to pass
        tagPoses = tagInfoArray()
        '''
        if (result):
            frame, angle = draw_tag(frame,result)
            print(angle)
        '''

        for i, detection in enumerate(result):
            pose, e0, e1 = detector.detection_pose(detection, camera_params,
                                                   tag_size)
            draw_pose(frame, camera_params, tag_size, pose)

            #print(result[i].tag_id)
            tagPoses.tags.append(tagDataToPose(pose, result[i].tag_id))

        # print(angles,pose[0:3,3])

        print(tagPoses)

        pub.publish(tagPoses)

        cv2.imshow("apriltags", frame)
        k = cv2.waitKey(1)

        if k % 256 == 32:  # SPACE quits
            break
        """
        except KeyboardInterrupt:
            break
        """
    video_capture.release()
Example #7
def main():
    video_capture = cv2.VideoCapture(0)
    #video_capture.set(3, 640)
    #video_capture.set(4, 480)

    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    while True:
        try:
            _, frame = video_capture.read()
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            result = detector.detect(gray_frame)

            if result:
                frame, angle = draw_tag(frame, result)
                print(angle)

            cv2.imshow("apriltags", frame)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    video_capture.release()

Example #8
    def __init__(self, markers):

        self.options = apriltag.DetectorOptions(families='tag36h11',
                                                border=1,
                                                nthreads=4,
                                                quad_decimate=1.0,
                                                quad_blur=0.0,
                                                refine_edges=True,
                                                refine_decode=False,
                                                refine_pose=False,
                                                debug=False,
                                                quad_contours=True)
        self.detector = apriltag.Detector(
            self.options, searchpath=apriltag._get_demo_searchpath())
        self.kalman_filters = {}
        self.transformation_ka = Kalman(12)
        self.g_camera_base = None
        self.marker_pose_dict = {}
        self.markers = markers
        self.dimg = None  # apriltag detection overlay matrix
        self.kalman_count = {}
        for tid in self.markers:
            self.kalman_count[tid] = 0
Example #9
def analyze(lock, options, pre_detection, save_time, bee_time, pre, cnt,
            pre_num_name, save_prefix, bee_log_dict, start_time):
    lock[0] = 'locked'

    fh_id_log = open(save_prefix + 'ImgID.log', 'a')

    # record time for each picture

    window = 'Camera'
    cv2.namedWindow(window)
    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())

    rgb = cv2.imread(save_prefix + " top" + str(cnt) + ".jpg")
    print(save_prefix + "top" + str(cnt) + ".jpg")
    while rgb is None:
        rgb = cv2.imread(save_prefix + "top" + str(cnt) + ".jpg")
        t.sleep(0.1)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    k = t.time()
    detections, dimg = detector.detect(gray, return_image=True)
    print('detecting time:', t.time() - k)
    lock[0] = 'unlocked'
    num_detections = len(detections)
    print('Detected {} tags.\n'.format(num_detections))
    current_detection = []
    overlay = rgb // 2 + dimg[:, :, None] // 2

    if len(detections):
        for i, detection in enumerate(detections):
            centerY = detection.center[1]
            # assume the bee only travels in one direction at a time;
            # once a tag first appears, keep appending until the tag disappears
            current_detection.append(detection[1])
            print('previous', pre_detection)
            if detection[1] in pre_detection.keys():
                if (float(centerY) - float(pre_detection[detection[1]]) < 0):

                    fh_id_log.write(
                        str(cnt) + '\t' + str(detection[1]) + ' exit ' +
                        str(bee_time[cnt]) + '\n')
                    #add_bee_event(pre,bee_log_dict,tag[0],t.time()-start_time,True)
                else:
                    fh_id_log.write(
                        str(cnt) + '\t' + str(detection[1]) + ' enter ' +
                        str(bee_time[cnt]) + '\n')
                    #add_bee_event(pre,bee_log_dict,tag[0],t.time()-start_time,False)

            else:
                fh_id_log.write(
                    str(cnt) + '\t' + str(detection[1]) + ' first ' +
                    str(bee_time[cnt]) + '\n')

            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (400, int(centerY))
            fontScale = 1
            fontColor = (0, 255, 255)
            lineType = 2

            cv2.putText(rgb, str(detection[1]), bottomLeftCornerOfText, font,
                        fontScale, fontColor, lineType)

        for i, detection in enumerate(detections):
            pre_detection[detection[1]] = detection.center[1]  # find the previous tags

    else:
        fh_id_log.write(str(cnt) + '\t' + '-1' + '\n')
        pre_detection.clear()

    fh_id_log.close()
    cv2.imwrite(save_prefix + " top" + str(cnt) + ".jpg", rgb)

    os.system('sync')
Example #10
def main():
    camera = setup_camera()

    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    start = time.time()
    frame_num = 0
    while True:
        frame_num += 1
        ret, frame = camera.read()
        if not DETECT_TAGS:
            if SHOW_IMAGE:
                cv2.imshow("frame", frame)
        fps = frame_num / (time.time() - start)
        # print("Showing frame {}".format(frame_num))
        # print("Current speed: {} fps".format(fps))
        if DETECT_TAGS:
            horizontal_dist = []
            vertical_dist = []
            row = -1
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            detections, det_image = detector.detect(gray, return_image=True)
            print(len(detections))
            points = []
            for d in range(len(detections)):
                tag_x, tag_y = detections[d].center
                center_point = (int(tag_x), int(tag_y))
                points.append(center_point)
                cp = get_point(detections[d].center)
                tl = get_point(detections[d].corners[0])
                tr = get_point(detections[d].corners[1])
                br = get_point(detections[d].corners[2])
                x_axis = (tl[0] + tr[0]) // 2, (tl[1] + tr[1]) // 2
                y_axis = (tr[0] + br[0]) // 2, (tr[1] + br[1]) // 2
                # print("Tag {} found at ({},{})".format(d.tag_id, tag_x, tag_y))
                # print("with angle {}".format(util.get_tag_angle(d.corners)))

                # 2D array to keep track of horizontal distance of current tag
                # and the one next to it
                if detections[d].center[0] < detections[d - 1].center[0]:
                    print("adding new array")
                    horizontal_dist.append([])
                    row += 1
                else:
                    print("current_row")
                    print(row)
                    horizontal_dist[row].append(detections[d].center[0] -
                                                detections[d - 1].center[0])

                cv2.circle(frame, (int(tag_x), int(tag_y)), 5, (0, 0, 255))
                cv2.line(frame, cp, x_axis, (0, 0, 255), 5)
                cv2.line(frame, cp, y_axis, (0, 255, 0), 5)

            if SHOW_IMAGE:
                cv2.imshow("frame", frame)

            # 2D array to keep track of vertical distance of current tag and the
            # one immediate below it.
            num_cols = len(horizontal_dist[0]) + 1
            for i in range(num_cols):
                vertical_dist.append([])
                for j in range(row):
                    vertical_dist[i].append(detections[i + num_cols].center[1] -
                                            detections[i].center[1])

            horizontal_dist = np.array(horizontal_dist)
            vertical_dist = np.array(vertical_dist)

            horizontal_median = np.median(horizontal_dist)
            horizontal_std = np.std(horizontal_dist)
            vertical_median = np.median(vertical_dist)
            vertical_std = np.std(vertical_dist)

            x_pts, y_pts = [p[0] for p in points], [p[1] for p in points]
            dx, dy = [], []
            for col in range(8 - 1):
                for row in range(3):
                    dx.append(x_pts[8 * row + col + 1] - x_pts[8 * row + col])
                    dy.append(y_pts[8 * row + col + 1] - y_pts[8 * row + col])
            print("H: {}".format(dx))
            print("V: {}".format(dy))

            print("H: {}".format(horizontal_dist))
            print("H median: {}".format(horizontal_median))
            print("H stddev: {}".format(horizontal_std))
            print("")
            print("V: {}".format(vertical_dist))
            print("V median: {}".format(vertical_median))
            print("V stddev: {}".format(vertical_std))
            print("")

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()
    pass
Example #11
import RPi.GPIO as rp
import cv2 as cv
from time import sleep
from argparse import ArgumentParser
import apriltag
from Methods import *
import threading
##############################
def map(x, in_min, in_max, out_min, out_max):  # note: shadows the built-in map()
    maps = -((x - in_max) * (out_max - out_min) / (in_max - in_min) + out_min)
    return maps
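# Worked example of the mapping above (note the leading minus sign flips the
# sign of the result): map(320, 0, 640, 4, 6)
#   = -((320 - 640) * (6 - 4) / (640 - 0) + 4) = -3.0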
##############################
parser = ArgumentParser(description='test apriltag Python bindings')
parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                    help='Movie to load or integer ID of camera device')
apriltag.add_arguments(parser)
options = parser.parse_args()
detector = apriltag.Detector(options,
                             searchpath=apriltag._get_demo_searchpath())
##############################
kti = 10
ksa = 10
servo1 = 15
servo2 = 14
deltadt = 0.1
dt1 = 4
dt2 = 6
cv.namedWindow('window')
tagid = 7
thread1 = threading.Thread(target=play_action, args=('Soccer_Forward',))  # args must be a tuple
##############################
rp.setmode(rp.BCM)
rp.setwarnings(False)
rp.setup(servo1,rp.OUT)
Example #12
def main():

    '''Main function.'''

    parser = ArgumentParser(
        description='test apriltag Python bindings')

    # parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
    #                     help='Movie to load or integer ID of camera device')

    parser.add_argument('-k', '--camera-params', type=_camera_params,
                        default=None,
                        help='intrinsic parameters for camera (in the form fx,fy,cx,cy)')

    parser.add_argument('-s', '--tag-size', type=float,
                        default=1.0,
                        help='tag size in user-specified units (default=1.0)')

    add_arguments(parser)

    options = parser.parse_args()

    # try:
    #     cap = cv2.VideoCapture(int(options.device_or_movie))
    # except ValueError:
    #     cap = cv2.VideoCapture(options.device_or_movie)

    cap = cv2.VideoCapture(1)

    window = 'Camera'
    cv2.namedWindow(window)

    # set up a reasonable search path for the apriltag DLL inside the
    # github repo this file lives in;
    #
    # for "real" deployments, either install the DLL in the appropriate
    # system-wide library directory, or specify your own search paths
    # as needed.
    
    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())

    while True:

        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        print('Detected {} tags.\n'.format(num_detections))

        # Build the overlay before the loop: _draw_pose() below draws into it.
        overlay = frame // 2 + dimg[:, :, None] // 2

        for i, detection in enumerate(detections):
            print('Detection {} of {}:'.format(i + 1, num_detections))
            print()
            print(detection.tostring(indent=2))

            # if options.camera_params is not None:

            pose, e0, e1 = detector.detection_pose(detection,
                                                   options.camera_params,
                                                   options.tag_size)

            if _HAVE_CV2:
                _draw_pose(overlay,
                           options.camera_params,
                           options.tag_size,
                           pose)
            
            print(detection.tostring(
                collections.OrderedDict([('Pose',pose),
                                         ('InitError', e0),
                                         ('FinalError', e1)]),
                indent=2))
                
            print()


        cv2.imshow(window, overlay)
        k = cv2.waitKey(1)

        if k == 27:
            break
Example #13
####################################
cv.createTrackbar('smt1', 'window', 0, 10, nothing)
cv.createTrackbar('smt2', 'window', 0, 10, nothing)
cv.createTrackbar('smt3', 'window', 0, 10, nothing)
cv.createTrackbar('smt4', 'window', 0, 10, nothing)
###/################################
parser = ArgumentParser(description='test apriltag Python bindings')
parser.add_argument('device_or_movie',
                    metavar='INPUT',
                    nargs='?',
                    default=0,
                    help='Movie to load or integer ID of camera device')
apriltag.add_arguments(parser)
options = parser.parse_args()
detector = apriltag.Detector(options,
                             searchpath=apriltag._get_demo_searchpath())
###/################################
while True:
    _, frame = webcam.read()
    mrkzy = int(len(frame) / 2)  # image-center y (rows / 2)
    mrkzx = int(len(frame[0]) / 2)  # image-center x (cols / 2)
    xframe = len(frame[0])  # frame width in pixels
    yframe = len(frame)  # frame height in pixels
    smt1 = cv.getTrackbarPos('smt1', 'window')
    smt2 = cv.getTrackbarPos('smt2', 'window')
    smt3 = cv.getTrackbarPos('smt3', 'window')
    smt4 = cv.getTrackbarPos('smt4', 'window')
    gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
    detections, dimg = detector.detect(gray, return_image=True)
    hilight = frame // 2 + dimg[:, :, None] // 2
Example #14
    def locate_tag(camera, data):

        camera_matrix = np.array(data["camera_matrix"])
        dist_coeffs = np.array(data["dist_coeffs"])
        new_camera_matrix = np.array(data["new_camera_matrix"])
        transformation_matrix = np.array(data["transformation_matrix"])

        detector = apriltag.Detector(
            searchpath=apriltag._get_demo_searchpath())
        frame = []
        gray = []
        while True:
            if not camera.isOpened():
                print("Failed to open camera")
                exit(0)
            ret, frame = camera.read()
            cv2.imshow("april_tag", frame)
            # take a picture and get detections
            k = cv2.waitKey(1)
            if k % 256 == 32:
                print("space has been pressed")
                frame = get_image(camera)
                gray = cvtColor(frame, COLOR_BGR2GRAY)
                cv2.imshow("april_tag", frame)
                # dst = cv2.undistort(frame, camera_matrix,
                #                     dist_coeffs, None, new_camera_matrix)
                # gray = cvtColor(dst, COLOR_BGR2GRAY)
                detections, det_image = detector.detect(gray,
                                                        return_image=True)

                if len(detections) == 0:
                    print("no tag found")
                    continue
                print("Found " + str(len(detections)) + " apriltags")

                for d in detections:
                    tag_x, tag_y = d.center
                    top_left = d.corners[0]
                    top_right = d.corners[1]
                    bottom_right = d.corners[2]
                    bottom_left = d.corners[3]
                    # TODO draw tag - might be better to generalize, because
                    # locate_cameras does this too.
                    #      tag_x, tag_y = d.center
                    # print("Tag {} found at ({},{})".format(d.tag_id, tag_x, tag_y))
                    # # cv2.circle(frame, d.center, 5, BLUE)
                    # cv2.circle(frame, (int(tag_x), int(tag_y)), 5, (0, 0, 255))

                    # (x, y, z, angle) = compute_tag_undistorted_pose(
                    #     camera_matrix, dist_coeffs, transformation_matrix, d, TAG_SIZE)

                    # Scale the coordinates, and print for debugging
                    # prints Device ID :: tag id :: x y z angle
                    # TODO debug offset method - is better, but not perfect.
                    # x = MULT_FACTOR * (x)
                    # y = MULT_FACTOR * (y)
                    # print(tag_xyz)
                    print("Tag {} found at ({},{})".format(
                        d.tag_id, tag_x, tag_y))

                    cv2.circle(frame, (int(tag_x), int(tag_y)), 5, (0, 0, 255))

                    # angle = math.radians(360 - angle)
                    # cv2.line(
                    # frame, (int(x + 800), int(y)), (int(x + 800 + 50 *
                    #                                     math.cos(angle)), int(y + 50*math.sin(angle))), (255, 0, 0), 5)
                    # cv2.line(
                    #     frame, (int(x + 800), int(y)), (int(x))
                    # )
                cv2.imshow("april_tag", frame)

            if k % 256 == 27:
                break
Example #15
    def __init__(self):
        self.detector = None
        if 'apriltag' in sys.modules:
            self.detector = apriltag.Detector(
                searchpath=apriltag._get_demo_searchpath())
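
For the sys.modules check above to succeed, apriltag is typically imported
defensively earlier in the same file, e.g. (a sketch, not this project's
confirmed code):

import sys

try:
    import apriltag
except ImportError:
    pass  # apriltag stays out of sys.modules and the detector stays None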
Example #16
def main():
    transmitCntr = 0
    hostname = socket.gethostname()
    ip_address = socket.gethostbyname(hostname)
    HOST = '192.168.1.78'  # the server's hostname or IP address (this machine is the server)
    PORT = 65432  # the port used by the server
    print(ip_address)
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.bind((HOST, PORT))
    serverSocket.listen()

    robotClient, addr = serverSocket.accept()
    msg = robotClient.recv(1024).decode('ascii')

    if msg == 'RobotConnected':
        print('Starting Localization for 1 robot')

        parser = ArgumentParser(
            description='test apriltag Python bindings')

        parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,
                            help='Movie to load or integer ID of camera device')
        orient = 0
        apriltag.add_arguments(parser)

        options = parser.parse_args()

        try:
            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 2560)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1440)
        except ValueError:
            cap = cv2.VideoCapture(options.device_or_movie)

        window = 'Camera'
        window2 = 'Overlay1'
        cv2.namedWindow(window)
        cv2.namedWindow(window2)

        detector = apriltag.Detector(options,
                                     searchpath=apriltag._get_demo_searchpath())

        font                   = cv2.FONT_HERSHEY_SIMPLEX
        fontScale              = 0.3
        fontColor              = (0,0,255)
        lineType               = 1
        print(type(fontColor))
        while True:

            endpt = [0, 0]
            strtPt = [0, 0]
            strtFlag = False
            endptFlag = False
            success, frame = cap.read()
            if not success:
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            detections, dimg = detector.detect(gray, return_image=True)
            headDir = np.array([0, 0])
            num_detections = len(detections)
            # print('Detected {} tags.\n'.format(num_detections))

            dimg1 = dimg

            for i, detection in enumerate(detections):
                dimg1 = draw(frame, detection.corners)
                center = detection.center
                centerTxt = ((center.ravel()).astype(int)).astype(str)
                if detection.tag_id == 6:
                    strtPt = center
                    strtFlag = True
                    headDir = headingDir(detection.corners, center)
                if detection.tag_id == 9:
                    dimg1 = cv2.putText(dimg1, 'End Point',
                                        tuple((center.ravel()).astype(int)),
                                        font, 0.8, fontColor, 2)
                    dimg1 = cv2.circle(dimg1,
                                       tuple((center.ravel()).astype(int)),
                                       30, (0, 128, 255), 2)
                    endpt = center
                    endptFlag = True
                else:
                    cv2.putText(dimg1, 'Id:' + str(detection.tag_id),
                                tuple((center.ravel()).astype(int)),
                                font, 0.8, (0, 0, 0), 2)
                    botDir = headingDir(detection.corners, center)
                    dimg1 = draw1(dimg1, botDir, center, (0, 0, 255))
                cv2.putText(dimg1, '(' + centerTxt[0] + ',' + centerTxt[1] + ')',
                            tuple((center.ravel()).astype(int) + 10),
                            font, fontScale, (255, 0, 0), lineType)
            if num_detections > 0:
                if endptFlag and strtFlag:
                    orient = getTheta(strtPt, endpt, strtPt, headDir)
                    dimg1 = cv2.putText(dimg1, 'T:' + str(int(orient)),
                                        tuple((strtPt.ravel()).astype(int) + 50),
                                        font, 0.8, (0, 0, 0), 2)
                    dimg1 = draw1ine(dimg1, strtPt, endpt, (255, 0, 255))
                overlay = dimg1
            else:
                overlay = frame

            # Change the following line to get back the connection part.

            transmitCntr = 0
            sndString = (str(strtPt[0]) + ' ' + str(strtPt[1]) + ' ' +
                         str(headDir[0]) + ' ' + str(headDir[1]) + ' ' +
                         str(endptFlag) + ' ' + str(endpt[0]) + ' ' +
                         str(endpt[1]))

            if robotClient.recv(2048).decode('ascii') == 'ok':
                robotClient.send(CalTheta(sndString).encode('ascii'))

        


            cv2.imshow(window, overlay)
Example #17
def tracker(qlist, ids, goals, loc, obs, ob_ids, debug=False):

    parser = ArgumentParser(description='test apriltag Python')

    parser.add_argument('device_or_movie',
                        metavar='INPUT',
                        nargs='?',
                        default=1)

    apriltag.add_arguments(parser)

    options = parser.parse_args()

    cap = cv2.VideoCapture(2)

    # window = 'Camera'
    win2 = "bigger_image"
    cv2.namedWindow(win2)
    # cv2.namedWindow(window)

    dic = {}
    dic2 = {}
    detector = apriltag.Detector(options,
                                 searchpath=apriltag._get_demo_searchpath())
    got_detect = {}
    got_detect2 = {}
    while True:
        # print("visual man loc", loc)
        success, frame = cap.read()
        if not success:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        r, c = gray.shape

        detections, dimg = detector.detect(gray, return_image=True)

        num_detections = len(detections)
        if debug:
            print(c, r)
            print('Detected {} tags.\n'.format(num_detections))

        for i in ids:
            got_detect[i] = False

        for i, detection in enumerate(detections):
            # print('Detection {} of {}:'.format(i+1, num_detections))
            # print()
            # print(detection.tostring(indent=2))
            # print(pose)

            p1 = detection.corners[0]
            p2 = detection.corners[1]
            location = detection.center
            tid = detection.tag_id
            # print("detection_id is", tid)
            got_detect[tid] = True
            orient = orientation(p1, p2, r, c)

            p = detection.center

            x, y = p[0], p[1]
            x, y = change_grid((x, y), r, c)
            x, y = x * scale_x, y * scale_y

            if debug:
                print(p1, p2)
                print(detection.corners)
                print("center is", p)
                print("x and y", x, y, "orientation:",
                      degrees(orientation(p1, p2, r, c)), "tid is", tid)

            if tid in ids:
                # print("found id",tid)
                dic[tid] = (x, y, orient, 1)
                # loc.value[tid] = (x, y, orient,1)
                # print("x and y and orient", x,y,orient)
                got_detect[tid] = True
                # print("loc.value", loc.value, tid)
                if debug:
                    print("stored in", tid)

            if tid in ob_ids:
                # print("**************************")
                # print("Found obstacle {}".format(tid))
                # print("**************************")
                p = detection.center
                x, y = p[0], p[1]
                x, y = change_grid((x, y), r, c)
                x, y = x * scale_x, y * scale_y
                dic2[tid] = (x, y)
                got_detect2[tid] = True

            # print(theta_x, theta_y,theta_z)

        for i in ids:
            if not got_detect[i]:
                dic[i] = (goals[i][0], goals[i][1], 0, 0)
                # loc.value[i] = ((goals[i][0],goals[i][1],0))
        default = {12: (200, -210), 19: (-320, 10)}
        for i in ob_ids:
            if not got_detect2[i]:
                dic2[i] = default[i]
                # loc.value[i] = ((goals[i][0],goals[i][1],0))

        loc.value = dic
        obs.value = dic2
        # print("loc.value", loc.value)

        overlay = frame // 2 + dimg[:, :, None] // 2

        # cv2.imshow(window, overlay)
        scale_percent_r = 160
        scale_percent_c = 150
        width = int(c * scale_percent_c / 100)
        height = int(r * scale_percent_r / 100)
        dim = (width, height)
        resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        cv2.imshow(win2, resized)
        k = cv2.waitKey(1)

        if k == 27:
            break
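
The orientation(), change_grid(), and scale_x/scale_y names used above come
from elsewhere in this project. As an illustration only, change_grid() might
re-center pixel coordinates on the middle of an r-by-c image (an assumption,
not the project's confirmed definition):

def change_grid(p, r, c):
    # Shift the origin from the top-left corner to the image center,
    # flipping y so that up is positive.
    return p[0] - c / 2.0, r / 2.0 - p[1]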
Example #18
def main():
    args = get_args()
    BOARD_TAG_SIZE = args["board"]
    ORIGIN_TAG_SIZE = args["origin"]
    calib_file_name = args["file"]

    # offsets to reposition where (0,0) is
    x_offset = 0
    y_offset = 0

    camera = VideoCapture(0)  # Open the camera and set camera params
    if not camera.isOpened():
        print("Failed to open video capture device")
        exit(0)
    camera.set(CAP_PROP_FRAME_WIDTH, 1280)
    camera.set(CAP_PROP_FRAME_HEIGHT, 720)
    camera.set(CAP_PROP_FPS, 30)

    # Get matrices from file
    calib_file, camera_matrix, dist_coeffs = get_matrices_from_file(
        calib_file_name)
    calib_file.close()
    print("Read from calibration file")
    print("CAMERA MATRIX: {}".format(camera_matrix))
    print("DIST COEFFS: {}".format(dist_coeffs))
    print()
    # Initialize the detector
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    frame = []
    gray = []
    img_points = np.ndarray((4 * NUM_DETECTIONS, 2))
    obj_points = np.ndarray((4 * NUM_DETECTIONS, 3))
    detections = []

    print(
        "The program will now attempt to detect the 4 tags on the axis calibration board"
    )
    print(
        "The four tags should have a red circle on their centers if detected properly."
    )
    print(
        "There will also be a blue circle in the middle of the 4 tags if 4 are detected."
    )
    print("Align the blue dot with the middle of the screen.")
    print("Then, press SPACE.")
    while True:
        frame = get_image(camera)  # take a new picture

        # For weird reasons, anti-distortion measures WORSENED the problem,
        # so they have been removed. If you need to put them back,
        # this is the place for it.

        # Convert undistorted image to grayscale
        gray = cvtColor(frame, COLOR_BGR2GRAY)

        # Use the detector and compute useful values from it
        detections, det_image = detector.detect(gray, return_image=True)

        x_offset = 0
        y_offset = 0

        for i in range(len(detections)):
            d = detections[i]
            id = int(d.tag_id)

            # Add to offsets
            (ctr_x, ctr_y) = d.center
            x_offset += ctr_x
            y_offset += ctr_y

            # Draw onto the frame
            cv2.circle(frame, (int(ctr_x), int(ctr_y)), 5, (0, 0, 255), 3)

        # Draw origin
        if len(detections) == 4:
            cv2.circle(frame, (int(x_offset / 4), int(y_offset / 4)), 5,
                       (255, 0, 0), 3)
        imshow("Calibration board", frame)
        if cv2.waitKey(1) & 0xFF == ord(" "):
            break
        else:
            continue
    cv2.destroyAllWindows()

    # Compute transformation via PnP
    # TODO What's the reasoning for this math?
    # This was from a tutorial somewhere and was directly
    # transcribed from the C++ system.
    for d in detections:
        id = int(d.tag_id)
        img_points[0 + 4 * id] = d.corners[0]
        img_points[1 + 4 * id] = d.corners[1]
        img_points[2 + 4 * id] = d.corners[2]
        img_points[3 + 4 * id] = d.corners[3]
        a = (id % 2) * 2 + 1
        b = -((id // 2) * 2 - 1)  # integer division; under Python 3, / would give a float
        # 8.5 and 11 are letter paper dimensions!
        x1 = -0.5 * BOARD_TAG_SIZE + a * 8.5 * 0.5
        x2 = 0.5 * BOARD_TAG_SIZE + a * 8.5 * 0.5
        y1 = -0.5 * BOARD_TAG_SIZE + b * 11 * 0.5
        y2 = 0.5 * BOARD_TAG_SIZE + b * 11 * 0.5
        obj_points[0 + 4 * id] = (x1, y1, 0.0)
        obj_points[1 + 4 * id] = (x2, y1, 0.0)
        obj_points[2 + 4 * id] = (x2, y2, 0.0)
        obj_points[3 + 4 * id] = (x1, y2, 0.0)

    # Make transform matrices
    ret, rvec, tvec = solvePnP(obj_points, img_points, camera_matrix,
                               dist_coeffs)
    dst, jac = Rodrigues(rvec)

    # Make origin to camera matrix
    """
    The origin_to_camera matrix looks like this:

    dst[0,0] dst[0,1] dst[0,2] tvec[0,0]
    dst[1,0] dst[1,1] dst[1,2] tvec[1,0]
    dst[2,0] dst[2,1] dst[2,2] tvec[2,0]
    0           0       0       1
    """
    temp = np.append(dst, tvec, axis=1)
    temp = np.append(temp, np.array([[0, 0, 0, 1]]), axis=0)
    origin_to_camera = np.asmatrix(temp)
    camera_to_origin = np.linalg.inv(origin_to_camera)
    print("CAMERA TO ORIGIN: {}".format(camera_to_origin))

    # Generate the location of the camera
    # Seems to use a homogenous coordinates system (x,y,z,k)
    gen_out = np.array([0, 0, 0, 1]).T
    camera_coordinates = np.matmul(camera_to_origin, gen_out)
    print("CAMERA COORDINATES: {}".format(camera_coordinates))

    # write matrix to file
    calib_file = open(calib_file_name, "a")
    rows, cols = np.shape(camera_to_origin)
    calib_file.write("transform_matrix =")
    write_matrix_to_file(camera_to_origin, calib_file)

    # Compute offsets via new calibration process
    print("Axis calibration was successful!")
    print("We will now center the camera. Place any apriltag where you would \
        like (0,0) to be.")
    print("A blue dot will appear in the center of the tag you placed to help \
        show where (0,0) will be set to.")
    print("When you have your tag in the right place, press SPACE.")

    while True:
        # Locate tag for use as origin
        frame = get_image(camera)
        gray = cvtColor(frame, COLOR_BGR2GRAY)
        detections, det_image = detector.detect(gray, return_image=True)

        if len(detections) == 0:
            continue
        (x_offset, y_offset, _,
         _) = compute_tag_undistorted_pose(camera_matrix, dist_coeffs,
                                           camera_to_origin, detections[0],
                                           ORIGIN_TAG_SIZE)
        cv2.circle(frame, (int(x_offset), int(y_offset)), 5, (255, 0, 0), 3)

        imshow("Origin tag", frame)
        if cv2.waitKey(1) & 0xFF == ord(" "):
            break
        else:
            continue

    # Write offsets
    calib_file.write("offsets = ")
    calib_file.write(str(-1 * x_offset))
    calib_file.write(" ")
    calib_file.write(str(-1 * y_offset))
    calib_file.write("\n")

    calib_file.close()
    print("Finished writing transformation matrix to calibration file")
    pass
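
The calibration-file helpers used here (get_matrices_from_file,
write_matrix_to_file, get_image, compute_tag_undistorted_pose) live elsewhere
in this project. As an illustration only, write_matrix_to_file() could be as
simple as this sketch; the real on-disk format is an assumption:

def write_matrix_to_file(matrix, f):
    # Append the matrix entries, row by row, to the already-written
    # "transform_matrix =" label, then end the line.
    for row in np.asarray(matrix):
        f.write(" " + " ".join(str(v) for v in row))
    f.write("\n")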
Example #19
def main():
    # DEBUGGING AND TIMING VARIABLES
    past_time = -1  # time to start counting. Set just before first picture taken
    num_frames = 0  # number of frames processed

    args = get_args()
    url = args['url']
    SEND_DATA = (args['url'] is not None)
    calib_file_name = args['file']
    TAG_SIZE = args['size']

    camera = VideoCapture(DEVICE_ID)  # Open the camera & set camera params
    if not camera.isOpened():
        print("Failed to open video capture device")
        exit(0)
    camera.set(CAP_PROP_FRAME_WIDTH, 1280)
    camera.set(CAP_PROP_FRAME_HEIGHT, 720)
    camera.set(CAP_PROP_FPS, 30)

    # Get matrices from calibration file
    print("Parsing calibration file " + calib_file_name + "...")
    calib_file, camera_matrix, dist_coeffs = get_matrices_from_file(
        calib_file_name)
    transform_matrix = get_transform_matrix(calib_file)
    x_offset, y_offset = get_offsets_from_file(calib_file)
    calib_file.close()

    assert (camera_matrix.shape == (3, 3))
    assert (dist_coeffs.shape == (1, 5))
    assert (transform_matrix.shape == (4, 4))

    # print("TRANSFORM MATRIX:\n {}\n".format(transform_matrix))
    print("Calibration file parsed successfully.")
    print("Initializing apriltag detector...")

    # make the detector
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    frame = []
    gray = []

    print("Detector initialized")
    print("")
    print("The program will begin sending data in 3 seconds.")
    print("Press CTRL+C to stop this program.")
    time.sleep(3)
    print("Starting detection")

    img_points = np.ndarray((4, 2))  # 4 2D points
    obj_points = np.ndarray((4, 3))  # 4 3D points
    detections = []
    past_time = time.time()
    while True:
        if not camera.isOpened():
            print("Failed to open camera")
            exit(0)

        # take a picture and get detections
        frame = get_image(camera)
        gray = cvtColor(frame, COLOR_BGR2GRAY)

        # Un-distorting an image worsened distortion effects
        # Uncomment this if needed
        # dst = undistort_image(frame, camera_matrix, dist_coeffs)
        # gray = cvtColor(dst, COLOR_BGR2GRAY)
        detections, det_image = detector.detect(gray, return_image=True)
        if len(detections) == 0:
            continue  # Try again if we don't get anything

        print("Found " + str(len(detections)) + " apriltags")

        for d in detections:
            # TODO draw tag - might be better to generalize, because
            # locate_cameras does this too.

            (x, y, z, angle) = compute_tag_undistorted_pose(
                camera_matrix, dist_coeffs, transform_matrix, d, TAG_SIZE)

            # Scale the coordinates, and print for debugging
            # prints Device ID :: tag id :: x y z angle
            # TODO debug offset method - is better, but not perfect.
            x = MULT_FACTOR * (x + x_offset)
            y = MULT_FACTOR * (y + y_offset)
            # print(tag_xyz)
            print("{} :: {} :: {} {} {} {}".format(
                DEVICE_ID, d.tag_id, x, y, z, angle))

            # Send the data to the URL specified.
            # This is usually a URL to the base station.
            if SEND_DATA:
                payload = {
                    "id": d.tag_id,
                    "x": x,
                    "y": y,
                    "orientation": angle
                }
                r = requests.post(url, json=payload)
                status_code = r.status_code
                if status_code // 100 != 2:
                    # Status codes not starting in '2' are usually error codes.
                    print("WARNING: Basestation returned status code {}".format(
                        status_code))
                else:
                    num_frames += 1
                    print("Vision FPS (Vision System outflow): {}".format(
                        num_frames / (time.time() - past_time)))
    pass
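
The get_offsets_from_file() helper is project code not shown on this page.
Matching the "offsets = x y" line that Example #18 writes, it might look like
this sketch (file format assumed, not confirmed):

def get_offsets_from_file(calib_file):
    # Scan for the "offsets = x y" line and parse the two floats.
    for line in calib_file:
        if line.startswith("offsets"):
            x, y = line.split("=")[1].split()
            return float(x), float(y)
    return 0.0, 0.0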
Example #20
def main():
	global target_x
	global target_y
	global target_angle
	global turning_radius

	DIM = (800, 600)

	# USB Camera Parameters
	K=np.array([[646.986342788419, 0.0, 383.9135552285603], [0.0, 647.4588452248628, 315.82293891405163], [0.0, 0.0, 1.0]])
	D=np.array([[0.40302428743352114], [0.10116712172043976], [0.6206370706341585], [-4.18627051202362]])
	camera_params = (646.986342788419,647.4588452248628,383.9135552285603,315.82293891405163)	

	video_capture = cv2.VideoCapture(0)
	video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT,600)
	video_capture.set(cv2.CAP_PROP_FRAME_WIDTH,800) 
	map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
	detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
	tag_size = 160

	target_x = np.int(input("key in target x coordinates "))
	target_y = np.int(input("key in target y coordinates "))
	target_angle = np.float(input("key in target angle (in degrees from -180 to 180) "))
	turning_radius = np.int(input("key in turning radius (in pixels) "))
	print("target point is "+"("+str(target_x)+","+str(target_y)+")")
	print("traget angle is "+str(target_angle))
	print("turning radius is "+str(turning_radius))
	
	cv2.namedWindow('image')
	cv2.setMouseCallback('image', set_target)
	
	#target_right = (np.int(target_x +math.cos(math.radians(target_angle))*turning_radius),np.int(target_y +math.sin(math.radians(target_angle))*turning_radius))
	#target_left  = (np.int(target_x -math.cos(math.radians(target_angle))*turning_radius),np.int(target_y -math.sin(math.radians(target_angle))*turning_radius))
	while True:
		_, frame = video_capture.read()
		gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		gray_frame = cv2.remap(gray_frame, map1, map2, interpolation = cv2.INTER_LINEAR,borderMode=cv2.BORDER_CONSTANT)
		result = detector.detect(gray_frame)
		
		target_right = (np.int(target_x +math.cos(math.radians(target_angle))*turning_radius),np.int(target_y +math.sin(math.radians(target_angle))*turning_radius))
		target_left  = (np.int(target_x -math.cos(math.radians(target_angle))*turning_radius),np.int(target_y -math.sin(math.radians(target_angle))*turning_radius))
		cv2.circle(gray_frame,target_right,turning_radius,(255,255,255),-1)	
		cv2.circle(gray_frame,target_left,turning_radius,(0,0,0),-1)	
		
		if result:
			start_point = result[0].center.astype(int)
			start_angle = math.degrees(math.atan2(result[0].corners[1][1]-result[0].corners[0][1],result[0].corners[1][0]-result[0].corners[0][0]))
			curr_right = (np.int(start_point[0]+math.cos(math.radians(start_angle))*turning_radius),np.int(start_point[1]+math.sin(math.radians(start_angle))*turning_radius))
			curr_left = (np.int(start_point[0]-math.cos(math.radians(start_angle))*turning_radius),np.int(start_point[1]-math.sin(math.radians(start_angle))*turning_radius))
			cv2.circle(gray_frame,curr_right,turning_radius,(255,255,255),-1)	
			cv2.circle(gray_frame,curr_left,turning_radius,(0,0,0),-1)	

			dist = np.array([	np.linalg.norm(np.array(target_right)-np.array(curr_right)),
								np.linalg.norm(np.array(target_right)-np.array(curr_left)),
								np.linalg.norm(np.array(target_left)-np.array(curr_right)),
								np.linalg.norm(np.array(target_left)-np.array(curr_left))])
			dist[dist<=2*turning_radius] = 100000
			index = np.argmin(dist)
			if index == 0:
				dest_angle = math.degrees(math.atan2(target_right[0]-curr_right[0],target_right[1]-curr_right[1]))
				dest_angle = (180-dest_angle) if dest_angle>0 else -180-dest_angle
				pt1 = (np.int(curr_right[0]-math.cos(math.radians(dest_angle))*turning_radius),np.int(curr_right[1]-math.sin(math.radians(dest_angle))*turning_radius))
				pt2 = (np.int(target_right[0]-math.cos(math.radians(dest_angle))*turning_radius),np.int(target_right[1]-math.sin(math.radians(dest_angle))*turning_radius))
				cv2.line(gray_frame,pt1,pt2,(0,0,0),5) 
				cv2.line(gray_frame,target_right,curr_right,(0,0,0),5) 
			elif index == 1:
				dest_angle = math.degrees(math.atan2(target_right[0]-curr_left[0],target_right[1]-curr_left[1]))
				dest_angle = (180-dest_angle) if dest_angle>0 else -180-dest_angle
				dest_angle = dest_angle + math.degrees(math.acos(turning_radius/(dist[1]/2)))+90
				pt1 = (np.int(curr_left[0]-math.cos(math.radians(dest_angle))*turning_radius),np.int(curr_left[1]-math.sin(math.radians(dest_angle))*turning_radius))
				pt2 = (np.int(target_right[0]+math.cos(math.radians(dest_angle))*turning_radius),np.int(target_right[1]+math.sin(math.radians(dest_angle))*turning_radius))
				cv2.line(gray_frame,pt1,pt2,(255,255,255),5) 
				cv2.line(gray_frame,target_right,curr_left,(0,0,0),5)
			elif index == 2:
				dest_angle = math.degrees(math.atan2(target_left[0]-curr_right[0],target_left[1]-curr_right[1]))
				dest_angle = (180-dest_angle) if dest_angle>0 else -180-dest_angle
				dest_angle = dest_angle - math.degrees(math.acos(turning_radius/(dist[2]/2)))+ 90
				pt1 = (np.int(curr_right[0]-math.cos(math.radians(dest_angle))*turning_radius),np.int(curr_right[1]-math.sin(math.radians(dest_angle))*turning_radius))
				pt2 = (np.int(target_left[0]+math.cos(math.radians(dest_angle))*turning_radius),np.int(target_left[1]+math.sin(math.radians(dest_angle))*turning_radius))
				cv2.line(gray_frame,pt1,pt2,(255,255,255),5) 
				cv2.line(gray_frame,target_left,curr_right,(0,0,0),5)
			else:
				dest_angle = math.degrees(math.atan2(target_left[0]-curr_left[0],target_left[1]-curr_left[1]))
				dest_angle = (180-dest_angle) if dest_angle>0 else -180-dest_angle
				pt1 = (np.int(curr_left[0]+math.cos(math.radians(dest_angle))*turning_radius),np.int(curr_left[1]+math.sin(math.radians(dest_angle))*turning_radius))
				pt2 = (np.int(target_left[0]+math.cos(math.radians(dest_angle))*turning_radius),np.int(target_left[1]+math.sin(math.radians(dest_angle))*turning_radius))
				cv2.line(gray_frame,pt1,pt2,(0,0,0),5) 
				cv2.line(gray_frame,target_left,curr_left,(0,0,0),5)

		cv2.imshow("image",gray_frame)
		key = cv2.waitKey(1)&0xFF
		if key == ord('q'):
			break	
		elif key == ord('w'):
			if turning_radius < 100:
				turning_radius+=1
		elif key == ord('s'):
			if turning_radius > 10:
				turning_radius-=1
		elif key == ord('a'):
			target_angle -= 5
			if target_angle <= -180:
				target_angle = 180
		elif key == ord('d'):
			target_angle += 5
			if target_angle >= 180:
				target_angle = -180
	video_capture.release()
	cv2.destroyAllWindows()
Example #21
def Main():
    reference_tags = [0, 1, 2]
    x_distance = 95  # 2.1844
    y_distance = 67.5  # 1.1938
    global odoData
    global ref_x
    global ref_y
    global orig
    global rotation_matrix
    global transform_matrix
    print(odoData)
    odoData1 = {'robot1': 170, 'robot2': 650}
    window = 'Overlay1'

    cv2.namedWindow(window)

    endpt = [0, 0]

    endptFlag = True
    HOST = '192.168.1.78'  # The server's hostname or IP address
    PORT = 12346
    SERVER_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    SERVER_SOCKET.bind((HOST, PORT))
    print('Socket Bound to Port ', PORT)
    SERVER_SOCKET.listen(5)

    # Starts a thread that looks for connections from the swarmbots

    connection_thread = threading.Thread(target=find_connections,
            args=(SERVER_SOCKET, ))
    connection_thread.start()

    # Localization Code Here

    parser = ArgumentParser(description='test apriltag Python bindings')
    parser.add_argument('device_or_movie', metavar='INPUT', nargs='?',
                        default=0,
                        help='Movie to load or integer ID of camera device'
                        )
    orient = 0
    apriltag.add_arguments(parser)
    options = parser.parse_args()
    try:
        try:
            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        except ValueError:
            cap = cv2.VideoCapture(options.device_or_movie)

        detector = apriltag.Detector(options,
                searchpath=apriltag._get_demo_searchpath())
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.3
        fontColor = (0, 0, 255)
        lineType = 1
        while True:
            odoData1.clear()
            (success, frame) = cap.read()
            if not success:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            (detections, dimg) = detector.detect(gray,
                    return_image=True)
            headDir = np.array([0, 0])
            num_detections = len(detections)
            dimg1 = dimg

            try:

                # Gets the x and y positions of the reference tags

                ref_x = detections[reference_tags[1]].center
                ref_y = detections[reference_tags[2]].center
                orig = detections[reference_tags[0]].center
            except IndexError:
                continue

            # Creates the transform matrix for tags (units are inches);
            # rebuilt each frame rather than appended, so the list does not grow
            transform_matrix = [np.abs(x_distance) / np.abs(ref_x[0] - orig[0]),
                                np.abs(y_distance) / np.abs(orig[1] - ref_y[1])]

            # Creates the rotation matrix for the reference frame

            rotation_matrix = np.array([[0, 1], [-1, 0]])

            for (i, detection) in enumerate(detections):
                dimg1 = draw(frame, detection.corners)
                center = detection.center

                # Gets the center of the tag in inches and rotated accordingly

                center_meters = transform(center)

                # Labels are the position in inches

                posString = '({x:.2f},{y:.2f})'.format(x=center_meters[0],
                                                       y=center_meters[1])
                if not detection.tag_id in reference_tags:

                    # Gets the forward direction

                    (forwardDir, angle) = headingDir(detection.corners,
                            center)

                    # Converts the forward direction to meters

                    forwardDir_meters = transform(forwardDir)

                    # Only draws arrows on tags that are not references

                    dimg1 = draw1(dimg1, forwardDir, center, (0, 0,
                                  255))
                centerTxt = center.ravel().astype(int).astype(str)
                cv2.putText(dimg1, posString,
                            tuple(center.ravel().astype(int) + 10),
                            font, fontScale, (255, 0, 0), lineType)

                if detection.tag_id == int(endPtID):
                    dimg1 = cv2.putText(dimg1, 'End Point',
                                        tuple(center.ravel().astype(int)),
                                        font, 0.8, fontColor, 2)
                    dimg1 = cv2.circle(dimg1,
                                       tuple(center.ravel().astype(int)),
                                       30, (0, 128, 255), 2)
                else:
                    cv2.putText(dimg1, 'Id:' + str(detection.tag_id),
                                tuple(center.ravel().astype(int)),
                                font, 0.8, (0, 0, 0), 2)

                overlay = dimg1
                if not detection.tag_id in reference_tags:

                    # Gets the center position, heading vector and current angle of all non-reference tags

                    odoData1[str(detection.tag_id)] = \
                        [tuple(center_meters)
                         + tuple(forwardDir_meters), angle]
                else:

                    # Only gets the center of reference tags

                    odoData1[str(detection.tag_id)] = \
                        [tuple(center_meters)]
            if len(detections) == 0 and len(odoData1) == 0:
                overlay = frame
                odoData = odoData1.copy()
            elif odoData1:

                odoData = odoData1.copy()

            # print('frame')

            cv2.imshow(window, overlay)
            cv2.waitKey(1)
    except KeyboardInterrupt:

        # Restore the following lines to get back the connection part:
        # sndString = str(strtPt[0]) + ' ' + str(strtPt[1]) + ' ' \
        #     + str(headDir[0]) + ' ' + str(headDir[1]) + ' ' \
        #     + str(endptFlag) + ' ' + str(endpt[0]) + ' ' + str(endpt[1])
        # k = cv2.waitKey(1)

        return
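
Neither `transform` nor `headingDir` appears in the listing above. Below are minimal sketches of what they might look like, assuming the globals created in the loop (`transform_matrix`, `rotation_matrix`, `orig`); the real helpers are defined elsewhere and may differ:

import numpy as np

def transform(point):
    # Sketch: pixels -> inches via the per-axis scale factors, then rotate
    # the camera axes onto the field frame
    scale = np.array(transform_matrix[:2])
    return rotation_matrix.dot((np.asarray(point) - orig) * scale)

def headingDir(corners, center):
    # Sketch: forward direction taken from the tag centre through the
    # midpoint of the corner-0/corner-1 edge; returns (point, angle in deg)
    mid = (np.asarray(corners[0]) + np.asarray(corners[1])) / 2.0
    vec = mid - np.asarray(center)
    return center + 2 * vec, np.degrees(np.arctan2(vec[1], vec[0]))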
Ejemplo n.º 22
0
def main():
    global target_x
    global target_y
    global target_angle
    global turning_radius

    # ROS Publisher Init
    pub = rospy.Publisher('watch_tower_control',
                          watch_tower_control,
                          queue_size=1)
    rospy.init_node('custom_talker', anonymous=True)
    control_signal = watch_tower_control()

    DIM = (800, 600)

    # USB Camera Parameters
    K = np.array([[646.986342788419, 0.0, 383.9135552285603],
                  [0.0, 647.4588452248628, 315.82293891405163],
                  [0.0, 0.0, 1.0]])
    D = np.array([[0.40302428743352114], [0.10116712172043976],
                  [0.6206370706341585], [-4.18627051202362]])
    camera_params = (646.986342788419, 647.4588452248628, 383.9135552285603,
                     315.82293891405163)

    video_capture = cv2.VideoCapture(0)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM,
                                                     cv2.CV_16SC2)
    detector = apriltag.Detector(searchpath=apriltag._get_demo_searchpath())
    tag_size = 160

    target_x = 300
    target_y = 200
    target_angle = 0
    turning_radius = 30
    print("target point is " + "(" + str(target_x) + "," + str(target_y) + ")")
    print("traget angle is " + str(target_angle))
    print("turning radius is " + str(turning_radius))

    cv2.namedWindow('image')
    cv2.setMouseCallback('image', set_target)

    while True:
        _, frame = video_capture.read()
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray_frame = cv2.remap(gray_frame,
                               map1,
                               map2,
                               interpolation=cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT)
        result = detector.detect(gray_frame)

        # The two candidate turning circles, offset +/- turning_radius from
        # the target point along target_angle (drawn white and black).
        # Plain int is used here; np.int was removed in NumPy 1.24.
        target_right = (
            int(target_x +
                math.cos(math.radians(target_angle)) * turning_radius),
            int(target_y +
                math.sin(math.radians(target_angle)) * turning_radius))
        target_left = (
            int(target_x -
                math.cos(math.radians(target_angle)) * turning_radius),
            int(target_y -
                math.sin(math.radians(target_angle)) * turning_radius))
        cv2.circle(gray_frame, target_right, turning_radius,
                   (255, 255, 255), -1)
        cv2.circle(gray_frame, target_left, turning_radius, (0, 0, 0), -1)

        if result:
            start_point = result[0].center.astype(int)
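            # Tag orientation: angle of the corner-0 -> corner-1 edge of
            # the detected tag, in image coordinates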
            start_angle = math.degrees(
                math.atan2(result[0].corners[1][1] - result[0].corners[0][1],
                           result[0].corners[1][0] - result[0].corners[0][0]))
            # The robot's own candidate turning circles, offset from the
            # tag centre along its measured angle
            curr_right = (
                int(start_point[0] +
                    math.cos(math.radians(start_angle)) * turning_radius),
                int(start_point[1] +
                    math.sin(math.radians(start_angle)) * turning_radius))
            curr_left = (
                int(start_point[0] -
                    math.cos(math.radians(start_angle)) * turning_radius),
                int(start_point[1] -
                    math.sin(math.radians(start_angle)) * turning_radius))
            cv2.circle(gray_frame, curr_right, turning_radius,
                       (255, 255, 255), -1)
            cv2.circle(gray_frame, curr_left, turning_radius, (0, 0, 0), -1)

            dist = np.array([
                np.linalg.norm(np.array(target_right) - np.array(curr_right)),
                np.linalg.norm(np.array(target_right) - np.array(curr_left)),
                np.linalg.norm(np.array(target_left) - np.array(curr_right)),
                np.linalg.norm(np.array(target_left) - np.array(curr_left))
            ])
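            # dist holds the centre-to-centre distances of the four circle
            # pairings (right-right, right-left, left-right, left-left);
            # pairs whose circles overlap (distance <= 2 * turning_radius)
            # admit no usable straight tangent, so they are masked out
            # below before the shortest connection is chosen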
            dist[dist <= 2 * turning_radius] = 100000
            index = np.argmin(dist)
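            # Drop to neutral speed (speed_center) once the tag is within
            # 10 px of the target point; otherwise drive at max_speed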
            if np.linalg.norm(
                    np.array([target_x, target_y]) -
                    np.array(start_point)) < 10:
                control_signal.control = [0, speed_center]
            else:
                control_signal.control = [0, max_speed]
            time.sleep(0.01)

            if index == 0:
                dest_angle = math.degrees(
                    math.atan2(target_right[0] - curr_right[0],
                               target_right[1] - curr_right[1]))
                dest_angle = (
                    180 - dest_angle) if dest_angle > 0 else -180 - dest_angle
                pt1 = (int(curr_right[0] -
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(curr_right[1] -
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                pt2 = (int(target_right[0] -
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(target_right[1] -
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                temp_angle = -math.degrees(
                    math.atan2(pt1[0] - pt2[0], pt1[1] - pt2[1]))
                cv2.line(gray_frame, pt1, pt2, (0, 0, 0), 5)
                cv2.line(gray_frame, target_right, curr_right, (0, 0, 0), 5)
                if abs(start_angle - temp_angle) < 10:
                    control_signal.control[0] = center
                    #pwm.set_pwm(1, 0, center)
                else:
                    control_signal.control[0] = left_max
                    #pwm.set_pwm(1, 0, left_max)
                time.sleep(0.01)
            elif index == 1:
                dest_angle = math.degrees(
                    math.atan2(target_right[0] - curr_left[0],
                               target_right[1] - curr_left[1]))
                dest_angle = (
                    180 - dest_angle) if dest_angle > 0 else -180 - dest_angle
                dest_angle = dest_angle + math.degrees(
                    math.acos(turning_radius / (dist[1] / 2))) + 90
                pt1 = (int(curr_left[0] -
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(curr_left[1] -
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                pt2 = (int(target_right[0] +
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(target_right[1] +
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                temp_angle = -math.degrees(
                    math.atan2(pt1[0] - pt2[0], pt1[1] - pt2[1]))
                cv2.line(gray_frame, pt1, pt2, (255, 255, 255), 5)
                cv2.line(gray_frame, target_right, curr_left, (0, 0, 0), 5)
                if abs(start_angle - temp_angle) < 10:
                    control_signal.control[0] = center
                    #pwm.set_pwm(1, 0, center)
                else:
                    control_signal.control[0] = right_max
                    #pwm.set_pwm(1, 0, right_max)
                time.sleep(0.01)
                #print(start_angle,temp_angle)
            elif index == 2:
                dest_angle = math.degrees(
                    math.atan2(target_left[0] - curr_right[0],
                               target_left[1] - curr_right[1]))
                dest_angle = (
                    180 - dest_angle) if dest_angle > 0 else -180 - dest_angle
                dest_angle = dest_angle - math.degrees(
                    math.acos(turning_radius / (dist[2] / 2))) + 90
                pt1 = (int(curr_right[0] -
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(curr_right[1] -
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                pt2 = (int(target_left[0] +
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(target_left[1] +
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                temp_angle = -math.degrees(
                    math.atan2(pt1[0] - pt2[0], pt1[1] - pt2[1]))
                cv2.line(gray_frame, pt1, pt2, (255, 255, 255), 5)
                cv2.line(gray_frame, target_left, curr_right, (0, 0, 0), 5)
                if abs(start_angle - temp_angle) < 10:
                    control_signal.control[0] = center
                    #pwm.set_pwm(1, 0, center)
                else:
                    control_signal.control[0] = left_max
                    #pwm.set_pwm(1, 0, left_max)
                time.sleep(0.01)
                #print(start_angle,temp_angle)
            else:
                dest_angle = math.degrees(
                    math.atan2(target_left[0] - curr_left[0],
                               target_left[1] - curr_left[1]))
                dest_angle = (
                    180 - dest_angle) if dest_angle > 0 else -180 - dest_angle
                pt1 = (int(curr_left[0] +
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(curr_left[1] +
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                pt2 = (int(target_left[0] +
                           math.cos(math.radians(dest_angle)) *
                           turning_radius),
                       int(target_left[1] +
                           math.sin(math.radians(dest_angle)) *
                           turning_radius))
                temp_angle = -math.degrees(
                    math.atan2(pt1[0] - pt2[0], pt1[1] - pt2[1]))
                cv2.line(gray_frame, pt1, pt2, (0, 0, 0), 5)
                cv2.line(gray_frame, target_left, curr_left, (0, 0, 0), 5)
                if abs(start_angle - temp_angle) < 10:
                    control_signal.control[0] = center
                    #pwm.set_pwm(1, 0, center)
                else:
                    control_signal.control[0] = right_max
                    #pwm.set_pwm(1, 0, right_max)
                time.sleep(0.01)
                #print(start_angle,temp_angle)
        else:
            control_signal.control = [center, 400]
            #pwm.set_pwm(2, 0, 400)

        pub.publish(control_signal)
        cv2.imshow("image", gray_frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('w'):
            if turning_radius < 100:
                turning_radius += 1
        elif key == ord('s'):
            if turning_radius > 10:
                turning_radius -= 1
        elif key == ord('a'):
            target_angle -= 5
            if target_angle <= -180:
                target_angle = 180
        elif key == ord('d'):
            target_angle += 5
            if target_angle >= 180:
                target_angle = -180
    video_capture.release()
    cv2.destroyAllWindows()
    control_signal.control = [center, 400]
    pub.publish(control_signal)
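
A note on the geometry: the `math.acos(turning_radius / (dist / 2))` terms in the index 1 and 2 branches come from the inner tangent between two equal-radius circles whose centres are `dist` apart; the tangent radius sits at an angle of acos(r / (d/2)) from the centre line. A standalone sketch of that computation (the function name is illustrative, not from the listing):

import math

def inner_tangent_points(c1, c2, r):
    # Tangent points of the inner (crossing) tangent between two circles
    # of equal radius r centred at c1 and c2; assumes d > 2r, which is
    # exactly what the dist masking above guarantees
    dx, dy = c2[0] - c1[0], c2[1] - c1[1]
    d = math.hypot(dx, dy)
    base = math.atan2(dy, dx)          # direction of the centre line
    offset = math.acos(r / (d / 2))    # angle from centre line to radius
    t1 = (c1[0] + r * math.cos(base + offset),
          c1[1] + r * math.sin(base + offset))
    t2 = (c2[0] - r * math.cos(base + offset),
          c2[1] - r * math.sin(base + offset))
    return t1, t2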