def main(serial):
    # Read camera parameters
    image_width, image_height, camera_matrix, dist_coeffs = utils.get_camera_params(serial)

    # Set up webcam
    cap = utils.get_video_cap(serial, image_width, image_height)

    # Set up aruco dict
    aruco_dict = cv2.aruco.Dictionary_get(utils.get_marker_dict_id())

    while True:
        if cv2.waitKey(1) == 27:  # Esc key
            break

        image = None
        while image is None:
            _, image = cap.read()

        # Undistort image and detect markers
        image = cv2.undistort(image, camera_matrix, dist_coeffs)
        corners, ids, _ = cv2.aruco.detectMarkers(image, aruco_dict)

        # Show detections
        image_copy = image.copy()
        if ids is not None:
            cv2.aruco.drawDetectedMarkers(image_copy, corners, ids)
        cv2.imshow('out', image_copy)

    cap.release()
    cv2.destroyAllWindows()
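
A minimal way to invoke this snippet (hypothetical; the expected serial value is whatever `utils.get_camera_params` and `utils.get_video_cap` accept):

if __name__ == '__main__':
    import sys
    main(sys.argv[1])  # e.g. the camera's serial number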
Example #2
def __init__(self, serial):
    # Requires: from queue import Queue; from threading import Thread
    image_width, image_height, camera_matrix, dist_coeffs = utils.get_camera_params(serial)
    # Precompute the undistortion maps once so each frame can be remapped cheaply
    self._map_x, self._map_y = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, None, camera_matrix, (image_width, image_height), cv2.CV_32FC1)
    self._cap = utils.get_video_cap(serial, image_width, image_height)
    # Single-slot queue: only the most recent frame is kept
    self._queue = Queue(maxsize=1)
    self._thread = Thread(target=self._worker)
    self._thread.start()
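
The `_worker` method is not shown in this snippet; a minimal sketch, assuming it just reads frames, undistorts them with the precomputed maps, and keeps only the newest frame in the single-slot queue:

def _worker(self):
    while True:
        _, image = self._cap.read()
        if image is None:
            continue
        # Undistort using the maps computed in __init__
        image = cv2.remap(image, self._map_x, self._map_y, cv2.INTER_LINEAR)
        # Drop any stale frame so the queue always holds the latest one
        if self._queue.full():
            try:
                self._queue.get_nowait()
            except Exception:
                pass
        self._queue.put(image)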
Example #3
def main(args):
    # Read camera parameters
    camera_params_file_path = utils.get_camera_params_file_path(
        args.camera_name)
    image_width, image_height, camera_matrix, dist_coeffs = utils.get_camera_params(
        camera_params_file_path)

    # Set up webcam
    cap = utils.get_video_cap(image_width, image_height, args.camera_id)

    # Set up aruco dict
    params = utils.get_marker_parameters()
    aruco_dict = cv2.aruco.Dictionary_get(params['dict_id'])

    # Enable corner refinement
    #detector_params = cv2.aruco.DetectorParameters_create()
    #detector_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

    while True:
        if cv2.waitKey(1) == 27:  # Esc key
            break

        _, image = cap.read()
        if image is None:
            continue

        # Undistort image and detect markers
        image = cv2.undistort(image, camera_matrix, dist_coeffs)
        #corners, ids, _ = cv2.aruco.detectMarkers(image, aruco_dict, parameters=detector_params)
        corners, ids, _ = cv2.aruco.detectMarkers(image, aruco_dict)

        # Show detections
        image_copy = image.copy()
        if ids is not None:
            cv2.aruco.drawDetectedMarkers(image_copy, corners, ids)
        cv2.imshow('out', image_copy)

    cap.release()
    cv2.destroyAllWindows()
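
This snippet only defines `main`; a hypothetical entry point, assuming argparse options matching the `args.camera_name` and `args.camera_id` attributes it reads:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--camera-name', default='camera')  # hypothetical default
    parser.add_argument('--camera-id', type=int, default=0)  # hypothetical default
    main(parser.parse_args())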
Example #4
"""
Ping pong game
"""

import sys

import cv2
import numpy as np

import obj_loader
from utils import (init_game, draw_game, update_game, load_ref_images,
                   get_camera_params, get_homographies_contour)

if __name__ == "__main__":
    REF_IMAGES, REF_DSC = load_ref_images()
    VID_FEED = cv2.VideoCapture(-1)
    CAM_MAT = get_camera_params()
    GAME_STATE = None

    flag_g = True
    flag_b = True
    MATCH_DATA = [None, None]
    CORNERS = [None, None]
    while True:
        RET, FRAME = VID_FEED.read()
        if not RET:
            print("Unable to capture video")
            sys.exit()
        elif GAME_STATE is None:
            SIZE = FRAME.shape
            GAME_STATE = init_game(SIZE)
Example #5

def extract_features_labelled(drc, config_file, dir_labels, mhad_color_map,
                              subjects, actions, recordings):

    # Fetch Camera parameters
    fx1, fy1, cx1, cy1, fx2, fy2, cx2, cy2, h1, h2 = get_camera_params(
        config_file)

    for sub in range(1, subjects + 1):
        for act in range(1, actions + 1):
            for rec in range(1, recordings + 1):
                try:
                    skeleton_file = drc + 'Skeletons/skl_s%02d_a%02d_r%02d.mat' % (
                        sub, act, rec)
                    im_mc, im_mc2, skel_jnt, center, stand = get_skeleton_info(
                        skeleton_file)

                    print('\nExtraction of PCL in subject ' + str(sub) +
                          ' action ' + str(act) + ' recording ' + str(rec))

                    for i in tqdm(range(min(im_mc.shape[1], im_mc2.shape[1]))):
                        try:

                            # if not os.path.exists(out_pc_name):
                            d1 = misc.imread(
                                '%s/Kinect/Kin01/S%02d/A%02d/R%02d/kin_k01_s%02d_a%02d_r%02d_depth_%05d.pgm'
                                % (drc, sub, act, rec, sub, act, rec, i))
                            d2 = misc.imread(
                                '%s/Kinect/Kin02/S%02d/A%02d/R%02d/kin_k02_s%02d_a%02d_r%02d_depth_%05d.pgm'
                                % (drc, sub, act, rec, sub, act, rec, i))

                            # Apply inverse projection to fetch point clouds in mm and corresponding indices
                            pts1, indices1, mask1 = map_range_2_pc(d1,
                                                                   fx1,
                                                                   fy1,
                                                                   cx1,
                                                                   cy1,
                                                                   h1,
                                                                   center,
                                                                   stand,
                                                                   thresh=5)

                            pts2, indices2, mask2 = map_range_2_pc(d2,
                                                                   fx2,
                                                                   fy2,
                                                                   cx2,
                                                                   cy2,
                                                                   h2,
                                                                   center,
                                                                   stand,
                                                                   thresh=5)

                            skel = skel_jnt.T
                            jnt = np.reshape(skel[int(im_mc[2, i]) + 1, :],
                                             (35, 3)).T

                            # For combined PC
                            # out_pc_name = os.path.join(OUT_PC_DIR,
                            #                            'sub_%02d_act_%02d_rec_%02d_cap_%05d.pkl' % (
                            #                            sub, act, rec, i))

                            # For single view
                            out_pc1_name = os.path.join(
                                OUT_PC1_DIR,
                                'sub_%02d_act_%02d_rec_%02d_cap_cam01_%05d.pkl'
                                % (sub, act, rec, i))

                            out_pc2_name = os.path.join(
                                OUT_PC2_DIR,
                                'sub_%02d_act_%02d_rec_%02d_cap_cam02_%05d.pkl'
                                % (sub, act, rec, i))

                            # To store the GT Joint locations
                            # out_gt_name = os.path.join(OUT_GT_DIR,
                            #                            'sub_%02d_act_%02d_rec_%02d_cap_%05d.pkl' % (
                            #                            sub, act, rec, i))

                            # pickle.dump(jnt, open(out_gt_name, 'wb'))
                            # plot_basic_object(pts1[[1, 2, 0], :], jnt)
                            # plot_basic_object(pts2[[1, 2, 0], :], jnt)
                            #
                            # pickle.dump(pts1[[1, 2, 0], :], open(out_pc1_name, 'wb'))
                            # pickle.dump(pts2[[1, 2, 0], :], open(out_pc2_name, 'wb'))

                            # full_pc = np.hstack([pts1, pts2])
                            # full_pc = full_pc[[1, 2, 0], :]
                            #
                            # pickle.dump(full_pc, open(out_pc_name, 'wb'))

                        except (IndexError, IOError) as ee:
                            print('\nNumber ' + str(i) + ' image is empty!')

                except IOError:
                    print('\nFile does not exist!')

    return 0
Example #6

def extract_features_raw(drc, config_file, subjects, actions, recordings):
    feature_dictionary = dict((str(k), []) for k in range(1, subjects + 1))
    gt_dictionary = dict((str(k), []) for k in range(1, subjects + 1))

    # Fetch Camera parameters
    fx1, fy1, cx1, cy1, fx2, fy2, cx2, cy2, h1, h2 = utils.get_camera_params(
        config_file)

    for sub in range(1, subjects + 1):

        # Declare Feature and ground truth lists
        feature, gt = np.array([]), np.array([])

        for act in range(1, actions + 1):
            for rec in range(1, recordings + 1):
                try:
                    skeleton_file = drc + 'Skeletons/skl_s%02d_a%02d_r%02d.mat' % (
                        sub, act, rec)
                    im_mc, im_mc2, skel_jnt, center, stand = utils.get_skeleton_info(
                        skeleton_file)

                    print('\nExtraction of features in subject ' + str(sub) +
                          ' action ' + str(act) + ' recording ' + str(rec))

                    for i in tqdm(range(min(im_mc.shape[1], im_mc2.shape[1]))):
                        try:
                            # Read Depth data from two kinects
                            d1 = misc.imread(
                                '%s/Kinect/Kin01/S%02d/A%02d/R%02d/kin_k01_s%02d_a%02d_r%02d_depth_%05d.pgm'
                                % (drc, sub, act, rec, sub, act, rec, i))
                            d2 = misc.imread(
                                '%s/Kinect/Kin02/S%02d/A%02d/R%02d/kin_k02_s%02d_a%02d_r%02d_depth_%05d.pgm'
                                % (drc, sub, act, rec, sub, act, rec, i))

                            # Apply inverse projection to fetch point clouds in cm
                            pts1, __, __ = utils.map_range_2_pc(d1,
                                                                fx1,
                                                                fy1,
                                                                cx1,
                                                                cy1,
                                                                h1,
                                                                center,
                                                                stand,
                                                                thresh=5)
                            pts2, __, __ = utils.map_range_2_pc(d2,
                                                                fx2,
                                                                fy2,
                                                                cx2,
                                                                cy2,
                                                                h2,
                                                                center,
                                                                stand,
                                                                thresh=5)

                            full_pc = np.hstack([pts1, pts2])
                            full_pc = full_pc[[1, 2, 0], :]
                            plotxyz(full_pc.T, color='b', hold=False)

                            skel = skel_jnt.T
                            jnt = np.reshape(skel[int(im_mc[2, i]) + 1, :],
                                             (35, 3)).T

                            plotxyz(jnt.T, color='r', hold=False)

                            # nearest neighbour
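                            # full_pc.T is (N, 3) and the reshaped joints are
                            # (35, 1, 3), so the difference broadcasts to
                            # (35, N, 3); argmin then assigns each point to
                            # its nearest joint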
                            diff = full_pc.T - np.reshape(jnt, (3, 1, 35)).T
                            dfsq = np.sqrt(np.sum((diff**2), axis=2))
                            label_idx = np.argmin(dfsq, axis=0)
                            full_pcl = np.vstack([
                                full_pc,
                                np.reshape(label_idx, (1, label_idx.shape[0]))
                            ])

                            # store joint-wise features
                            feature_image = np.array([])

                            for joint in range(jnt.shape[1]):
                                ind_seg = full_pcl[3, :] == joint
                                pcl_joint = full_pcl[0:3, ind_seg]
                                pcl_joint[np.isnan(pcl_joint)] = 0

                                # extract moments
                                med_joint = np.median(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                med_joint[np.isnan(med_joint)] = 0

                                std_joint = np.std(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                std_joint[np.isnan(std_joint)] = 0

                                min_joint = np.min(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                min_joint[np.isnan(min_joint)] = 0

                                max_joint = np.max(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                max_joint[np.isnan(max_joint)] = 0

                                cov_joint = np.cov(
                                    pcl_joint) if pcl_joint.size else np.zeros(
                                        (3, 3))
                                cov_joint[np.isnan(cov_joint)] = 0

                                eig_joint = np.linalg.eigvals(cov_joint)
                                eig_joint[np.isnan(eig_joint)] = 0

                                feature_joint = np.concatenate([
                                    np.ravel(med_joint),
                                    np.ravel(std_joint),
                                    np.ravel(min_joint),
                                    np.ravel(max_joint),
                                    np.ravel(eig_joint),
                                    np.ravel(cov_joint),
                                ],
                                                               axis=0)

                                feature_image = np.concatenate([np.ravel(feature_image), np.ravel(feature_joint)]) \
                                    if feature_image.size else feature_joint

                            feature = np.vstack([
                                feature, feature_image
                            ]) if feature.size else feature_image
                            # print feature.shape
                            gt = np.vstack([gt, np.ravel(jnt)
                                            ]) if gt.size else np.ravel(jnt)

                        except IndexError:
                            print('\nNumber ' + str(i) + ' image is empty!')

                except IOError:
                    print('\nFile does not exist!')

        feature_dictionary[str(sub)].append(feature)
        gt_dictionary[str(sub)].append(gt)

    return feature_dictionary, gt_dictionary
Example #7

def extract_features_labelled(drc, config_file, dir_labels, mhad_color_map,
                              subjects, actions, recordings):

    # Fetch Camera parameters
    fx1, fy1, cx1, cy1, fx2, fy2, cx2, cy2, h1, h2 = utils.get_camera_params(
        config_file)

    for sub in range(1, subjects + 1):

        # Note: these dicts are re-created for every subject, so each pickle
        # dump below contains only the current subject's data
        feature_dictionary = {str(sub): []}
        gt_dictionary = {str(sub): []}

        # Declare Feature and ground truth lists
        feature, gt = np.array([]), np.array([])

        for act in range(1, actions + 1):
            for rec in range(1, recordings + 1):
                try:
                    skeleton_file = drc + 'Skeletons/skl_s%02d_a%02d_r%02d.mat' % (
                        sub, act, rec)
                    im_mc, im_mc2, skel_jnt, center, stand = utils.get_skeleton_info(
                        skeleton_file)

                    print('\nExtraction of features in subject ' + str(sub) +
                          ' action ' + str(act) + ' recording ' + str(rec))

                    for i in tqdm(range(min(im_mc.shape[1], im_mc2.shape[1]))):
                        try:
                            # Read Depth data from two kinects
                            d1 = misc.imread(
                                '%s/Kinect/Kin01/S%02d/A%02d/R%02d/kin_k01_s%02d_a%02d_r%02d_depth_%05d.pgm'
                                % (drc, sub, act, rec, sub, act, rec, i))
                            d2 = misc.imread(
                                '%s/Kinect/Kin02/S%02d/A%02d/R%02d/kin_k02_s%02d_a%02d_r%02d_depth_%05d.pgm'
                                % (drc, sub, act, rec, sub, act, rec, i))

                            # read in the color data
                            il1 = misc.imread(
                                '%s/Kin01/S%02d/A%02d/R%02d/kin_k01_s%02d_a%02d_r%02d_depth_%05d.png'
                                %
                                (dir_labels, sub, act, rec, sub, act, rec, i))
                            il2 = misc.imread(
                                '%s/Kin02/S%02d/A%02d/R%02d/kin_k02_s%02d_a%02d_r%02d_depth_%05d.png'
                                %
                                (dir_labels, sub, act, rec, sub, act, rec, i))

                            # Apply inverse projection to fetch point clouds in mm and corresponding indices
                            pts1, indices1, mask1 = utils.map_range_2_pc(
                                d1,
                                fx1,
                                fy1,
                                cx1,
                                cy1,
                                h1,
                                center,
                                stand,
                                thresh=5)
                            pts2, indices2, mask2 = utils.map_range_2_pc(
                                d2,
                                fx2,
                                fy2,
                                cx2,
                                cy2,
                                h2,
                                center,
                                stand,
                                thresh=5)

                            idx_mask1, idx_mask2 = (mask1 == 0), (mask2 == 0)

                            il1[idx_mask1], il2[idx_mask2] = 0, 0

                            # get pixel level classes for all the body parts (+ Background)
                            lbl1 = utils.get_pixel_level_classes(
                                il1, mhad_color_map)
                            lbl2 = utils.get_pixel_level_classes(
                                il2, mhad_color_map)

                            label_idx1 = np.zeros(indices1.shape[0])
                            label_idx2 = np.zeros(indices2.shape[0])

                            for index in range(indices1.shape[0]):
                                label_idx1[index] = lbl1[indices1[index, 1],
                                                         indices1[index, 0]]

                            for index in range(indices2.shape[0]):
                                label_idx2[index] = lbl2[indices2[index, 1],
                                                         indices2[index, 0]]

                            full_pc = np.hstack([pts1, pts2])
                            full_pc = full_pc[[1, 2, 0], :]

                            # plotxyz(full_pc.T, color='b', hold=False)

                            skel = skel_jnt.T
                            jnt = np.reshape(skel[int(im_mc[2, i]) + 1, :],
                                             (35, 3)).T

                            # plotxyz(jnt.T, color='r', hold=False)

                            label_idx = np.hstack([label_idx1, label_idx2])

                            full_pcl = np.vstack([
                                full_pc,
                                np.reshape(label_idx, (1, label_idx.shape[0]))
                            ])

                            # store joint-wise features
                            feature_image = np.array([])

                            for joint in range(jnt.shape[1]):
                                ind_seg = full_pcl[3, :] == joint
                                pcl_joint = full_pcl[0:3, ind_seg]
                                pcl_joint[np.isnan(pcl_joint)] = 0

                                # extract moments
                                med_joint = np.median(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                med_joint[np.isnan(med_joint)] = 0

                                std_joint = np.std(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                std_joint[np.isnan(std_joint)] = 0

                                min_joint = np.min(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                min_joint[np.isnan(min_joint)] = 0

                                max_joint = np.max(
                                    pcl_joint,
                                    1) if pcl_joint.size else np.zeros(3)
                                max_joint[np.isnan(max_joint)] = 0

                                cov_joint = np.cov(
                                    pcl_joint) if pcl_joint.size else np.zeros(
                                        (3, 3))
                                cov_joint[np.isnan(cov_joint)] = 0

                                eig_joint = np.linalg.eigvals(cov_joint)
                                eig_joint[np.isnan(eig_joint)] = 0

                                feature_joint = np.concatenate([
                                    np.ravel(med_joint),
                                    np.ravel(std_joint),
                                    np.ravel(min_joint),
                                    np.ravel(max_joint),
                                    np.ravel(eig_joint),
                                    np.ravel(cov_joint),
                                ],
                                                               axis=0)

                                feature_image = np.concatenate([np.ravel(feature_image), np.ravel(feature_joint)]) \
                                    if feature_image.size else feature_joint

                            feature = np.vstack([
                                feature, feature_image
                            ]) if feature.size else feature_image
                            # print feature.shape
                            gt = np.vstack([gt, np.ravel(jnt)
                                            ]) if gt.size else np.ravel(jnt)

                        except (IndexError, IOError) as ee:
                            print('\nNumber ' + str(i) + ' image is empty!')

                except IOError:
                    print('\nFile does not exist!')

        feature_dictionary[str(sub)].append(feature)
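        # pickle_fv_inferred and pickle_gt_inferred are open file handles
        # defined outside this snippet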
        pickle.dump(feature_dictionary, pickle_fv_inferred)

        gt_dictionary[str(sub)].append(gt)
        pickle.dump(gt_dictionary, pickle_gt_inferred)

    return feature_dictionary, gt_dictionary
Example #8
import obj_loader
from utils import (draw_harris_kps, draw_rectangle, get_camera_params,
                   get_homographies_contour, get_matrix, load_ref_images,
                   render, draw_corners)

RECTANGLE = False  # Display bounding rectangle or not
DRAW_MATCHES = False  # Draw matches
DRAW_HARRIS = False

if __name__ == "__main__":

    OBJ_PATH = sys.argv[1]

    OBJ = obj_loader.OBJ(OBJ_PATH, swapyz=True)
    REF_IMAGES, REF_DSC = load_ref_images(1)
    CAM_PARAMS = get_camera_params()
    VID_FEED = cv2.VideoCapture(-1)
    MATCH_DATA = [None, None]

    while True:
        RET, FRAME = VID_FEED.read()
        if not RET:
            print("Unable to capture video")
            sys.exit()

        if DRAW_HARRIS:
            FRAME = draw_harris_kps(FRAME)

        # MATCH_DATA = find_homographies(REF_DSC, FRAME)
        MATCH_DATA, _ = get_homographies_contour(FRAME, REF_IMAGES, MATCH_DATA,
                                                 None)
Example #9
def main(args):
    # Read camera parameters
    camera_params_file_path = utils.get_camera_params_file_path(
        args.camera_name)
    image_width, image_height, camera_matrix, dist_coeffs = utils.get_camera_params(
        camera_params_file_path)

    # Set up webcam
    cap = utils.get_video_cap(image_width, image_height, args.camera_id)

    # Board and marker params
    boards = [{
        'name': 'robots',
        'corner_offset_mm': 36
    }, {
        'name': 'cubes',
        'corner_offset_mm': 12
    }]
    marker_params = utils.get_marker_parameters()
    room_length_mm = 1000 * args.room_length
    room_width_mm = 1000 * args.room_width
    room_length_pixels = int(room_length_mm * marker_params['pixels_per_mm'])
    room_width_pixels = int(room_width_mm * marker_params['pixels_per_mm'])

    # Set up aruco dicts
    for board in boards:
        aruco_dict = cv2.aruco.Dictionary_get(marker_params['dict_id'])
        aruco_dict.bytesList = aruco_dict.bytesList[utils.get_marker_ids(
            'corners_{}'.format(board['name']))]
        board['board_dict'] = aruco_dict
        aruco_dict = cv2.aruco.Dictionary_get(marker_params['dict_id'])
        aruco_dict.bytesList = aruco_dict.bytesList[utils.get_marker_ids(
            board['name'])]
        board['marker_dict'] = aruco_dict
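    # (slicing aruco_dict.bytesList above restricts each dictionary to the
    # chosen marker IDs, so detectMarkers will only report those markers)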

    # Board warping
    for board in boards:
        corner_offset_pixels = board['corner_offset_mm'] * marker_params[
            'pixels_per_mm']
        board['src_points'] = None
        board['dst_points'] = [
            [-corner_offset_pixels, -corner_offset_pixels],
            [room_length_pixels + corner_offset_pixels, -corner_offset_pixels],
            [
                room_length_pixels + corner_offset_pixels,
                room_width_pixels + corner_offset_pixels
            ],
            [-corner_offset_pixels, room_width_pixels + corner_offset_pixels]
        ]

    # Enable corner refinement
    detector_params = cv2.aruco.DetectorParameters_create()
    detector_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

    # Set up server
    address = ('localhost', 6000)
    listener = Listener(address, authkey=b'secret password')
    conn = None

    def process_image(image):
        # Undistort image
        image = cv2.undistort(image, camera_matrix, dist_coeffs)

        data = {}
        for board in boards:
            board_name = board['name']
            data[board_name] = {}

            if board['src_points'] is None:
                # Detect board markers (assumes board won't move since this is only done once)
                #board_corners, board_indices, _ = cv2.aruco.detectMarkers(image, board['board_dict'], parameters=detector_params)
                board_corners, board_indices, _ = cv2.aruco.detectMarkers(
                    image, board['board_dict'])

                # Show detections
                if args.debug:
                    image_copy = image.copy()
                    if board_indices is not None:
                        cv2.aruco.drawDetectedMarkers(image_copy,
                                                      board_corners,
                                                      board_indices)
                    cv2.imshow('{}_board_corners'.format(board_name),
                               image_copy)

                # Ensure board was found
                if board_indices is None or len(board_indices) < 4:
                    # None rather than {} to signify the board was not detected
                    data[board_name] = None
                    continue

                board['src_points'] = []
                for marker_index, corner in sorted(
                        zip(board_indices, board_corners)):
                    board['src_points'].append(
                        corner.squeeze(0).mean(axis=0).tolist())
            else:
                # Warp the board
                M = cv2.getPerspectiveTransform(
                    np.asarray(board['src_points'], dtype=np.float32),
                    np.asarray(board['dst_points'], dtype=np.float32))
                warped_image = cv2.warpPerspective(
                    image, M, (room_length_pixels, room_width_pixels))

                # Detect markers in warped image
                corners, indices, _ = cv2.aruco.detectMarkers(
                    warped_image,
                    board['marker_dict'],
                    parameters=detector_params)

                # Show detections
                if args.debug:
                    image_copy = warped_image.copy()
                    if indices is not None:
                        cv2.aruco.drawDetectedMarkers(image_copy, corners,
                                                      indices)
                    image_copy = cv2.resize(image_copy,
                                            (int(image_copy.shape[1] / 2),
                                             int(image_copy.shape[0] / 2)))
                    cv2.imshow(board_name, image_copy)

                if indices is None:
                    continue

                # Compute poses
                board_data = {}
                for marker_index, corner in zip(indices, corners):
                    marker_index = marker_index.item()
                    marker_corners = corner.squeeze(0)
                    marker_center = marker_corners.mean(axis=0)

                    # Compute heading estimates for each of the four marker corners
                    diffs = [c - marker_center for c in marker_corners]
                    angles = np.array(
                        [np.arctan2(-diff[1], diff[0]) for diff in diffs])
                    angles = angles + np.radians([-135, -45, 45, 135])
                    angles = np.mod(angles + np.pi, 2 * np.pi) - np.pi
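                    # (in corner order TL, TR, BR, BL the raw angles are the
                    # heading plus 135, 45, -45, -135 degrees, so the offsets
                    # above turn all four corners into independent estimates
                    # of the same heading)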

                    # If multiple markers are detected on same cube, use the marker on top (which should have the lowest angle_std)
                    angle_std = angles.std()
                    if board_name == 'cubes' and marker_index in board_data and angle_std > board_data[
                            marker_index]['angle_std']:
                        continue

                    # Compute position and heading
                    position = [
                        (marker_center[0] / marker_params['pixels_per_mm'] -
                         room_length_mm / 2) / 1000,
                        (room_width_mm / 2 -
                         (marker_center[1] / marker_params['pixels_per_mm'])) /
                        1000
                    ]
                    heading = angles.mean()
                    marker_data = {'position': position, 'heading': heading}
                    if board_name == 'cubes':
                        marker_data['angle_std'] = angle_std
                    board_data[marker_index] = marker_data
                data[board_name] = board_data

        return data

    while True:
        if cv2.waitKey(1) == 27:  # Esc key
            break

        if conn is None:
            print('Waiting for connection...')
            conn = listener.accept()
            print('Connected!')

        _, image = cap.read()
        if image is None:
            continue

        data = process_image(image)
        try:
            conn.send(data)
        except:
            conn = None

    cap.release()
    cv2.destroyAllWindows()
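
The loop above publishes pose data through the multiprocessing.connection Listener set up earlier; a minimal client sketch, assuming the same address and authkey:

from multiprocessing.connection import Client

conn = Client(('localhost', 6000), authkey=b'secret password')
while True:
    data = conn.recv()
    # data maps each board name ('robots', 'cubes') to a dict of marker poses
    # ({marker_index: {'position': [x, y], 'heading': ...}}), or to None if
    # the board corners have not been detected yet
    print(data)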