Example #1
def setobj(flag):
    cap = cv2.VideoCapture(1)
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    #	print w
    #	print h
    time.sleep(0)
    while True:
        ret, frame = cap.read()

        framebw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, _ = aruco.detectMarkers(framebw,
                                              aruco_dict,
                                              parameters=parameters)
        #		print ids
        if ids is None:  # no markers detected in this frame
            continue
        t = len(ids)  # total number of detected aruco markers
        #		print t
        l = 0
        l1 = 0
        for i in range(0, t):
            z = int(ids[i][0])
            #			print'>>>>>>>'
            cv2.circle(frame, (corners[i][0][0][0], corners[i][0][0][1]), 5,
                       (125, 125, 125), -1)
            cv2.circle(frame, (corners[i][0][1][0], corners[i][0][1][1]), 5,
                       (0, 255, 0), -1)
            cv2.circle(frame, (corners[i][0][2][0], corners[i][0][2][1]), 5,
                       (180, 105, 255), -1)
            cv2.circle(frame, (corners[i][0][3][0], corners[i][0][3][1]), 5,
                       (255, 255, 255), -1)
            x = ((corners[i][0][0][0] + corners[i][0][2][0]) / 2)
            y = ((corners[i][0][0][1] + corners[i][0][2][1]) / 2)
            cv2.circle(frame, (int(x), int(y)), 5, (255, 0, 0), -1)  #center
            x1 = (corners[i][0][0][0] + corners[i][0][1][0]) / 2
            y1 = (corners[i][0][0][1] + corners[i][0][1][1]) / 2
            cv2.circle(frame, (int(x1), int(y1)), 5, (0, 0, 255),
                       -1)  #midpoint of starting edge
            cv2.line(frame, (int(x), int(y)), (int(x1), int(y1)), (255, 0, 0),
                     3)
            a = y1 - y
            b = x1 - x
            theta = 3.14 - math.atan2(
                a, b
            )  # subtract the atan2 angle from pi; atan2 returns values in [-pi, pi]
            #			print theta
            #			print '........'
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, str(theta), (int(x + 100), int(y - 20)), font,
                        0.8, (0, 255, 0), 2, cv2.LINE_AA)
            q = (x - 320) * 0.003351064  # pixel offset from image centre along x, in metres (~0.00335 m per pixel)
            r = (y - 240) * 0.003351064  # pixel offset from image centre along y, in metres
            emptyBuff = bytearray()
            orientation = [0, 0, theta]
            if z == 0:
                position = [-q, r, +4.1000e-02]
                returnCode1, outInts1, path_pos, outStrings1, outBuffer1 = vrep.simxCallScriptFunction(
                    clientID, 'script', vrep.sim_scripttype_childscript,
                    'shift', [], position, [], emptyBuff,
                    vrep.simx_opmode_blocking)
                returnCode1, outInts1, path_pos, outStrings1, outBuffer1 = vrep.simxCallScriptFunction(
                    clientID, 'script', vrep.sim_scripttype_childscript,
                    'rotate', [], orientation, [], emptyBuff,
                    vrep.simx_opmode_blocking)
#				print '0303030303'
            else:
                if flag == 0:
                    if z in freshfruits_id:
                        position = [-q, r, +4.1000e-02]
                        if position[0] > 0:
                            if position[1] > 0:
                                returnCode = vrep.simxSetObjectPosition(
                                    clientID, freshfruits[0], -1, position,
                                    vrep.simx_opmode_oneshot_wait)
                            if position[1] < 0:
                                returnCode = vrep.simxSetObjectPosition(
                                    clientID, freshfruits[1], -1, position,
                                    vrep.simx_opmode_oneshot_wait)
                        if position[0] < 0:
                            returnCode = vrep.simxSetObjectPosition(
                                clientID, freshfruits[2], -1, position,
                                vrep.simx_opmode_oneshot_wait)
#						print returnCode
#						print r
#						print -q
#						print '03030330-------------'
                    if z in damagedfruits_id:
                        position = [-q, r, +4.1000e-02]
                        returnCode = vrep.simxSetObjectPosition(
                            clientID, damagedfruits[l1], -1, position,
                            vrep.simx_opmode_oneshot_wait)
                        #						print returnCode
                        #						print r
                        #						print -q
                        #						print '03030330-------------'
                        l1 = l1 + 1

        flag = flag + 1
        cv2.imshow('frame', frame)
        if i == t - 1:
            cap.release()
            cv2.destroyAllWindows()
            return
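# A minimal standalone sketch (not part of the original example) of the heading
# computation used above: the marker centre is the midpoint of the diagonal
# (corner 0 to corner 2), the reference point is the midpoint of the first edge
# (corner 0 to corner 1), and the heading follows the same pi - atan2 convention.
import math
import numpy as np

def marker_heading(marker_corners):
    """marker_corners: 4x2 array, as found in corners[i][0] from aruco.detectMarkers."""
    c = np.asarray(marker_corners, dtype=float)
    center = (c[0] + c[2]) / 2.0        # centre of the marker
    front = (c[0] + c[1]) / 2.0         # midpoint of the first (top) edge
    return math.pi - math.atan2(front[1] - center[1], front[0] - center[0])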
Example #2
def custom_detect_Aruco(
        img, n,
        C):  # returns the detected aruco list dictionary with id: corners
    aruco_list = {}
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    if n == 'ARUCO' and C == 'ORIGINAL':
        aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)

    elif n == '4':
        if C == '50':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)

        elif C == '100':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)

        elif C == '250':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)

        elif C == '1000':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)

    elif n == '5':
        if C == '50':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_50)

        elif C == '100':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_100)

        elif C == '250':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)

        elif C == '1000':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_1000)

    elif n == '6':
        if C == '50':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)

        elif C == '100':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_100)

        elif C == '250':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

        elif C == '1000':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_1000)

    elif n == '7':
        if C == '50':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_50)

        elif C == '100':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_100)

        elif C == '250':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_250)

        elif C == '1000':
            aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_1000)

    print(aruco_dict)
    parameters = aruco.DetectorParameters_create(
    )  # refer opencv page for clarification
    # lists of ids and the corners belonging to each id
    print(parameters)
    corners, ids, _ = aruco.detectMarkers(gray,
                                          aruco_dict,
                                          parameters=parameters)
    # corners is the list of corners(numpy array) of the detected markers. For each marker, its four corners are returned in their original order (which is clockwise starting with top left). So, the first corner is the top left corner, followed by the top right, bottom right and bottom left.
    # print len(corners), corners, ids
    print(corners)
    print(len(corners))
    gray = aruco.drawDetectedMarkers(gray, corners, ids)
    # cv2.imshow('frame',gray)
    # print (type(corners[0]))
    if len(corners):  # at least one marker was detected
        # print (len(corners))
        # print (len(ids))
        for k in range(len(corners)):
            temp_1 = corners[k]
            temp_1 = temp_1[0]
            temp_2 = ids[k]
            temp_2 = temp_2[0]
            aruco_list[temp_2] = temp_1
        return aruco_list
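# Hedged usage sketch for custom_detect_Aruco (the image file name is an
# illustrative assumption): the returned dict maps marker id -> 4x2 corner
# array, in the clockwise order described in the comment above.
# img = cv2.imread('sample_markers.png')
# detected = custom_detect_Aruco(img, '5', '250')   # selects DICT_5X5_250
# if detected:
#     for marker_id, marker_corners in detected.items():
#         print(marker_id, marker_corners[0])        # id and its top-left corner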
def main():

    # First we set up the cameras to start streaming
    # Define some constants
    resolution_width = 1280  # pixels
    resolution_height = 720  # pixels
    frame_rate = 30  # fps
    dispose_frames_for_stablisation = 30  # frames

    # Enable the streams from all the intel realsense devices
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width,
                            resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 1, resolution_width,
                            resolution_height, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width,
                            resolution_height, rs.format.bgr8, frame_rate)

    # Use the device manager class to enable the devices and get the frames
    device_manager = DeviceManager(rs.context(), rs_config)
    device_manager.enable_all_devices()
    device_manager._available_devices.sort()
    device_list = device_manager._available_devices

    # Allow some frames for the auto-exposure controller to stabilise
    for frame in range(dispose_frames_for_stablisation):
        frames = device_manager.poll_frames_keep()

    assert (len(device_manager._available_devices) > 0)

    #Then we calibrate the images

    # Get the intrinsics of the realsense device
    intrinsics_devices = device_manager.get_device_intrinsics(frames)

    # Set the charuco board parameters for calibration
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    charuco_width = 8
    charuco_height = 5
    square_length = 0.03425
    marker_length = .026

    coordinate_dimentions = 3
    charuco_board = aruco.CharucoBoard_create(charuco_width, charuco_height,
                                              square_length, marker_length,
                                              aruco_dict)
    # Set the charuco board parameters for robot
    aruco_dict_r = aruco.Dictionary_get(aruco.DICT_4X4_250)
    charuco_width_r = 3
    charuco_height_r = 3
    square_length_r = 0.0311
    marker_length_r = .0247

    charuco_board_robot = aruco.CharucoBoard_create(charuco_width_r,
                                                    charuco_height_r,
                                                    square_length_r,
                                                    marker_length_r,
                                                    aruco_dict_r)

    # Set the charuco board parameters for the second robot board
    aruco_dict_ro = aruco.Dictionary_get(aruco.DICT_5X5_100)
    charuco_width_ro = 3
    charuco_height_ro = 3
    square_length_ro = 0.0311
    marker_length_ro = .0247

    charuco_board_robot_2 = aruco.CharucoBoard_create(charuco_width_ro,
                                                      charuco_height_ro,
                                                      square_length_ro,
                                                      marker_length_ro,
                                                      aruco_dict_ro)

    # Decide if you want to use previous calibration or make a new one
    calibration_decision = input(
        'To use previous calibration, press "y". For new calibration press any key \n'
    )
    if calibration_decision != 'y':
        # Choose amount of frames to average
        correct_input = False
        while not correct_input:
            the_input = input(
                "Write amount of frames you want to use to calibrate \n")
            try:
                amount_frames = int(the_input)
                if amount_frames > 0:
                    correct_input = True
                else:
                    print("Frame count has to be positive")
            except ValueError:
                print('Input has to be int')

        # Make a dict to store all images for calibration
        frame_dict = {}
        transform_dict = {}
        rms_dict = {}
        for from_device in device_list:
            transform_dict[from_device] = {}
            rms_dict[from_device] = {}
            for to_device in device_list:
                transform_dict[from_device][to_device] = {}
                rms_dict[from_device][to_device] = np.inf
        print("Starting to take images in 5 seconds")
        time.sleep(5)
        devices_stitched = False

        while not devices_stitched:
            print("taking new set of  images")
            for frame_count in range(amount_frames):
                print("taking image")
                print(amount_frames - frame_count, "images left")
                frames = device_manager.poll_frames_keep()
                time.sleep(0.5)
                frame_dict[frame_count] = {}
                for device in device_list:
                    ir_frame = np.asanyarray(
                        frames[device][(rs.stream.infrared, 1)].get_data())
                    depth_frame = np.asanyarray(
                        post_process_depth_frame(
                            frames[device][rs.stream.depth]).get_data())
                    frame_dict[frame_count][device] = {
                        'ir_frame': ir_frame,
                        'depth_frame': depth_frame
                    }
                del frames

            # Make transfer matrices between all possible cameras
            for idx, from_device in enumerate(device_list[:-1]):
                for to_device in device_list[idx + 1:]:
                    if to_device != from_device:
                        temp_transform, temp_rms = get_transformation_matrix_wout_rsobject(
                            frame_dict, [from_device, to_device],
                            intrinsics_devices, charuco_board)
                        if temp_rms < rms_dict[from_device][to_device]:
                            rms_dict[from_device][to_device] = temp_rms
                            rms_dict[to_device][from_device] = temp_rms
                            transform_dict[from_device][
                                to_device] = temp_transform
                            transform_dict[to_device][
                                from_device] = temp_transform.inverse()

            # Use Dijkstra's to find the shortest path and check if all cameras are connected
            transformation_matrices = least_error_transfroms(
                transform_dict, rms_dict)
            if transformation_matrices != 0:
                devices_stitched = True
            # Prints rms error between camera transforms
            test = matrix_viewer(rms_dict)
            print(test)

        # Turn the transformation matrices into Transformation objects
        transformation_devices = {}
        for matrix in transformation_matrices:
            the_matrix = transformation_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

        # Saves calibration transforms if the user wants to
        save_calibration = input(
            'Press "y" if you want to save calibration \n')
        if save_calibration == "y":
            calibration_name = input
            saved = False
            while not saved:
                name = input(
                    "Type in name of file to save. remember to end name with '.npy' \n"
                )
                try:
                    np.save(name, transformation_matrices)
                    saved = True
                except Exception:
                    print(
                        "could not save; try another name and remember to end it with '.npy'"
                    )

        frame_dict.clear()
    else:
        correct_filename = False
        while not correct_filename:
            file_name = input('Type in calibration file name \n')
            try:
                transfer_matrices_save = np.load(file_name, allow_pickle=True)
                transfer_matrices = transfer_matrices_save[()]
                correct_filename = True
            except (IOError, OSError):
                print('No such file in directory: "', file_name, '"')
        transformation_devices = {}
        for matrix in transfer_matrices:
            the_matrix = transfer_matrices[matrix]
            transformation_devices[matrix] = Transformation(
                the_matrix[:3, :3], the_matrix[:3, 3])

    print("Calibration completed...")

    # Enable the emitter of the devices and extract serial numbers to identify cameras
    device_manager.enable_emitter(True)
    key_list = device_manager.poll_frames().keys()

    while True:
        visualisation = input(
            'Press "1" for RMS error, "2" for chessboard visualisation, "3" for 3D pointcloud and "4" for robot to camera calibration\n'
        )

        if visualisation == '1':

            calculate_avg_rms_error(device_list, device_manager,
                                    transformation_devices, charuco_board,
                                    intrinsics_devices)

        elif visualisation == '2':

            visualise_chessboard(device_manager, device_list,
                                 intrinsics_devices, charuco_board,
                                 transformation_devices)

        elif visualisation == '3':

            visualise_point_cloud(key_list, resolution_height,
                                  resolution_width, device_manager,
                                  coordinate_dimentions,
                                  transformation_devices)
        elif visualisation == '4':
            robot_calibration = CameraRobotCalibration(transformation_devices,
                                                       device_manager,
                                                       charuco_board_robot_2,
                                                       charuco_board_robot)
            input("Set target in position for calibration\n")
            test = robot_calibration.test_functions()
            print(test)

        else:
            print("Input not recognised")
Example #4
import copy
import csv
import os

import numpy as np
from cv2 import aruco

import tf

cwd = os.getcwd()
fk_ur5 = UR5Kin()

################### Initialisation ################################
camera = 2  # number of cameras
rootDir = "/home/zheyu/ur5_calib_toolkit/ZED/"
robotYAML = "{}robotCalibration.yaml".format(rootDir)
cameraYAML = "cameraCalibration.yaml"
extrinsicDataDir = "{}calibDataset/".format(rootDir)
tableDataDir = "{}tableCalibDataset/".format(rootDir)
fileName = "jointAngles.csv"
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_1000)  # 4X4 = 4x4 bit markers
charuco_board = aruco.CharucoBoard_create(
    4, 6, 0.063, 0.048,
    aruco_dict)  # width, height (full board), square length, marker length
##################################################################


class chArucoCalib:
    def __init__(self):

        images = np.array([
            extrinsicDataDir + f for f in os.listdir(extrinsicDataDir)
            if f.endswith(".png")
        ])
        order = np.argsort(
            [int(p.split(".")[-2].split("_")[-1]) for p in images])
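        # Hedged continuation sketch (the original snippet is truncated here):
        # apply the computed ordering so frames are processed in capture-index order.
        # self.images = images[order]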
Example #5
    def detectARUCO(self):
        i = 0
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        lists = []
        idd = ""

        cv_file = cv2.FileStorage("../Calibration/calib_images/Data.yaml",
                                  cv2.FILE_STORAGE_READ)

        # note: we also have to specify the type to retrieve, otherwise we only
        # get a FileNode object back instead of a matrix
        camera_matrix = cv_file.getNode("camera_matrix").mat()
        dist_matrix = cv_file.getNode("dist_coeff").mat()

        cap = cv2.VideoCapture(0)
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((6 * 7, 3), np.float32)
        objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.
        images = glob.glob('../Calibration/calib_images/*.jpg')

        for fname in images:
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
            # If found, add object points, image points (after refining them)

            if ret == True:
                objpoints.append(objp)
                corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                            criteria)
                imgpoints.append(corners2)
                # Draw and display the corners
                img = cv2.drawChessboardCorners(img, (7, 6), corners2, ret)

        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
            objpoints, imgpoints, gray.shape[::-1], None, None)

        while (i < 25):
            # print(i)
            ide = ""
            ret, frame = cap.read()
            # operations on the frame come here
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # lists of ids and the corners belonging to each id
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, aruco_dict, parameters=parameters)
            font = cv2.FONT_HERSHEY_SIMPLEX  #font for displaying text (below)
            if ids is not None:
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners[0], 0.05, mtx, dist
                )  # estimate the pose of the first marker; returns rvec and tvec (per-marker, not camera coefficients)
                (rvec -
                 tvec).any()  # get rid of that nasty numpy value array error
                aruco.drawAxis(frame, mtx, dist, rvec[0], tvec[0],
                               0.1)  #Draw Axis
                aruco.drawDetectedMarkers(
                    frame, corners)  #Draw A square around the markers
                ###### DRAW ID #####
                cv2.putText(frame, "Id: " + str(ids), (0, 64), font, 1,
                            (0, 255, 0), 2, cv2.LINE_AA)
                # Display the resulting frame
                idd = str(ids)
                # print ("___________> ids = ",ids)
                # use j as the loop variable so the outer frame counter i is not overwritten
                for j in range(len(idd)):
                    if idd[j] != "[" and idd[j] != "]" and idd[
                            j] != "\n" and idd[j] != " ":
                        if idd[j + 1] != "[" and idd[j + 1] != "]" and idd[
                                j + 1] != "\n" and idd[j + 1] != " ":
                            ide = str(idd[j]) + str(idd[j + 1])
                            if int(ide) not in lists:
                                lists.append(int(ide))
                                if int(idd[j]) not in lists and ide == "":
                                    lists.append(int(idd[j]))
                                    # print("")
                                    # print("")
            cv2.imshow('frame', frame)
            cv2.waitKey(1)  # let imshow actually refresh the window
            i = i + 1

        lists.sort()
        cap.release()
        cv2.destroyAllWindows()

        return lists
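# A simpler, hedged alternative to the string parsing above (not part of the
# original): detectMarkers returns ids as an Nx1 numpy array, so the detected
# ids can be collected directly.
# if ids is not None:
#     for marker_id in ids.flatten().tolist():
#         if marker_id not in lists:
#             lists.append(marker_id)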
# System information:
# - Linux Mint 18.1 Cinnamon 64-bit
# - Python 2.7 with OpenCV 3.2.0

import numpy
import cv2
from cv2 import aruco
import pickle
import glob

# ChAruco board variables
CHARUCOBOARD_ROWCOUNT = 7
CHARUCOBOARD_COLCOUNT = 5
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_1000)

# Create constants to be passed into OpenCV and Aruco methods
CHARUCO_BOARD = aruco.CharucoBoard_create(squaresX=CHARUCOBOARD_COLCOUNT,
                                          squaresY=CHARUCOBOARD_ROWCOUNT,
                                          squareLength=0.051,
                                          markerLength=0.0255,
                                          dictionary=ARUCO_DICT)

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Create the arrays and variables we'll use to store info like corners and IDs from images processed
corners_all = []  # Corners discovered in all images processed
ids_all = []  # Aruco ids corresponding to corners discovered
image_size = None  # Determined at runtime

objectPoints = []
imagePointsLeft = []
Example #7
import cv2
import numpy as np
from cv2 import aruco
# import pyrealsense2 as rs  # only needed if the RealSense blocks below are enabled

# Initialize communication with intel realsense
'''pipeline = rs.pipeline()
realsense_cfg = rs.config()
realsense_cfg.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 0)
pipeline.start(realsense_cfg)'''

# Check communication
'''print("Test data source...")
try:
    np.asanyarray(pipeline.wait_for_frames().get_color_frame().get_data())
except:
    raise Exception("Can't get rgb frame from data source")'''

# Define what the calibration board looks like (same as the pdf)
board = cv2.aruco.CharucoBoard_create(4, 4, .025, .0125,
                                      aruco.Dictionary_get(dictionary))
record_count = 0
# Create two arrays to store the recorded corners and ids
all_corners = []
all_ids = []

print("Start recording [1/4]")
print(
    "1. Move the grid from calibration directory a little bit in front of the camera and press [r] to make a record (if auto record is not set to True)"
)
print("2. Finish this task and start calculation press [c]")
print("3. Interrupt application [ESC]")
cap = cv2.VideoCapture(0)
scaling_factor = 0.5
while True:
    # Get frame from realsense and convert to grayscale image
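    # Hedged continuation sketch (the original loop body is truncated here), reusing
    # the names defined above; `dictionary` is assumed to be set earlier, as in the
    # board creation line, and the key bindings follow the printed instructions.
    ret, frame = cap.read()
    if not ret:
        continue
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco.Dictionary_get(dictionary))
    if ids is not None:
        cv2.aruco.drawDetectedMarkers(frame, corners, ids)
    cv2.imshow('calibration', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('r') and ids is not None:   # record this view
        n_found, ch_corners, ch_ids = cv2.aruco.interpolateCornersCharuco(
            corners, ids, gray, board)
        if n_found > 3:
            all_corners.append(ch_corners)
            all_ids.append(ch_ids)
            record_count += 1
    elif key == ord('c'):                     # start the calculation
        break
    elif key == 27:                           # ESC interrupts the application
        break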
Example #8
def detect_markers(device_id, hold_info_count=15):
    cap = cv2.VideoCapture(device_id)

    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6 * 7, 3), np.float32)
    objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    print('1. start read calib imgs')
    images = glob.glob(os.path.join(calib_imgs, '*.png'))

    if not len(images):
        print("calib images not exist!!!!!!!!!")
        return

    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)

            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)

            # Draw and display the corners
            if enable_gui:
                img = cv2.drawChessboardCorners(img, (7, 6), corners2, ret)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)

    print('2. start detect markers')
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    update_counter = 0

    while True:
        ret, frame = cap.read()
        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        font = cv2.FONT_HERSHEY_SIMPLEX  # font for displaying text (below)

        if ids is not None:
            # print(type(ids), ids)
            result = []

            for id_, corner in zip(ids, corners):
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corner, 0.05, mtx, dist)
                result.append((id_[0], rvec, tvec))

            update_detected(result)
            update_counter = hold_info_count

            if enable_gui:
                # Draw Axis
                aruco.drawAxis(frame, mtx, dist, rvec[0], tvec[0], 0.1)
                # Draw A square around the markers
                aruco.drawDetectedMarkers(frame, corners)
                cv2.putText(frame, "Id: " + str(ids), (0, 64),
                            font, 1, (0, 255, 0), 2, cv2.LINE_AA)

        # Display the resulting frame
        if enable_gui:
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # clear update
        if update_counter <= 0:
            update_detected([])
            update_counter = 0

        update_counter -= 1

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
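# Hedged helper sketch (not part of the original): convert the rvec/tvec pair
# returned by aruco.estimatePoseSingleMarkers into a 4x4 marker-to-camera
# transform, which is usually what consumers of update_detected() want.
import cv2
import numpy as np

def marker_pose_matrix(rvec, tvec):
    rot, _ = cv2.Rodrigues(rvec.reshape(3))   # Rodrigues vector -> 3x3 rotation
    pose = np.eye(4)
    pose[:3, :3] = rot
    pose[:3, 3] = tvec.reshape(3)
    return pose

# e.g. for an entry (marker_id, rvec, tvec) in result: marker_pose_matrix(rvec, tvec)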
def arucoid(ide, dire, flag):
    global an
    print(ide, dire)
    while flag:
        ret, frame = cap.read()
        #print(frame.shape) #480x640
        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_50)
        #print aruco_dict
        parameters = aruco.DetectorParameters_create()
        '''    detectMarkers(...)
                detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedI
                mgPoints]]]]) -> corners, ids, rejectedImgPoints
                '''
        #lists of ids and the corners beloning to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        #print "im here"
        frame = aruco.drawDetectedMarkers(frame, corners)

        if len(corners):
            #print(corners)
            corners = corners[0][0]
            tl = corners[0]
            bl = corners[3]
            cv2.circle(frame, tuple(corners[0]), 4, color=(255, 0, 0))
            cv2.circle(frame, tuple(corners[3]), 4, color=(0, 255, 0))
            #print tl,bl
            dist_px = np.linalg.norm(tl - bl)
            #print dist_px
            distance = (aru_size * focal_len) / dist_px
            #print distance
            '''pixel/focal=actual/dist'''
            ids = ids[0][0]

            if distance < 1000:
                if str(ids) == str(ide):
                    #print "im in"
                    #arduino.flush()
                    #arduino.write('4')
                    logger('@rasp found marker id {}'.format(ids))
                    '''
                        flagfile=open('flag.txt','w+')
                        flagfile.write('1')
                        flagfile.close()'''
                    #print ard_dire[dire]
                    print(ide, dire)
                    ard.write(str(ard_dire[dire]))
                    logger('@arduino sent to arduino')

                    # with the port open, the response will be buffered
                    # so wait a bit longer for response here

                    # Serial read section
                    time.sleep(2)

                    msg = ard.readline(ard.inWaiting())
                    logger('@arduino received {}'.format(msg))
                    print('arduino', msg)
                    print("found id no : {} \n in distance of : {}".format(
                        ids, distance))
                    #time.sleep(1)
                    #arduino.write('1')
                    flag = False
                cv2.putText(frame, str(ids), tuple(corners[2]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
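# Hedged worked example of the similar-triangles estimate used above (the
# numbers are illustrative, not the original calibration): if a 50 mm marker
# (aru_size = 50) spans dist_px = 40 px and focal_len = 600 px, then
#     distance = (aru_size * focal_len) / dist_px = (50 * 600) / 40 = 750
# i.e. the marker is roughly 750 mm from the camera.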
Example #10
def cv_exercise7():
    frame = cv2.imread('markers.png')

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, _ = aruco.detectMarkers(gray,
                                          aruco_dict,
                                          parameters=parameters)
    height, width = gray.shape
    deltaX = 0
    deltaY = 0

    absX = int(width / 2)
    absY = int(height / 2)

    print("Corner: ", corners)
    if len(corners):  # at least one marker was detected
        #print(len(corners))
        aruco_list = {}
        #print(len(ids))
        markerOne = corners[0][0]

        cornerOne = markerOne[0]
        cornerTwo = markerOne[1]
        cornerThree = markerOne[2]
        cornerFour = markerOne[3]

        centerX1 = int((cornerTwo[0] + cornerFour[0]) / 2)
        centerY1 = int((cornerTwo[1] + cornerFour[1]) / 2)
        centerX2 = int((cornerOne[0] + cornerThree[0]) / 2)
        centerY2 = int((cornerOne[1] + cornerThree[1]) / 2)

        centerX = (centerX1 + centerX2) / 2
        centerY = (centerY1 + centerY2) / 2

        deltaX = abs(absX - centerX)
        deltaY = abs(absY - centerY)

        xFOV = (deltaX / width) * 54
        yFOV = (deltaY / height) * 41

        angle = np.sqrt(xFOV * xFOV + yFOV * yFOV)

        print("Angle: ", angle)
        print("CenterX: ", centerX, "CenterY: ", centerY)

    cv2.imshow("angles", frame)
    #print("test")

    # start video capture for distance
    cap = cv2.VideoCapture(0)

    while (True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()

        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        #if ids != None:
        #print("Found ids:")
        #print(ids)
        #else:
        #print("Aruco Marker not found")

        gray = aruco.drawDetectedMarkers(gray, corners, ids)

        height, width = gray.shape
        deltaX = 0
        deltaY = 0

        absX = int(width / 2)
        absY = int(height / 2)

        #print("Corner: ", corners)
        if len(corners):  # at least one marker was detected
            #print(len(corners))
            aruco_list = {}
            #print(len(ids))
            markerOne = corners[0][0]

            cornerOne = markerOne[0]
            cornerTwo = markerOne[1]
            cornerThree = markerOne[2]
            cornerFour = markerOne[3]

            deltaX1 = abs(cornerTwo[0] - cornerOne[0])
            deltaX2 = abs(cornerThree[0] - cornerFour[0])

            deltaY1 = abs(cornerFour[1] - cornerOne[1])
            deltaY2 = abs(cornerThree[1] - cornerTwo[1])

            arucoWidth = (deltaX1 + deltaX2) / 2
            arucoHeight = (deltaY1 + deltaY2) / 2

            # real-world width/height spanned by the full frame at the marker's depth (marker is 0.15 m wide)
            screenWidth = 0.15 / (arucoWidth / width)
            screenHeight = 0.15 / (arucoHeight / height)

            # pinhole model: f is the focal length, x and y the sensor width and height in metres
            f = 0.0036
            x = 0.0038
            y = 0.0029
            Z1 = f * (screenWidth / x)
            Z2 = f * (screenHeight / y)

            finalDistance = (Z1 + Z2) / 2

            print("Distance: ", finalDistance)

        cv2.imshow('frame', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
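# Hedged worked example of the distance estimate above (illustrative numbers,
# not from the original): with a 640 px wide frame and a marker spanning
# arucoWidth = 64 px, the 0.15 m marker implies
#     screenWidth = 0.15 / (64 / 640) = 1.5 m
# seen by the full frame at the marker's depth, so with f = 0.0036 m and a
# 0.0038 m wide sensor
#     Z1 = f * (screenWidth / x) = 0.0036 * (1.5 / 0.0038) ≈ 1.42 m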
    def visionSystem(server, in_q):
        """

        Vision functionality for the system (includes AI-yolo5 and aruco marker detection).

        If the arm is ready to pick up a package, determines what the parcel is (AI) and the position of said parcel.

        Using the bounding box (if parcel was detected), gets center of parcel and returns a depth value. Used for TCP values.

        Args:

            server.systemStatus (string): Used to determine if the vision system needs to shutdown or go to standby.

            in_q (Queue): Queue shared between videoStream and the visionSystem (allows for thread-safe communication). Inserts a frame into the queue to send to the user client.


        Returns:

            None


        """

        # #Check this function call.
        # #Add error handling
        sensor = rs.DepthCamera()
        ret, depth_frame, color_frame = sensor.get_frames()
        model, objects, obj_colors = yolov5.create_model('weight_v1.pt')

        while (server.systemStatus != "Offline"):
            if (server.readyForTCPValues == True):
                row_position_average = np.zeros(0)
                column_position_average = np.zeros(0)

                id_matrix = np.zeros(20)

                worldPosition = 0

                ret, depth_frame, color_frame = sensor.get_frames()
                status, depth, bounds, frame = yolov5.detect(
                    model, color_frame, depth_frame, 192, objects, obj_colors)

                if (status == False):
                    continue

                #Length is same as moving average length
                for counter in range(0, 50):
                    gray = cv2.cvtColor(color_frame, cv2.COLOR_BGR2GRAY)
                    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_1000)
                    arucoParameters = aruco.DetectorParameters_create()
                    corners, ids, rejectedImgPoints = aruco.detectMarkers(
                        gray, aruco_dict, parameters=arucoParameters)

                    frame = aruco.drawDetectedMarkers(color_frame, corners)
                    if (in_q.empty() == True):
                        in_q.put(frame)

                    try:
                        #Set id matrix position to 1 (marker visible)
                        for i in range(0, len(ids)):
                            if (ids[i] > 20):
                                print("")
                            else:
                                id_matrix[ids[i]] = 1

                        row_position_average, column_position_average, worldPosition = server.findLocation(
                            id_matrix, row_position_average,
                            column_position_average)

                        id_matrix = np.zeros(20)

                        ret, depth_frame, color_frame = sensor.get_frames()
                    except Exception as error:
                        print(error)

                server.readyForTCPValues = False

                # Can use simple trig to get an approximation of the depth.

                # marker_distance would hold the values for the displacement between the markers and the camera (x-axis)
                x_distance = server.marker_distance[round(
                    row_position_average)]

                depth = math.sqrt(pow(depth, 2) - pow(x_distance, 2))

                jsonResult = {
                    "first": "Target TCP",
                    "second": (worldPosition[0] / 1000),
                    "third": (worldPosition[1] / 1000),
                    "fourth": (depth / 1000)
                }

                server.send(jsonResult, 1)

            else:

                gray = cv2.cvtColor(color_frame, cv2.COLOR_BGR2GRAY)
                aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_1000)
                arucoParameters = aruco.DetectorParameters_create()
                corners, ids, rejectedImgPoints = aruco.detectMarkers(
                    gray, aruco_dict, parameters=arucoParameters)

                frame = aruco.drawDetectedMarkers(color_frame, corners)
                if (in_q.empty() == True):
                    in_q.put(frame)

                ret, depth_frame, color_frame = sensor.get_frames()
def main():
    face_landmark_path = './shape_predictor_68_face_landmarks.dat'
    #flags/initializations to be set
    kalman_flag = 1
    skip_frame = 1
    skip_frame_to_send = 4
    socket_connect = 1  # set to 0 if we are testing the code locally on the computer with only the realsense tracking.
    simplified_calib_flag = 0  # this is set to 1 if we want to do one-time calibration
    arucoposeflag = 1
    img_idx = 0  # img_idx keeps track of image index (frame #).
    RSTrSum = np.zeros(
        (4, 4)
    )  # accumulator for averaging the transformation matrix over every skip_frame_to_send frames
    skip_frame_reinit = 120  # after every 120 frames, reinitialize detection
    N_samples_calib = 10  # number of samples for computing the calibration matrix using homography

    # kalman filter initialization
    stateMatrix = np.zeros((12, 1), np.float32)  # 12x1 state vector
    estimateCovariance = np.eye(stateMatrix.shape[0])
    transitionMatrix = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                                 [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                                 [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                                 [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                                 [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                                 [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                                 [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
                                np.float32)
    processNoiseCov = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
                               np.float32) * 0.001
    measurementStateMatrix = np.zeros((6, 1), np.float32)
    observationMatrix = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                  [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                  [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                  [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                                  [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]],
                                 np.float32)
    measurementNoiseCov = np.array(
        [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
         [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]],
        np.float32) * 1
    kalman = KalmanFilter(X=stateMatrix,
                          P=estimateCovariance,
                          F=transitionMatrix,
                          Q=processNoiseCov,
                          Z=measurementStateMatrix,
                          H=observationMatrix,
                          R=measurementNoiseCov)
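    # Note on the matrices above: the 12-element state holds the 6 measured pose
    # components (x, y, z and three Euler angles) followed by their per-frame
    # velocities; transitionMatrix adds each velocity onto its pose component every
    # step (constant-velocity model), and observationMatrix exposes only the first
    # six entries, matching the 6x1 measurementStateMatrix.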

    if socket_connect == 1:
        #  create a socket object
        s = socket.socket()
        print("Socket successfully created")

        # reserve a port on your computer in our case it is 2020 but it can be anything
        port = 2020
        s.bind(('', port))
        print("socket binded to %s" % (port))

        # put the socket into listening mode
        s.listen(5)
        print("socket is listening")
        c, addr = s.accept()
        print('got connection from ', addr)

    # dlib face detector
    dlibdetector = dlib.get_frontal_face_detector()
    # dlib landmark detector
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

    if socket_connect == 1 and simplified_calib_flag == 0:
        arucoinstance = arucocalibclass()
        ReturnFlag = 1
        aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        marker_len = 0.0645
        MLRSTr = arucoinstance.startcamerastreaming(c, ReturnFlag, marker_len,
                                                    aruco_dict,
                                                    N_samples_calib)
        print(MLRSTr)
    elif socket_connect == 1 and simplified_calib_flag == 1:
        T_M2_RS = np.array([
            -1.0001641, 0.00756584, 0.00479072, 0.03984956, -0.00774137,
            -0.99988126, -0.03246199, -0.01359556, 0.00453644, -0.03251681,
            0.99971441, -0.00428408, 0., 0., 0., 1.
        ])
        T_M2_RS = T_M2_RS.reshape(4, 4)
        MLRSTr = simplified_calib(T_M2_RS, c)
    else:
        MLRSTr = np.array((1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1))
        MLRSTr = MLRSTr.reshape(4, 4)
        print(MLRSTr)


    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    profile = pipeline.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)

    print('Start detecting pose ...')

    while True:
        # get video frame
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        aligned_depth_frame = aligned_frames.get_depth_frame()

        if not aligned_depth_frame or not color_frame:
            continue

        # Intrinsics & Extrinsics of Realsense
        depth_intrin = profile.get_stream(
            rs.stream.depth).as_video_stream_profile().get_intrinsics()
        intr = profile.get_stream(
            rs.stream.color).as_video_stream_profile().get_intrinsics()
        depth_to_color_extrin = aligned_depth_frame.profile.get_extrinsics_to(
            color_frame.profile)
        mtx = np.array([[intr.fx, 0, intr.ppx], [0, intr.fy, intr.ppy],
                        [0, 0, 1]])
        dist = np.array(intr.coeffs)

        input_img = np.asanyarray(color_frame.get_data())
        img_idx = img_idx + 1
        img_h, img_w, _ = np.shape(input_img)

        # Process the first frame and every frame after "skip_frame" frames
        if img_idx == 1 or img_idx % skip_frame == 0:

            gray_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
            # detect faces using dlib
            rects = dlibdetector(gray_img, 1)
            input_imgcopy = input_img
            for rect in rects:
                # detect facial landmarks
                shape = predictor(gray_img, rect)

                #head pose estimation
                reprojectdst, rotation_vec, rotation_mat, translation_vec, euler_angle, image_pts = get_head_pose(
                    shape, mtx, dist)
                # Project a 3D point (0, 0, 1000.0) onto the image plane.
                #We use this to draw a line sticking out of the nose
                (nose_end_point2D,
                 jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                               rotation_vec, translation_vec,
                                               mtx, dist)
                for p in image_pts:
                    cv2.circle(input_img, (int(p[0]), int(p[1])), 3,
                               (0, 0, 255), -1)
                # draw line sticking out of the nose tip and showing the head pose
                p1 = (int(image_pts[0][0]), int(image_pts[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]),
                      int(nose_end_point2D[0][0][1]))
                cv2.line(input_img, p1, p2, (255, 0, 0), 2)

                # convert landmarks detected to numpy type
                shape = face_utils.shape_to_np(shape)
                landmarks = np.float32(shape)

                for (x, y) in landmarks:
                    cv2.circle(input_img, (x, y), 1, (0, 0, 255), -1)

                #get 3D co-ord of nose
                depth = aligned_depth_frame.get_distance(
                    int(image_pts[0][0]), int(image_pts[0][1]))
                cv2.circle(input_img,
                           (int(image_pts[0][0]), int(image_pts[0][1])), 3,
                           (0, 255, 0), -1)

                depth_point = rs.rs2_deproject_pixel_to_point(
                    depth_intrin, [image_pts[0][0], image_pts[0][1]], depth)
                depth_point = np.array(depth_point)
                depth_point = np.reshape(depth_point, [1, 3])
                print("depth_point", depth_point)

                if (depth_point[0][2] != 0 and depth_point[0][2] < 0.8):

                    if kalman_flag == 0:  # or img_idx - img_sent > reinit_thresh or img_idx % skip_frame_reinit == 0:

                        ##print("reset img_sent", img_idx, img_sent)
                        print("reinit", img_idx, skip_frame_reinit)
                        Posarr = np.array([
                            depth_point[0][0], depth_point[0][1],
                            depth_point[0][2]
                        ])
                        Posarr = Posarr.reshape(1, 3)
                        print("Posarr", Posarr)
                        r = R.from_euler('zyx', euler_angle, degrees=True)
                        RotMat = r.as_matrix()
                        img_sent = img_idx

                    else:
                        # execute the following if kalman filter to be applied
                        ##print("kalman loop", img_idx, img_sent)
                        print("euler angle", euler_angle, euler_angle[0],
                              euler_angle[1], euler_angle[2])

                        current_measurement = np.append(
                            depth_point, euler_angle)
                        current_prediction = kalman.predict()
                        current_prediction = np.array(current_prediction,
                                                      np.float32)
                        current_prediction = current_prediction.transpose()[0]
                        # predicted euler angles
                        euler_angle_P = current_prediction[3:6]
                        # predicted posarr
                        Posarr_P = np.array(current_prediction[:3]).reshape(
                            [1, 3])
                        # convert to rotation matrix using r function
                        r = R.from_euler('zyx', euler_angle_P, degrees=True)
                        rotation_mat = r.as_matrix()
                        # update the estimate of the kalman filter
                        print("current measurement", current_measurement)
                        #kalman.correct(np.transpose(current_measurement))
                        kalman.correct(
                            np.transpose(
                                np.reshape(current_measurement, [1, 6])))
                        Posarr_noKalman = np.array([
                            depth_point[0][0], depth_point[0][1],
                            depth_point[0][2]
                        ])
                        depth_point = Posarr_P
                        print("Posarr_P", depth_point, Posarr_noKalman,
                              euler_angle, euler_angle_P)

                    #RSTr is the head pose - combine depth point (which gives the translation, and the rotation to get RSTr)
                    RSTr = np.hstack([rotation_mat, depth_point.transpose()])
                    RSTr = np.vstack([RSTr, [0, 0, 0, 1]])
                    print("head pose", RSTr)
                    # compute mean of the head pose every few frames
                    RSTrSum += RSTr
                    if img_idx == skip_frame_to_send:
                        RSTrTosend = RSTrSum / skip_frame_to_send
                        RSTr = RSTrTosend
                        RSTrSum = np.zeros((4, 4))

                    if arucoposeflag == 1:
                        print("aruco")
                        gray = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
                        # set dictionary size depending on the aruco marker selected
                        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
                        # detector parameters can be set here (List of detection parameters[3])
                        parameters = aruco.DetectorParameters_create()
                        parameters.adaptiveThreshConstant = 10
                        # lists of ids and the corners belonging to each id
                        corners, ids, rejectedImgPoints = aruco.detectMarkers(
                            gray, aruco_dict, parameters=parameters)

                        # check if the ids list is not empty
                        if ids is not None:
                            # estimate pose of each marker
                            intr = profile.get_stream(
                                rs.stream.color
                            ).as_video_stream_profile().get_intrinsics()
                            mtx = np.array([[intr.fx, 0, intr.ppx],
                                            [0, intr.fy, intr.ppy], [0, 0, 1]])
                            dist = np.array(intr.coeffs)
                            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                                corners, 0.045, mtx, dist)
                            for i in range(0, ids.size):
                                # draw axis for the aruco markers
                                aruco.drawAxis(input_img, mtx, dist, rvec[i],
                                               tvec[i], 0.1)

                            # draw a square around the markers
                            aruco.drawDetectedMarkers(input_img, corners)
                            #Combine rotation matrix and translation vector to get Aruco pose
                            R_rvec = R.from_rotvec(rvec[0])
                            R_rotmat = R_rvec.as_matrix()
                            AruRSTr = np.hstack(
                                [R_rotmat[0], tvec[0].transpose()])
                            AruRSTr = np.vstack([AruRSTr, [0, 0, 0, 1]])
                            RSTr = AruRSTr
                            print("Aruco pose", AruRSTr)

                    if img_idx % skip_frame_to_send == 0:
                        # Since pose detected in OpenCV will be right handed coordinate system, it needs to be converted to left-handed coordinate system of Unity
                        RSTr_LH = np.array([
                            RSTr[0][0], RSTr[0][2], RSTr[0][1], RSTr[0][3],
                            RSTr[2][0], RSTr[2][2], RSTr[2][1], RSTr[2][3],
                            RSTr[1][0], RSTr[1][2], RSTr[1][1], RSTr[1][3],
                            RSTr[3][0], RSTr[3][1], RSTr[3][2], RSTr[3][3]
                        ])  # swap the Y and Z rows/columns to convert to Unity's left-handed frame
                        RSTr_LH = RSTr_LH.reshape(4, 4)
                        #Compute the transformed pose to be streamed to MagicLeap
                        HeadPoseTr = np.matmul(MLRSTr, RSTr_LH)

                        ArrToSend = np.array([
                            HeadPoseTr[0][0], HeadPoseTr[0][1],
                            HeadPoseTr[0][2], HeadPoseTr[0][3],
                            HeadPoseTr[1][0], HeadPoseTr[1][1],
                            HeadPoseTr[1][2], HeadPoseTr[1][3],
                            HeadPoseTr[2][0], HeadPoseTr[2][1],
                            HeadPoseTr[2][2], HeadPoseTr[2][3],
                            HeadPoseTr[3][0], HeadPoseTr[3][1],
                            HeadPoseTr[3][2], HeadPoseTr[3][3]
                        ])

                        ArrToSendPrev = ArrToSend
                        if socket_connect == 1:
                            dataTosend = struct.pack('f' * len(ArrToSend),
                                                     *ArrToSend)
                            c.send(dataTosend)
            else:
                print("No Face Detected")
            cv2.imshow('Landmark_Window', input_img)

        key = cv2.waitKey(1)
Example #13
# System information:
# - Linux Mint 18.1 Cinnamon 64-bit
# - Python 2.7 with OpenCV 3.2.0

import numpy
import cv2
from cv2 import aruco
import pickle
import glob

# ChAruco board variables
CHARUCOBOARD_ROWCOUNT = 7
CHARUCOBOARD_COLCOUNT = 5
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_100)

# Create constants to be passed into OpenCV and Aruco methods
CHARUCO_BOARD = aruco.CharucoBoard_create(squaresX=CHARUCOBOARD_COLCOUNT,
                                          squaresY=CHARUCOBOARD_ROWCOUNT,
                                          squareLength=0.04,
                                          markerLength=0.02,
                                          dictionary=ARUCO_DICT)

# Create the arrays and variables we'll use to store info like corners and IDs from images processed
corners_all = []  # Corners discovered in all images processed
ids_all = []  # Aruco ids corresponding to corners discovered
image_size = None  # Determined at runtime

# This requires a set of images or a video taken with the camera you want to calibrate
# I'm using a set of images taken with the camera with the naming convention:
# 'camera-pic-of-charucoboard-<NUMBER>.jpg'
# All images used should be the same size, which if taken with the same camera shouldn't be a problem
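
# The example stops before the detection and calibration loop; a minimal sketch
# of how it typically continues with this API (using the naming convention
# described above) follows -- a sketch, not the original script.
images = glob.glob('./camera-pic-of-charucoboard-*.jpg')

for iname in images:
    img = cv2.imread(iname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, _ = aruco.detectMarkers(gray, ARUCO_DICT)
    if ids is None:
        continue
    # Refine the raw marker detections into ChAruco corner detections
    response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(
        markerCorners=corners, markerIds=ids, image=gray, board=CHARUCO_BOARD)
    if response > 20:  # keep only images with enough corners found
        corners_all.append(charuco_corners)
        ids_all.append(charuco_ids)
        image_size = gray.shape[::-1]

# Calibrate from all collected ChAruco corners
calibration, cameraMatrix, distCoeffs, rvecs, tvecs = aruco.calibrateCameraCharuco(
    charucoCorners=corners_all, charucoIds=ids_all, board=CHARUCO_BOARD,
    imageSize=image_size, cameraMatrix=None, distCoeffs=None)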
Example #14
def track_truck(rp):
    cap = cv2.VideoCapture(1)
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    #	print w
    #	print h
    d_pos = 1  #distance difference between cb and truck
    while True:
        ret, frame = cap.read()
        if ret == False:
            continue
        framebw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, _ = aruco.detectMarkers(framebw,
                                              aruco_dict,
                                              parameters=parameters)
        #		print ids
        i = ids.tolist().index([0])  # index of the marker with id 0 (the truck)
        #		print i
        l = 0
        l1 = 0
        z = int(ids[i][0])
        #		print z
        cv2.circle(frame, (corners[i][0][0][0], corners[i][0][0][1]), 5,
                   (125, 125, 125), -1)
        cv2.circle(frame, (corners[i][0][1][0], corners[i][0][1][1]), 5,
                   (0, 255, 0), -1)
        cv2.circle(frame, (corners[i][0][2][0], corners[i][0][2][1]), 5,
                   (180, 105, 255), -1)
        cv2.circle(frame, (corners[i][0][3][0], corners[i][0][3][1]), 5,
                   (255, 255, 255), -1)
        x = ((corners[i][0][0][0] + corners[i][0][2][0]) / 2)
        y = ((corners[i][0][0][1] + corners[i][0][2][1]) / 2)
        cv2.circle(frame, (int(x), int(y)), 5, (255, 0, 0), -1)  #center
        x1 = (corners[i][0][0][0] + corners[i][0][1][0]) / 2
        y1 = (corners[i][0][0][1] + corners[i][0][1][1]) / 2
        cv2.circle(frame, (int(x1), int(y1)), 5, (0, 0, 255),
                   -1)  #midpoint of starting edge
        cv2.line(frame, (int(x), int(y)), (int(x1), int(y1)), (255, 0, 0), 3)
        a = y1 - y
        b = x1 - x
        theta = 3.14 - math.atan2(a, b)  # heading = pi - atan2(a, b); atan2 returns values in [-pi, pi]
        #		print theta
        #		print '........'
        font = cv2.FONT_HERSHEY_SIMPLEX
        q = (x - 320) * 0.003351064  # x offset from the image centre in pixels -> metres (earlier estimates: ~0.00303, ~0.00353, 0.003515625 m/px)
        r = (y - 240) * 0.003351064  # y offset from the image centre in pixels -> metres (earlier estimates: ~0.00339, ~0.00303, 0.0034375 m/px)
        emptyBuff = bytearray()
        position = [-q, r, +4.1000e-02]
        orientation = [0, 0, theta]
        #		print position
        #		print orientation
        x1 = position[0] - rp[0]
        y1 = position[1] - rp[1]
        d_pos = math.sqrt((x1)**2 + (y1)**2)
        if d_pos < 0.015:
            break

    cap.release()
    cv2.destroyAllWindows()
    return
Example #15
#         marker_center_list.clear()
while True:
    pk_obj.device_get_capture()
    color_image_handle = pk_obj.capture_get_color_image()
    depth_image_handle = pk_obj.capture_get_depth_image()
    if color_image_handle and depth_image_handle:
        color_image = pk_obj.image_convert_to_numpy(color_image_handle)
        # cv2.imshow("test", color_image)
        # cv2.waitKey(0)
        point_cloud = pk_obj.transform_depth_image_to_point_cloud(
            depth_image_handle)
        parameters = aruco.DetectorParameters_create()
        color_image = cv2.cvtColor(color_image, cv2.COLOR_BGRA2BGR)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            color_image,
            dictionary=aruco.Dictionary_get(aruco.DICT_4X4_250),
            parameters=parameters)
        if len(corners) == 0:
            pk_obj.image_release(color_image_handle)
            pk_obj.image_release(depth_image_handle)
            pk_obj.capture_release()
            continue
        image_xy_list = []
        for corner_list in corners:
            image_xy_list.append(
                np.mean(np.mean(corner_list, axis=0), axis=0).astype(np.int16))
        flag = False
        pcd_pnt_list = []
        for image_xy in image_xy_list:
            pcd_pnt = pk_obj.transform_color_xy_to_pcd_xyz(
                input_color_image_handle=color_image_handle,
    # draw pillars in blue color
    for i, j in zip(range(4), range(4, 8)):
        img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]), (255), 3)

    # draw top layer in red color
    img = cv2.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)

    return img


cameraMatrix = np.load('mtx.npy')
distCoeffs = np.load('dist.npy')

ARUCO_PARAMETERS = aruco.DetectorParameters_create()
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_6X6_250)
board = aruco.GridBoard_create(markersX=1,
                               markersY=1,
                               markerLength=0.09,
                               markerSeparation=0.01,
                               dictionary=ARUCO_DICT)

# Create vectors we'll be using for rotations and translations for postures
rotation_vectors, translation_vectors = None, None
axis = np.float32([[-.5, -.5, 0], [-.5, .5, 0], [.5, .5, 0], [.5, -.5, 0],
                   [-.5, -.5, 1], [-.5, .5, 1], [.5, .5, 1], [.5, -.5, 1]])

# Make output image fullscreen
cv2.namedWindow('ProjectImage', cv2.WINDOW_NORMAL)
pipeline = rs.pipeline()
config = rs.config()
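
# The snippet ends before the main loop; a rough sketch of how the pieces above
# are typically tied together is shown below. The stream settings and the cube
# drawing are assumptions, not the original code.
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

while True:
    frames = pipeline.wait_for_frames()
    img = np.asanyarray(frames.get_color_frame().get_data())
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    corners, ids, rejected = aruco.detectMarkers(gray, ARUCO_DICT,
                                                 parameters=ARUCO_PARAMETERS)
    if ids is not None:
        img = aruco.drawDetectedMarkers(img, corners, ids)
        # Pose of the single-marker grid board defined above
        retval, rotation_vectors, translation_vectors = aruco.estimatePoseBoard(
            corners, ids, board, cameraMatrix, distCoeffs,
            rotation_vectors, translation_vectors)
        if retval:
            # Project the cube corners defined in `axis` and draw the base
            imgpts, _ = cv2.projectPoints(axis, rotation_vectors,
                                          translation_vectors, cameraMatrix,
                                          distCoeffs)
            imgpts = np.int32(imgpts).reshape(-1, 2)
            img = cv2.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), 3)

    cv2.imshow('ProjectImage', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

pipeline.stop()
cv2.destroyAllWindows()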
def CameraService():
    """
    Example entry point; please see Enumeration example for more in-depth
    comments on preparing and cleaning up the system.

    :return: True if successful, False otherwise.
    :rtype: bool
    """
    # Retrieve singleton reference to system object
    system = PySpin.System.GetInstance()

    # Retrieve list of cameras from the system
    cam_list = system.GetCameras()

    num_cameras = cam_list.GetSize()

    print "Number of cameras detected: %d" % num_cameras

    # Finish if there are no cameras
    if num_cameras == 0:

        # Clear camera list before releasing system
        cam_list.Clear()

        # Release system
        system.ReleaseInstance()

        print "Not enough cameras!"
        raw_input("Done! Press Enter to exit...")
        return False

    # Run example on each camera
    for i in range(num_cameras):
        cam = cam_list.GetByIndex(i)
        Cam1 = CameraParams(5813.48508684566, 2560.092268747669,
                            5857.787526106612, 1928.738413521021, 1.0, -0.0862,
                            0.3082, 0.0, 0.0, 0.0)

        print "Running example for camera %d..." % i

        result = run_single_camera(cam)
        print "Camera %d example complete..." % i

        # Operations on the frame
        if result != False:
            frame = np.array(result.GetData(), dtype="uint8").reshape(
                (result.GetHeight(), result.GetWidth(), 1))

            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
            parameters = aruco.DetectorParameters_create()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                frame, aruco_dict, parameters=parameters)

            print corners
            idx_All = np.zeros([4, 1])
            Corners_All = np.zeros([4, 4, 2])
            if len(ids) == 4:
                for k in range(4):
                    m = corners[k]
                    idx = ids[k]
                    idx_All[idx - 1, :] = idx
                    Corners_All[idx - 1, :, :] = m[0, :, :]

                Pca, Rca = get_object_pose_M(
                    np.reshape(Corners_All[0:4, :, :], [16, 2]), 97.92, 103.49,
                    103.49, Cam1.camMatrix, Cam1.distCoeff)
                Rcatmp = np.vstack((np.hstack((Rca, [[0], [0],
                                                     [0]])), [0, 0, 0, 1]))
                qca = quaternion_from_matrix(Rcatmp)
                Rca = quaternion_matrix(
                    [1.0 * qca[1], 1.0 * qca[0], -1.0 * qca[3], -1.0 * qca[2]])
                Rca = Rca[0:3, 0:3]
                Poa, Roa = Map_Camera2Robot(Pca, Rca)

                Pset = np.matmul(Roa,
                                 np.array([-0.3, 0.528, 0.13415
                                           ]))  #(-z,x,y)[-0.3,0.538,0.18415]
                #Rset = [[0.975004629674004,-0.012471665796434,-0.221834239166364],[0.006172005904842,0.999558362226998,-0.029068657437025],[ 0.222098803367709,0.026972913345476,0.974651005995628]] #ZYX
                Rset = [[
                    0.993265901528871, -0.009385243115853, -0.115476257610362
                ], [0.009746998214369, 0.999949198262679, 0.002568446930442],
                        [
                            0.115446285716984, -0.003676697632625,
                            0.993306919843344
                        ]]  #ZYX
                #Rset = [[0.993313684906749,  -0.000007686718905 , -0.115446625407115],[0.000280162683008 ,  0.999997213667025 ,  0.002343963962486],[ 0.115446285716984,  -0.002360635317184,   0.993310919357606]] #ZYX

                Roa = np.matmul(Roa, Rset)

                Rtmp = np.vstack((np.hstack((Roa, [[0], [0],
                                                   [0]])), [0, 0, 0, 1]))
                qoa = quaternion_from_matrix(Rtmp)

                #tmp = euler_from_matrix(Rtmp)
                #                print tmp
                #                qoa2 = quaternion_from_euler(1.0*tmp[2],1.0*tmp[0],-1.0*tmp[0])
                #Roa2 = quaternion_matrix([qoa[3],qoa[0],qoa[1],qoa[2]])
                #Roa2 = Roa2[0:3,0:3]

                print 'Poa:', Poa
                print 'Roa:', Roa
                #print 'Roa2:',Roa2
                print 'qoa_***:', [qoa[3], qoa[0], qoa[1], qoa[2]]

                print 'set point', Pset
                Poa = Poa + Pset

                #print 'qoa2:',[qoa2[3],qoa2[0],qoa2[1],qoa2[2]]

    # Release reference to camera
    # NOTE: Unlike the C++ examples, we cannot rely on pointer objects being automatically
    # cleaned up when going out of scope.
    # The usage of del is preferred to assigning the variable to None.
    del cam

    # Clear camera list before releasing system
    cam_list.Clear()

    # Release instance
    system.ReleaseInstance()

    #raw_input("Done! Press Enter to exit...")
    return Poa, [qoa[3], qoa[0], qoa[1], qoa[2]]
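
# Example call site (a sketch, not part of the original): CameraService()
# returns the object position and an orientation quaternion reordered to
# (w, x, y, z).
# Poa, qoa_wxyz = CameraService()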
Example #18
def loadArucoDict(requestedDict):  #TODO
    global ARUCO_DICT
    ARUCO_DICT = aruco.Dictionary_get(aruLibrary[requestedDict])
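
# The aruLibrary table used above is not shown in this example; a plausible
# sketch of such a name-to-dictionary mapping (the keys are assumptions) is:
aruLibrary = {
    "4x4_50": aruco.DICT_4X4_50,
    "5x5_250": aruco.DICT_5X5_250,
    "6x6_250": aruco.DICT_6X6_250,
    "original": aruco.DICT_ARUCO_ORIGINAL,
}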
Example #19
path = [int("99")] * (2)

for raj in range(2*(len(jail))):
    print("raj changed")

    if raj<(2*(len(jail))):
        if raj%2==0:
            if raj==0:
                while True:
                    #cap = cv.VideoCapture(1)

                    ret, im = cap.read()
                    cropped = im[y_1:y_2,x_1:x_2]
                    img = cv.resize(cropped, (dim, dim), interpolation=cv.INTER_AREA)
                    aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
                    parameters = aruco.DetectorParameters_create()
                    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
                    #print(size(corners))
                    #cap.release()
                    try:
                        print(corners)
                        c1x = (corners[0][0][0][0] + corners[0][0][1][0]) / 2
                        c2x = (corners[0][0][2][0] + corners[0][0][3][0]) / 2
                        c1y = (corners[0][0][0][1] + corners[0][0][1][1]) / 2
                        c2y = (corners[0][0][2][1] + corners[0][0][3][1]) / 2
                        cenx = (c1x + c2x) / 2
                        ceny = (c1y + c2y) / 2
                        print(cenx,ceny)
                        distblue=[-1,-1,-1,-1]
Example #20
    def __init__(self):
        camera_id = str(rospy.get_param('~camera_id'))
        color_width = rospy.get_param('~color_width')
        color_high = rospy.get_param('~color_high')
        charuco_row = rospy.get_param('~charuco_row')
        charuco_col = rospy.get_param('~charuco_col')
        square_length = rospy.get_param('~square_length')
        marker_length = rospy.get_param('~marker_length')
        self.cnd = 0
        self.frameId = 0
        self.pipeline = rs.pipeline()
        rs_config = rs.config()
        rs_config.enable_stream(rs.stream.color, color_width, color_high,
                                rs.format.bgr8, 30)
        self.pipeline.start(rs_config)

        # Constant parameters used in Aruco methods
        self.aruco_param = aruco.DetectorParameters_create()
        self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
        # Create grid board object we're using in our stream
        self.charuco_board = aruco.CharucoBoard_create(
            squaresX=charuco_col,
            squaresY=charuco_row,
            squareLength=square_length,
            markerLength=marker_length,
            dictionary=self.aruco_dict)

        # Check for camera calibration data

        config = ConfigParser.ConfigParser()
        config.optionxform = str
        rospack = rospkg.RosPack()
        self.curr_path = rospack.get_path('hand_eye')
        config.read(self.curr_path + '/config/camera_' + str(camera_id) +
                    '_internal.ini')
        internal_name = 'Internal_' + str(color_width) + '_' + str(color_high)
        b00 = float(config.get(internal_name, "Key_1_1"))
        b01 = float(config.get(internal_name, "Key_1_2"))
        b02 = float(config.get(internal_name, "Key_1_3"))
        b10 = float(config.get(internal_name, "Key_2_1"))
        b11 = float(config.get(internal_name, "Key_2_2"))
        b12 = float(config.get(internal_name, "Key_2_3"))
        b20 = float(config.get(internal_name, "Key_3_1"))
        b21 = float(config.get(internal_name, "Key_3_2"))
        b22 = float(config.get(internal_name, "Key_3_3"))

        self.cameraMatrix = np.mat([[b00, b01, b02], [b10, b11, b12],
                                    [b20, b21, b22]])
        # c_x = 643.47548083
        # c_y = 363.67742746
        # f_x = 906.60886808
        # f_y = 909.34831447
        # k_1 = 0.16962942
        # k_2 = -0.5560001
        # p_1 = 0.00116353
        # p_2 = -0.00122694
        # k_3 = 0.52491878

        # c_x = 649.007507324219
        # c_y = 356.122222900391
        # f_x = 922.76806640625
        # f_y = 923.262023925781

        # self.cameraMatrix = np.array([[f_x, 0, c_x],
        #                        [0, f_y, c_y],
        #                        [0, 0, 1]])
        # self.distCoeffs = np.array([k_1, k_2, p_1, p_2, k_3])
        self.distCoeffs = np.array([0.0, 0, 0, 0, 0])

        # Create vectors we'll be using for rotations and translations for postures
        self.rvecs = None
        self.tvecs = None
        self.rvecs_arr = np.zeros((3, NUMBER))
        self.tvecs_arr = np.zeros((3, NUMBER))

        self.cam = None
        self.QueryImg = None
        self.init_server()
        frames = self.pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        self.QueryImg = np.asanyarray(color_frame.get_data())
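
    # Sketch of a per-frame ChAruco pose step using the members prepared above.
    # The method name and the exact flow are assumptions, not part of the node.
    def estimate_board_pose(self):
        frames = self.pipeline.wait_for_frames()
        self.QueryImg = np.asanyarray(frames.get_color_frame().get_data())
        gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
        corners, ids, _ = aruco.detectMarkers(gray, self.aruco_dict,
                                              parameters=self.aruco_param)
        if ids is None:
            return None
        num, ch_corners, ch_ids = aruco.interpolateCornersCharuco(
            corners, ids, gray, self.charuco_board)
        if num < 4:
            return None
        ok, rvec, tvec = aruco.estimatePoseCharucoBoard(
            ch_corners, ch_ids, self.charuco_board, self.cameraMatrix,
            self.distCoeffs, None, None)
        return (rvec, tvec) if ok else None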
Example #21
import numpy as np
import cv2
import cv2.aruco as aruco

# Select type of aruco marker (size)
aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_1000)

# Create an image from the marker
# second param is ID number
# last param is total image size
for x in list(range(101,131)):
    img = aruco.drawMarker(aruco_dict, x, 800)
    cv2.imwrite(str(x)+".jpg", img)
    
# Display the image to us
cv2.imshow('frame', img)
# Exit on any key
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #22
File: track.py  Project: arem-cdr/Camera
 def get(self, img):
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
     parameters = aruco.DetectorParameters_create()
     corners, ids, rejectedImgPoints = aruco.detectMarkers(
         gray, aruco_dict, parameters=parameters)
Example #23
def generateImage( nNumMark, strName, strDstPath = "./generated/", strSrcPath = "./imgs/" ):
    try: os.makedirs( strDstPath )
    except: pass

    w=2100
    h=2970 # ~ A4 proportions
    
    white = 255,255,255
    grey = 127,127,127
    lgrey = 191,191,191
    llgrey = 223,223,223
    black = 0,0,0
    
    strImgIn = strSrcPath + strName + "."
    if os.path.exists(strImgIn+"png"):
        strImgIn += "png"
    else:
        strImgIn += "jpg"
        
    pic = cv2.imread(strImgIn,cv2.IMREAD_UNCHANGED) # also read potential alpha layer
    hp, wp, nNbrPlane = pic.shape
    if nNbrPlane == 4:
        # set white to pixel containing alpha at 0
        pic = rgba2rgb( pic, white )
        
    #~ BGRA[y,x,3]
    
    global global_dictionnary_type
    aruco_dict = aruco.Dictionary_get(global_dictionnary_type)
    #~ computeStatOnArucoDict(aruco_dict)

    aru = aruco.drawMarker(aruco_dict,nNumMark, 256)

    
    strImgOut = strDstPath + "card_" + strName + ".png"


    if 0:
        # reduce image to debug
        w//=4
        h//=4
    
    
    h_pic_dst = int(h/2.1) # define the final size of the pictures on the page
    w_pic_dst = (h_pic_dst * wp)//hp
    if w_pic_dst > w:
        w_pic_dst = int(w * 0.8)
        h_pic_dst = (w_pic_dst*hp)//wp
    
    xpic = (w-w_pic_dst)//2
    ypic = (h-h_pic_dst)//2
    
    w_aru = int(w/3.4) # /3.4 was fine but too small when seen from a distance; /3.1 gives a larger marker. This sets the final marker size on the page
    h_aru = w_aru
    
    xaru = (w-w_aru)//2
    rAruMarginCoef = 0.036 # 0.015
    yaru = int(0+h*rAruMarginCoef)
    yaru2 = int(0+h*(1-rAruMarginCoef)-h_aru)
    
    im = np.zeros( (h,w,3), dtype=np.uint8 )
    im[:] = white
    
    pic = cv2.resize( pic, (w_pic_dst, h_pic_dst) )
    
    if 0:
        # turn mark to grey
        aru[:] //= 2
        aru[:] += 128
    aru = cv2.resize( aru, (w_aru, h_aru) )
    aru = cv2.cvtColor(aru,cv2.COLOR_GRAY2RGB)
    
    
    im[ypic:h_pic_dst+ypic,xpic:w_pic_dst+xpic]=pic[:]
    im[yaru:h_aru+yaru,xaru:w_aru+xaru]=aru[:]
    im[yaru2:h_aru+yaru2,xaru:w_aru+xaru]=aru[:]
    
    nMarginBorder = 70
    #~ cv2.putText(im, str(nNumMark), (nMarginBorder+20, int(h-nMarginBorder-20) ), cv2.FONT_HERSHEY_SIMPLEX, 1.4, black, 1)
    cv2.putText(im, str(nNumMark), (w-nMarginBorder-20-140, int(h-nMarginBorder-30) ), cv2.FONT_HERSHEY_SIMPLEX, 1.4, lgrey, 2 )
    
    if 1:
        nThickness = 44 # 30=> 3mm of plastic border after cut
        cv2.rectangle(im, (nThickness//2,nThickness//2),(w-1-nThickness//2,h-1-nThickness//2), llgrey, nThickness )

    if 1:
        # too bad redoing that for each image!!!
        logo = cv2.imread("logo_sbr.jpg")
        hl,wl,dummy = logo.shape
        wlf = 400
        hlf = (wlf*hl) // wl
        logo = cv2.resize( logo, (wlf,hlf) )
        nAddedMargin = 30
        #im[h-hlf-nMarginBorder-nAddedMargin:h-nMarginBorder-nAddedMargin,w-wlf-nMarginBorder-nAddedMargin:w-nMarginBorder-nAddedMargin] = logo
        im[h-hlf-nMarginBorder-nAddedMargin:h-nMarginBorder-nAddedMargin,nMarginBorder+nAddedMargin+10:nMarginBorder+nAddedMargin+wlf+10] = logo
    cv2.imwrite(strImgOut, im)

    if 0:
        im = cv2.resize( im, (w//4, h//4) )
        cv2.imshow("", im)
        cv2.waitKey(0)
    return strImgOut
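
# Example usage (a sketch; "robot" is a placeholder name): with ./imgs/robot.png
# or ./imgs/robot.jpg present, this writes ./generated/card_robot.png carrying
# the requested ArUco marker at the top and bottom of an A4-proportioned page.
# generateImage(7, "robot")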
Example #24
	def __init__(self):
		self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
		self.parameters = aruco.DetectorParameters_create()
Example #25
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--camera",
                required=True,
                help="path to the input camera")
args = vars(ap.parse_args())

# Initialize camera input
cap = cv2.VideoCapture(int(args["camera"]))

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # print(frame.shape) #480x640
    # Our operations on the frame come here
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    # Detect Markers and print the corners of the found ones
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        frame, aruco_dict, parameters=parameters)
    print(corners)

    # Draw the found markers in the image, and show the image.
    frame = aruco.drawDetectedMarkers(frame, corners)

    # Perspective transformation
    if corners != []:
        warped, M, _ = four_point_transform(frame, corners[0][0], 0.144)
        cv2.imshow('warped', warped)
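
# four_point_transform is defined elsewhere and is not shown in this example.
# The sketch below shows what such a helper usually does; the meaning of the
# third argument and third return value (assumed: marker side length in metres
# and a metres-per-pixel ratio) is a guess, not the original implementation.
import numpy as np

def four_point_transform_sketch(image, pts, marker_side_m):
    pts = np.asarray(pts, dtype="float32")  # TL, TR, BR, BL marker corners
    side_px = int(max(np.linalg.norm(pts[0] - pts[1]),
                      np.linalg.norm(pts[1] - pts[2])))
    dst = np.array([[0, 0], [side_px - 1, 0],
                    [side_px - 1, side_px - 1], [0, side_px - 1]],
                   dtype="float32")
    M = cv2.getPerspectiveTransform(pts, dst)
    warped = cv2.warpPerspective(image, M, (side_px, side_px))
    ratio = marker_side_m / side_px  # metres per pixel in the warped view
    return warped, M, ratio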
Example #26
def main():
    global flFirst
    """
    This function loads the target 3D models and renders them on the detected markers.
    """
    homography = None
    # matrix of camera parameters (made up but works quite well for me)
    camera_parameters = np.array(mtx)

    dir_name = os.getcwd()

    obj2 = OBJ(os.path.join(dir_name, 'models1/rat.obj'), swapyz=True)
    obj = OBJ(os.path.join(dir_name, 'models1/fox.obj'), swapyz=True)
    # init video capture
    cap = cv2.VideoCapture(0)

    while True:
        # read the current frame
        ret, frame = cap.read()
        frame2 = np.copy(frame)
        if not ret:
            print("Unable to capture video")
            return
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        #res = aruco.detectMarkers(gray, aruco.getPredefinedDictionary(aruco.DICT_6X6_250))
        '''for i in range(len(res[0])):
            print(res[0][i])
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(res[0][i], 1, mtx, dist)
            #print(i, rvecs, tvecs)
            #sm = moveModel(frame, (135, 150, 110), corners)
            sm = moveModel2(frame, (135, 156, 118), corners, tvecs, rvecs)
        #frame = aruco.drawDetectedMarkers(frame.copy(), corners, ids)'''

        if corners:
            '''for i in range(len(ids)):
                rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(np.array([corners[0][i]]), 1, mtx, dist)
                #print('----------------------')
                H = mtx @ (rvecs[0].T @ rvecs[0])
                #print(mtx @ (tvecs[0].T @ rvecs[0]))
                #print(rvecs, tvecs, sep='\n')
                v = glyp(rvecs, tvecs)
                #print(v)'''

            src_pts = np.array([[0, 0], [0, 200], [200, 200],
                                [200, 0]]).reshape(-1, 1, 2)
            dst_pts = []
            avCorn = [[0] * 2 for i in range(4)]
            k = 0
            #print(corners[0][0])
            for i in range(len(corners)):
                k += 1
                for j in range(4):
                    avCorn[j][0] += corners[i][0][j][0]
                    avCorn[j][1] += corners[i][0][j][1]
                    #cv2.circle(frame, (corners[i][0][j][0], corners[i][0][j][1]), 1, (255 * (j % 3 == 0),255 * (j % 2 == 0),255), 10)
            for i in range(len(avCorn)):
                avCorn[i][0] = avCorn[i][0] // k
                avCorn[i][1] = avCorn[i][1] // k
            '''for i in avCorn:
                
                cv2.circle(frame, (i[0], i[1]), 1, (255, 0, 125), 10)'''
            '''for i in corners[0][0]:
                dst_pts.append([i[0] + sm[0], i[1] + sm[1]])'''
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
                np.array([avCorn]), 1, mtx, dist)

            rvecsMat, _ = cv2.Rodrigues(rvecs)
            print('-----------')
            print(rvecs, tvecs)
            print(rvecsMat)
            for i in range(len(avCorn)):
                avCorn[i][0] = int(avCorn[i][0])
                avCorn[i][1] = int(avCorn[i][1])

            if flFirst:
                avCornOld = avCorn.copy()
                flFirst = False
            else:
                for i in range(len(avCorn)):
                    for j in range(2):
                        if abs(avCorn[i][j] - avCornOld[i][j]) <= 5:
                            avCorn[i][j] = avCornOld[i][j]
                avCornOld = avCorn.copy()

            sm = moveModel2(frame, (135, 156, 118), np.array(avCorn), tvecs,
                            rvecs)
            #print(np.array([avCorn]))
            #print('-----------------')

            for i in avCorn:
                dst_pts.append([i[0] + sm[0], i[1] + sm[1]])
            dst_pts = np.array(dst_pts).reshape(-1, 1, 2)
            homography, mask = cv2.findHomography(src_pts, dst_pts)
            #print(homography)
            R_T = get_extended_RT(camera_parameters, homography)
            #print('aaaa',R_T)
            transformation = camera_parameters.dot(R_T)
            print(R_T)
            frame = render(frame, obj, transformation, False)

        #cv2.imshow('frame2', frame2)
        #cv2.imshow('mask', mask)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    return 0
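
# get_extended_RT is imported from elsewhere and not shown in this example.
# The sketch below shows the usual homography -> [R|t] recovery such a helper
# performs (an assumption, not the original implementation); sign conventions
# may need adjusting depending on the homography.
def get_extended_RT_sketch(K, H):
    # Columns of K^-1 H approximate (r1, r2, t) up to a common scale
    Rt = np.linalg.inv(K) @ H
    r1, r2, t = Rt[:, 0], Rt[:, 1], Rt[:, 2]
    scale = np.sqrt(np.linalg.norm(r1) * np.linalg.norm(r2))
    r1, r2, t = r1 / scale, r2 / scale, t / scale
    r3 = np.cross(r1, r2)
    return np.column_stack((r1, r2, r3, t))  # 3x4, multiplied by K in main()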
Example #27
"""
Connect to the drone and calibrate the camera
"""

import time
import tellopy
import av
import cv2.cv2 as cv2  # for avoidance of pylint error
from cv2 import aruco
import numpy as np

aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
board = aruco.CharucoBoard_create(7, 5, 1, .8, aruco_dict)


def make_chessboard():
    """Make a "chess" aruco board, and save to file for printing."""
    imboard = board.draw((2000, 2000))
    cv2.imwrite('chessboard.tiff', imboard)


def calibrate_camera(allCorners, allIds, imsize):
    """Calibrates the camera using the dected corners."""
    print("CAMERA CALIBRATION")

    cameraMatrixInit = np.array([[1000., 0., imsize[0] / 2.],
                                 [0., 1000., imsize[1] / 2.], [0., 0., 1.]])

    distCoeffsInit = np.zeros((5, 1))
    flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL +
             cv2.CALIB_FIX_ASPECT_RATIO)
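    # The function is truncated here; it presumably finishes with something
    # like the following call (a sketch, not the original code):
    (ret, camera_matrix, dist_coeffs, rvecs, tvecs,
     _, _, _) = cv2.aruco.calibrateCameraCharucoExtended(
        charucoCorners=allCorners, charucoIds=allIds, board=board,
        imageSize=imsize, cameraMatrix=cameraMatrixInit,
        distCoeffs=distCoeffsInit, flags=flags,
        criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
    return ret, camera_matrix, dist_coeffs, rvecs, tvecs

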
def frame_loop(cap, display=False):
    """capture aruco
    
    Arguments:
        cap {open cv capture object} -- the opencv device to view
    
    Keyword Arguments:
        display {bool} -- Print debug to stdout and display the OpenCV frame (default: {False})
    
    Returns:
        All_Objects -- An All_Objects object containing the detected positions of each object
    """
    positions = All_Objects()

    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) # Uncomment to determine dimensions
    width = frame.shape[1]
    height = frame.shape[0]
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()

    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    #print(corners)
    #print(ids)

    # It's working.
    # My problem was that the cellphone put black all around it; the algorithm
    # depends very much on finding rectangular black blobs.



    for count, i in enumerate(corners):
        #x = (int(corners[i-1][0][0][0]) + int(corners[i-1][0][1][0]) + int(corners[i-1][0][2][0]) + int(corners[i-1][0][3][0])) / 4
        #y = (int(corners[i-1][0][0][1]) + int(corners[i-1][0][1][1]) + int(corners[i-1][0][2][1]) + int(corners[i-1][0][3][1])) / 4
        #print(x, y)
        j = i.ravel()
        print(j)
        x = (j[0] + j[2] + j[4] + j[6])/4
        y = (j[1] + j[3] + j[5] + j[7])/4
        if ids[count] == 1:
            print("monkey")
            positions.monkey.x=x
            positions.monkey.y=y
        elif ids[count] == 2:
            print("banana")
            positions.banana.x=x
            positions.banana.y=y
        elif ids[count] == 3:
            print("ramp")
            positions.ramp.x=x
            positions.ramp.y=y
        print(x, y)

    if display is True:
        gray = aruco.drawDetectedMarkers(gray, corners)

        #print(rejectedImgPoints)
        # Add indicators and grid
        for i in range(1, gridDimensions):
            cv2.line(gray, ((width // gridDimensions) * i, 0),
                     ((width // gridDimensions) * i, height), (255, 255, 0), 1, 1)
        for i in range(1, gridDimensions):
            cv2.line(gray, (0, (height // gridDimensions) * i),
                     (width, (height // gridDimensions) * i), (255, 255, 0), 1, 1)
        # Display the resulting frame
        cv2.imshow('frame',gray)

    return positions
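
# Example call site (a sketch; assumes a webcam at index 0 and the All_Objects
# fields used above):
if __name__ == "__main__":
    video = cv2.VideoCapture(0)
    while True:
        positions = frame_loop(video, display=True)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video.release()
    cv2.destroyAllWindows()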
Example #29
ret2, mtx2, dist2, rvecs2, tvecs2 = cv2.calibrateCamera(
    objpoints2, imgpoints2, gray2.shape[::-1], None, None)

retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    objpoints1, imgpoints1, imgpoints2, mtx1, dist1, mtx2, dist2,
    gray1.shape[::-1], criteria)

rset = []
tset = []
dists = []
objpoints3 = []
imgpoints3 = []
objpoints4 = []
imgpoints4 = []
## set dictionary size depending on the aruco marker selected
aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
#board = aruco.GridBoard_create(4, 5, markerLength, 0, aruco_dict)

for (fname1, fname2) in zip(images3, images4):

    frame1 = cv2.imread(fname1)
    frame2 = cv2.imread(fname2)
    # operations on the frame
    gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # detector parameters can be set here (List of detection parameters[3])
    parameters = aruco.DetectorParameters_create()
    #parameters.adaptiveThreshConstant = 10

    # lists of ids and the corners belonging to each id
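    # The example is truncated here; detection in the two views presumably
    # continues along these lines (a sketch, not the original code; markerLength
    # is the marker side length referenced above):
    corners1, ids1, _ = aruco.detectMarkers(gray1, aruco_dict,
                                            parameters=parameters)
    corners2, ids2, _ = aruco.detectMarkers(gray2, aruco_dict,
                                            parameters=parameters)
    if ids1 is not None and ids2 is not None:
        rvecs1, tvecs1, _ = aruco.estimatePoseSingleMarkers(
            corners1, markerLength, mtx1, dist1)
        rvecs2, tvecs2, _ = aruco.estimatePoseSingleMarkers(
            corners2, markerLength, mtx2, dist2)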
Example #30
    def table_callback(self, rgb_img,ros_cloud):
        try:
            rgb_img = self.bridge.imgmsg_to_cv2(rgb_img,'bgr8')
            gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
            aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_250)
            parameters =  aruco.DetectorParameters_create()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(gray,aruco_dict, parameters=parameters)
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
            for corner in corners:
                cv2.cornerSubPix(gray, corner, winSize = (3,3), zeroZone = (-1,-1), criteria = criteria)
            frame_markers = aruco.drawDetectedMarkers(rgb_img.copy(), corners, ids)
            markerLength = 0.8
            flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
            rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, markerLength, cameramatrix, dist_coeffs)
            # result_matrix = np.zeros(shape=[4,4])
            # rotation_matrix = eulerAnglesToRotationMatrix(rvecs[0][0])
            # result_matrix[:3,:3] = rotation_matrix
            # result_matrix[:3,3] = tvecs[0]
            # result_matrix[3,3] = 1.
            length_of_axis = 0.6
            total_center = np.zeros(shape = [4,1,2])
            for index in range(len(ids)):
                center = np.array(np.mean(corners[index][0],axis = 0), dtype = np.int)
                total_center[ids[index]-1] = center
            imaxis = aruco.drawDetectedMarkers(rgb_img.copy(), corners, ids)
            for i in range(len(tvecs)):
                imaxis = aruco.drawAxis(imaxis, cameramatrix, dist_coeffs, rvecs[i], tvecs[i], length_of_axis)
            # Display the resulting frame
            self.ar_pub.publish(self.bridge.cv2_to_imgmsg(imaxis,'bgr8'))

            print('height: {}'.format(ros_cloud.height))
            print('width: {}'.format(ros_cloud.width))


            ## get only 1 point data from point clouds
            ## uvs must be iterable, so use 2d list. 
            pose_array = PoseArray()
            
            for point_i in range(len(ids)):
                
                pixel_x = int(total_center[point_i][0][0])
                pixel_y = int(total_center[point_i][0][1])
                for p in pc2.read_points(ros_cloud,  field_names = ("x", "y", "z"), uvs=[[pixel_x,pixel_y]]):
                    # print('{}  p.x: {}'.format(ids[point_i],p[0]))
                    # print('{}  p.y: {}'.format(ids[point_i],p[1]))
                    # print('{}  p.z: {}'.format(ids[point_i],p[2]))
                    pose_msg = Pose()

                    tx, ty,tz = transform_coor(p[0],p[1],p[2])
                    pose_msg.position.x = tx
                    pose_msg.position.y = ty
                    pose_msg.position.z = tz
                    pose_msg.orientation.x = 0
                    pose_msg.orientation.y = 0
                    pose_msg.orientation.z = 0
                    pose_msg.orientation.w = ids[point_i]
                    pose_array.poses.append(pose_msg)
            self.pose_pub.publish(pose_array)
         # When everything done, release the capture
        except TypeError as e :
            print(e)