Ejemplo n.º 1
0
    def processG(self, *args):
        """Compute the hand-eye transform and publish it over the bridge.

        Expects the key-handler argument pack: args[7] hand-eye calibrator,
        args[8] info-text display, args[12] bridge, args[13] camera name.
        """
        he_calib = args[7]
        info_display = args[8]
        bridge = args[12]
        cam_name = args[13]

        # Not enough samples collected yet: optionally raise the exit flag
        # (only when the bridge has a live dict) and bail out.
        if he_calib.cntInputData < 3:
            if bridge.ip_dict is not None:
                super().enableExitFlag()
            return

        PrintMsg.print_error(
            "---------------------------------------------------------------"
        )
        transform = he_calib.getHandEyeResultMatrixUsingOpenCV()

        # Flatten the 4x4 matrix into a 16-element row for the UI update.
        update_data = CalibHandeyeUpdate().update_result(
            transform.reshape(1, 16)[0]
        )

        info_display.set_info_text("Succeeded to extract a handeye matrix.")

        # Hand the result to the websocket bridge process.
        bridge.send_dict_data(
            "object",
            {
                "name": "handeyecalib:" + cam_name,
                "objectData": update_data,
            },
        )
Ejemplo n.º 2
0
    def processC(self, *args):
        """Save the current color frame to disk and report the capture count.

        args[0]: color image (OpenCV-compatible array), args[1]: directory
        for frame images, args[4]: info-text display object.
        """
        color_image = args[0]
        dirFrameImage = args[1]
        infoText = args[4]

        # persist the frame under the current (pre-increment) index
        cv2.imwrite(os.path.join(dirFrameImage,
                                 str(self.interation) + ".jpg"), color_image)
        self.interation += 1

        # BUGFIX: "caputured" typo, and the log previously reported the
        # pre-increment count while the UI showed the post-increment one —
        # report the same value to both.
        strInfoText = f"Image captured - {self.interation}"
        PrintMsg.print_error(strInfoText)
        infoText.set_info_text(strInfoText)
Ejemplo n.º 3
0
    def processG(self, *args):
        """Persist the ROI regions to a per-camera file and update displays.

        args[0]: camera index, args[1]: list of ROI corner pairs,
        args[2]: matching region IDs, args[3]: info-text display.
        """
        camIndex = args[0]
        ROIRegions = args[1]
        ROIRegionIds = args[2]
        infoText = args[3]

        regionCnt = len(ROIRegions)
        if regionCnt == 0:
            # nothing to save
            return

        savedFileName = "ROIRegions" + str(camIndex) + ".json"
        ROIRegionFile = cv2.FileStorage(savedFileName, cv2.FILE_STORAGE_WRITE)
        # (removed redundant second len(ROIRegions) computation)
        ROIRegionFile.write("ROICnt", regionCnt)

        rur = ROIUpdateRegions()

        # NOTE(review): the same keys "RegionID"/"Region" are written every
        # iteration — confirm readers expect repeated FileStorage nodes.
        for idx in range(regionCnt):
            ROIRegionFile.write("RegionID", str(ROIRegionIds[idx]))
            ROIRegionFile.write("Region", np.array(ROIRegions[idx]))
            # corner order: [1] presumably holds one (x, y) corner and [0]
            # the other — verify against the ROI capture code.
            rur.add_roi_region(
                str(ROIRegionIds[idx]),
                int(round(ROIRegions[idx][1][0])),
                int(round(ROIRegions[idx][1][1])),
                int(round(ROIRegions[idx][0][0])),
                int(round(ROIRegions[idx][0][1])),
            )

        ROIRegionFile.release()
        PrintMsg.print_error("ROI Regions saved.")

        # text out to stdout
        rur.print_roi_regions()

        # update info. text display
        infoText.set_info_text("ROI Regions saved successfully.")
def thread_data_receive(sock, interproc_dict, command_queue):
    """Receive JSON messages from the websocket and enqueue CMD messages.

    Loops until the owning thread's ``do_run`` attribute is set to False
    (done by process_bridge_data on shutdown). Expected message schema:
    {"type": ..., "name": ..., "body": ...}.
    """
    # BUGFIX: threading.currentThread() is a deprecated alias
    curr_thread = threading.current_thread()
    while getattr(curr_thread, "do_run", True):
        try:
            recv_message = sock.recv()
            if len(recv_message) > 0:
                recv_obj = json.loads(recv_message)
                # renamed from `type`, which shadowed the builtin
                (msg_type, name, cmd) = (
                    recv_obj["type"],
                    recv_obj["name"],
                    recv_obj["body"],
                )
                PrintMsg.print_error(msg_type, cmd)
                if msg_type == BridgeDataType.CMD:
                    command_queue.put((name, cmd))
        except Exception as ex:
            # boundary handler: log and keep listening
            PrintMsg.print_error(ex)

    PrintMsg.print_error("thread_data_receive ended")
def process_bridge_data(interproc_dict, ve, cq):
    """Forward shared-dict payloads to the websocket server until app exit.

    interproc_dict: shared dict carrying "object"/"error"/"video" payloads
    and the "app_exit" flag. ve: event signalled per video frame.
    cq: command queue filled by the receive thread.
    """
    ws = create_connection(
        f"ws://{AppConfig.ServerIP}:34000",
        sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),),
    )

    # background thread draining incoming commands into cq
    ws_recv_thread = threading.Thread(
        target=thread_data_receive,
        args=(
            ws,
            interproc_dict,
            cq,
        ),
    )
    ws_recv_thread.start()

    while True:
        # wait for a video frame event
        ve.wait()

        try:
            # TODO: should modularize these data conversion..
            if interproc_dict["object"] != {}:
                name = interproc_dict["object"]["name"]
                object_data = interproc_dict["object"]["objectData"]
                bridge_data = ObjectBridgeData(name, object_data)
                ws.send(bridge_data.dumps())

            if interproc_dict["error"] != {}:
                name = interproc_dict["error"]["name"]
                message = interproc_dict["error"]["message"]
                bridge_data = ErrorBridgeData(name, message)
                ws.send(bridge_data.dumps())

            if interproc_dict["video"] != {}:
                # get width & height of the current frame
                name = interproc_dict["video"]["name"]
                width = interproc_dict["video"]["width"]
                height = interproc_dict["video"]["height"]
                frame = interproc_dict["video"]["frame"]

                if isinstance(frame, np.ndarray):
                    jpg_image = cv2.imencode(".jpg", frame)[1]
                    base64_encoded = base64.b64encode(jpg_image)

                    bridge_data = VideoBridgeData(
                        name,
                        base64_encoded.decode("utf8"),
                        width,
                        height,
                    )
                    # BUGFIX: send only when a frame was actually encoded.
                    # This call used to sit outside the isinstance guard and
                    # re-sent stale bridge_data (or raised NameError) for
                    # non-ndarray frames.
                    ws.send(bridge_data.dumps())

            # exit if 'app_exit' flag is set
            if interproc_dict["app_exit"] is True:
                break

        except Exception as e:
            PrintMsg.print_error(e)
            break

        # clear consumed payloads so they are not re-sent next wakeup
        interproc_dict["object"] = {}
        interproc_dict["video"] = {}
        interproc_dict["error"] = {}

    # close socket and terminate the recv thread
    ws_recv_thread.do_run = False
    ws.close()
    # Spawn the application-selection and bridge workers as separate
    # processes; all share the same manager dict, frame-sync event and
    # command queue. NOTE(review): the enclosing function is not visible
    # here — mp_global_dict/video_sync/cmd_queue come from its scope.
    Processes = [
        mp.Process(
            target=process_application_selection,
            args=(
                mp_global_dict,
                video_sync,
                cmd_queue,
            ),
        ),
        mp.Process(
            target=process_bridge_data,
            args=(
                mp_global_dict,
                video_sync,
                cmd_queue,
            ),
        ),
    ]

    # start every worker, then block until all of them have exited
    [process.start() for process in Processes]
    [process.join() for process in Processes]


if __name__ == "__main__":

    # expect exactly two command-line arguments
    if len(sys.argv) != 3:
        # BUGFIX: "paramters" typo; exit with a non-zero status on bad usage
        PrintMsg.print_error("Invalid parameters..")
        sys.exit(1)

    run_application_main(sys.argv[1], sys.argv[2])
Ejemplo n.º 7
0
    def processC(self, *args):
        """Capture one hand-eye sample from the current marker detection.

        args[0]: aruco-board found flag, args[2]: detected marker ids,
        args[3]/args[4]: tvec/rvec, args[7]: hand-eye calibrator,
        args[8]: info-text display, args[9]: graphql client,
        args[10]: robot name. (args[1], the color image, is unused.)
        """
        findAruco = args[0]
        ids = args[2]
        tvec = args[3]
        rvec = args[4]
        handeye = args[7]
        infoText = args[8]
        gqlDataClient = args[9]
        robotName = args[10]

        PrintMsg.print_error(
            "---------------------------------------------------------------"
        )
        if ids is None:
            return

        def _pose_as_list(pose):
            # convert the robot-pose dict into the [x, y, z, u, v, w] list
            # expected by captureHandEyeInputs
            return [
                pose["x"],
                pose["y"],
                pose["z"],
                pose["u"],
                pose["v"],
                pose["w"],
            ]

        if not AppConfig.UseArucoBoard:
            # single-marker mode: look for the configured calibration marker
            for idx in range(ids.size):
                if ids[idx] == AppConfig.CalibMarkerID:
                    # get the current robot position
                    currTPList = _pose_as_list(
                        gqlDataClient.get_robot_pose(robotName))

                    # capture additional matrices here
                    handeye.captureHandEyeInputs(
                        currTPList, rvec[idx], tvec[idx])

                    if handeye.cntInputData >= 3:
                        handeye.calculateHandEyeMatrix()

                    PrintMsg.print_error(
                        "Input Data Count: " + str(handeye.cntInputData)
                    )
                    strText = (
                        "Input Data Count: "
                        + str(handeye.cntInputData)
                        + "("
                        + str(handeye.distance)
                        + ")"
                    )
                    infoText.set_info_text(strText)
        else:
            # aruco-board mode: capture whenever the board was found
            if findAruco is True:
                # get the current robot position
                currTPList = _pose_as_list(
                    gqlDataClient.get_robot_pose(robotName))

                # capture additional matrices here
                handeye.captureHandEyeInputs(currTPList, rvec.T, tvec.T)

                if handeye.cntInputData >= 3:
                    handeye.calculateHandEyeMatrix()

                # BUGFIX: the log f-string was missing its closing paren
                PrintMsg.print_error(f"({handeye.cntInputData})")
                strText = f"{handeye.cntInputData} ({handeye.distance})"
                infoText.set_info_text(strText)
def calibcamera_engine(app_args, interproc_dict=None, ve=None, cq=None):
    """Run the interactive camera-calibration loop for one camera.

    app_args: name of the tracking camera to calibrate.
    interproc_dict / ve / cq: shared dict, frame event and command queue
    for the websocket bridge; when the bridge is inactive the engine
    falls back to a local OpenCV window and keyboard input.
    """
    camcalib_info("camera calibration started..")

    cameraName = app_args
    # BUGFIX: was `cameraName is ""` — identity comparison with a literal
    # is implementation-dependent; test emptiness instead.
    if not cameraName:
        PrintMsg.print_error("Input camera name is not available.")
        sys.exit()

    bridge_ip = BridgeInterprocess(interproc_dict, ve, cq)

    try:
        camcalib_info("graphql client parsing started..")
        gqlDataClient = VisonGqlDataClient()
        if (gqlDataClient.connect(
                f"http://{AppConfig.ServerIP}:3000",
                "system",
                "*****@*****.**",
                "admin",
        ) is False):
            camcalib_err("graphql client connection error..")
            sys.exit()

        # get camera data from operato
        gqlDataClient.fetch_tracking_camera_all()
        cameraObject = gqlDataClient.trackingCameras[cameraName]

        camcalib_info("video capture started..")
        # resolution: realsense cameras are forced to full HD, any other
        # type uses the camera's configured size (app defaults as fallback)
        if cameraObject.type == "realsense-camera":
            AppConfig.VideoFrameWidth, AppConfig.VideoFrameHeight = 1920, 1080
        else:
            AppConfig.VideoFrameWidth = (cameraObject.width
                                         or AppConfig.VideoFrameWidth)
            AppConfig.VideoFrameHeight = (cameraObject.height
                                          or AppConfig.VideoFrameHeight)

        vcap = VideoCaptureFactory.create_video_capture(
            cameraObject.type,
            cameraObject.endpoint,
            AppConfig.VideoFrameWidth,
            AppConfig.VideoFrameHeight,
            AppConfig.VideoFramePerSec,
            cameraName,
        )

        # Start streaming
        vcap.start()

        camcalib_info("camera calibration configuration started..")
        # create a camera calibration object
        if AppConfig.UseCalibChessBoard:
            calibcam = CalibrationCamera(AppConfig.ChessWidth,
                                         AppConfig.ChessHeight)
        else:
            calibcam = CalibrationCameraAruco(AppConfig.VideoFrameWidth,
                                              AppConfig.VideoFrameHeight)

        # TODO: check where an image directory is created..
        dirFrameImage = makeFrameImageDirectory()

        # create key handler for camera calibration
        keyhandler = CalibCameraKeyHandler()

        # create info text
        infoText = DisplayInfoText(
            cv2.FONT_HERSHEY_PLAIN,
            (10, 60),
            AppConfig.VideoFrameWidth,
            AppConfig.VideoFrameHeight,
        )
    except Exception as ex:
        print("Preparation Exception:", ex, file=sys.stderr)
        camcalib_err(f"Preparation Exception: {ex}")
        # BUGFIX: report failure with a non-zero exit status
        # (previously sys.exit(0) signalled success).
        sys.exit(1)

    # setup an opencv window only when the bridge is not in use
    if not bridge_ip.isActive():
        cv2.namedWindow(cameraName, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(cameraName, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    camcalib_info("video frame processing starts..")
    try:
        while True:
            # Wait for a coherent pair of frames: depth and color
            color_image = vcap.get_video_frame()

            if not ObjectTrackerErrMsg.check_value_none(
                    color_image, "video color frame"):
                bridge_ip.send_dict_data(
                    "error",
                    {
                        "name": "cameracalib:" + cameraName,
                        "message": "camera initialization failed",
                    },
                )
                # handled by the outer except clause; the old `break`
                # after this raise was unreachable and has been removed
                raise Exception("camera initialization failed")

            # change the format to BGR format for opencv
            if cameraObject.type == "realsense-camera":
                color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

            color_view_image = color_image.copy()

            # create info text
            infoText.draw(color_view_image)

            # display the captured image
            if bridge_ip.isActive():
                color_image_resized = cv2.resize(color_view_image,
                                                 dsize=(640, 480),
                                                 interpolation=cv2.INTER_AREA)
                bridge_ip.send_dict_data(
                    "video",
                    {
                        "name": "cameracalib" + ":" + cameraName,
                        "width": 640,
                        "height": 480,
                        "frame": color_image_resized,
                    },
                )
            else:
                cv2.imshow(cameraName, color_view_image)

            # TODO: arrange these opencv key events based on other key event handler class
            # handle key inputs
            try:
                if bridge_ip.isActive():
                    (name, cmd) = bridge_ip.get_cmd_queue_no_wait()
                    if name != "cameracalib:" + cameraName:
                        continue

                    # map bridge commands onto the opencv key codes the
                    # key handler already understands
                    if cmd == "snapshot":
                        pressedKey = 0x63  # 'c' key
                        camcalib_info("call snapshot handler")
                    elif cmd == "result":
                        pressedKey = 0x67  # 'g' key
                        camcalib_info("call get-result handler")
                    elif cmd == "exit":
                        camcalib_info("call exit handler")
                        pressedKey = 0x71  # 'q' key
                    else:
                        # BUGFIX: an unrecognized command used to leave
                        # pressedKey unbound (NameError on first loop)
                        continue
                else:
                    pressedKey = cv2.waitKey(1) & 0xFF
            except queue.Empty:
                continue

            if keyhandler.processKeyHandler(
                    pressedKey,
                    color_image,
                    dirFrameImage,
                    calibcam,
                    cameraObject.endpoint,
                    infoText,
                    cameraName,
                    bridge_ip,
            ):
                break

    except Exception as ex:
        print("Main Loop Error :", ex, file=sys.stderr)
        camcalib_err(f"Main Loop Error : {ex}")
        bridge_ip.send_dict_data(
            "error",
            {
                "name": "cameracalib:" + cameraName,
                "message": f"Error: {ex}",
            },
        )

    finally:
        # Stop streaming
        vcap.stop()

        bridge_ip.send_dict_data("app_exit", True)
        camcalib_info("camera calibration ends..")