def timerEvent(self, event):
    """Grab a frame from the camera on every timer tick and emit it."""
    if event.timerId() != self.timer.timerId():
        return
    read, data = self.camera.read()
    if read:
        self.count += 1
        detection.addTextlines(data, [f"{self.count}"])
        self.image_data.emit(data)
def timerEvent(self, event):
    """Grab a frame from the source on every timer tick and emit it."""
    if event.timerId() != self.timer.timerId():
        return
    try:
        read, data = self.source.read()
    except Exception as e:
        logger.error(e)
        return  # without this, `read` would be unbound below
    if not read:
        logger.error("Read error")
        self.source.release()
        self.state = False
        self.toggle(True)  # try to reopen the source
        return
    self.count += 1
    detection.addTextlines(data, [f"{self.count}"])
    self.image_data.emit(data)
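
# A minimal sketch of the wiring that makes the timerEvent() overrides above
# fire. Qt delivers QTimerEvents to the QObject a QBasicTimer was started on,
# which is why the timerId() guard filters out events from other timers on
# the same object. Assumptions: PyQt5 bindings and a cv2.VideoCapture-like
# `source`; the class name `FrameGrabberSketch` is hypothetical and not part
# of this module.
import numpy as np
from PyQt5 import QtCore

class FrameGrabberSketch(QtCore.QObject):
    image_data = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, source, fps=25):
        super().__init__()
        self.source = source
        self.timer = QtCore.QBasicTimer()
        self.timer.start(int(1000 / fps), self)  # Qt now calls timerEvent()

    def timerEvent(self, event):
        if event.timerId() != self.timer.timerId():
            return  # not our timer
        ok, frame = self.source.read()
        if ok:
            self.image_data.emit(frame)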
def camParLoad(self):
    """Load uEye camera parameters from EEPROM and restart the frame grabber."""
    logger.debug("Loading parameters from EEPROM")
    logger.debug("Disabling framegetter")
    self.stop_recording()
    if self.state:
        ueye.is_ExitCamera(self.hCam)
    self.nRet, self.hCam = ueye_camera.cameraInit(True)
    # Alternative: load the EEPROM parameter set directly and query the AOI:
    # nullint = ueye._value_cast(0, ueye.ctypes.c_uint)
    # rv = ueye.is_ParameterSet(self.hCam, ueye.IS_PARAMETERSET_CMD_LOAD_EEPROM,
    #                           nullint, nullint)
    # rect_aoi = ueye.IS_RECT()
    # Can be used to get the size and position of the "area of interest" (AOI):
    # nRet = ueye.is_AOI(self.hCam, ueye.IS_AOI_IMAGE_GET_AOI,
    #                    rect_aoi, ueye.sizeof(rect_aoi))
    self.width = ueye_camera.rect_aoi.s32Width.value
    self.height = ueye_camera.rect_aoi.s32Height.value
    if self.width > 0:
        self.frame = np.zeros((self.height, self.width), np.uint8)
        self.start_recording()
    else:
        detection.addTextlines(self.frame, ["Camera not connected!",
                                            "Cannot load parameters"])
        self.image_data.emit(self.frame)
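
# A minimal sketch of the AOI query that camParLoad() relies on (the commented
# is_AOI call above does the same thing). Assumptions: pyueye is installed and
# `hCam` is a valid handle from a successful camera init; `query_aoi` is a
# hypothetical helper, not part of this module.
from pyueye import ueye

def query_aoi(hCam):
    """Return (width, height) of the camera's current area of interest."""
    rect_aoi = ueye.IS_RECT()
    ret = ueye.is_AOI(hCam, ueye.IS_AOI_IMAGE_GET_AOI,
                      rect_aoi, ueye.sizeof(rect_aoi))
    if ret != ueye.IS_SUCCESS:
        raise RuntimeError(f"is_AOI failed with code {ret}")
    return rect_aoi.s32Width.value, rect_aoi.s32Height.value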
def toggle(self, enabled):
    """Switch the frame grabber on or off, initializing the camera on first use."""
    if self.state == enabled:
        return
    if enabled:
        # We want to be switched ON but are OFF:
        if not self.initialized:
            self.initialize()
            if self.nRet < 0:
                detection.addTextlines(self.frame, ["Camera not connected!"])
                self.image_data.emit(self.frame)
            else:
                self.width = ueye_camera.rect_aoi.s32Width.value
                self.height = ueye_camera.rect_aoi.s32Height.value
                self.frame = np.zeros((self.height, self.width), np.uint8)
                self.initialized = True
                self.start_recording()
        else:
            self.start_recording()
    else:
        self.stop_recording()
    self.state = enabled
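
# Illustrative usage of toggle() (names as in this module): switching on
# lazily initializes the camera once; switching off only stops the timer, so
# a later toggle(True) resumes without re-initializing.
#
#     grabber.toggle(True)   # initialize if needed, then start recording
#     grabber.toggle(False)  # stop recording, keep the camera handle open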
def processFrame_slot(self, *args, **kwargs):
    """Process self.imShow.np_data into a frame to show."""
    mtx = self.imShow.mtxrs
    dist = detection.dist
    mtxdetect, newmtxdetect, mtxwindow, newmtxwindow = self.imShow.getNewMtx()
    if self.rbDestNone.isChecked():
        processedFrame = self.blankimg
        self.imShow.showProcessedData_slot(processedFrame)
        return
    elif self.rbOverlayNone.isChecked():
        processedFrame = self.imShow.np_data.copy()
        self.imShow.showProcessedData_slot(processedFrame)
        # Restart the timer so an updated slider position takes effect:
        self.timer.stop()
        self.timer.start(int(self.frameRate**-1 * 1000))
        return
    elif self.rbDestRaw.isChecked():
        processedFrame = cv2.resize(self.imShow.np_data,
                                    self.imShow.showFrame.shape[1::-1])
        self.showRvec, self.showTvec, self.showMtx, self.showDist = \
            self.detector.rvec, self.detector.tvec, mtxwindow, detection.dist
    elif self.rbDestUndistorted.isChecked():
        udFrame = np.zeros_like(self.imShow.np_data)
        mtxdetect, newmtxdetect = detection.getCameraMatrices(self.imShow.np_data)
        mtxwindow, newmtxwindow = detection.getCameraMatrices(self.imShow.showFrame)
        # The dst argument receives the corrected image (same size/type as src):
        cv2.undistort(self.imShow.np_data, mtxdetect, detection.dist,
                      udFrame, newmtxdetect)
        processedFrame = cv2.resize(udFrame, self.imShow.showFrame.shape[1::-1])
        mtx = self.imShow.newmtx
        dist = None
        self.showRvec, self.showTvec, self.showMtx, self.showDist = \
            self.detector.rvec, self.detector.tvec, newmtxwindow, None
    elif self.rbDestRectified.isChecked():
        # Undistort, then rectify the image:
        imShowmtx, imShowNewMtx = detection.getCameraMatrices(self.imShow.np_data)
        udFrame = np.zeros_like(self.imShow.np_data)
        cv2.undistort(self.imShow.np_data, imShowmtx, detection.dist,
                      udFrame, imShowNewMtx)
        frame_in = cv2.resize(udFrame, self.imShow.showFrame.shape[1::-1])
        extra_mm_xy = np.array([90, 90])  # the width of the marker stick
        processedFrame = frame_in.copy()
        dom = np.array(self.detector.domain, np.float32)
        borderpix = bp = np.array([100, 100])
        overlays.rectifyCameraPose(frame_in, processedFrame, self.detector.rvec,
                                   self.detector.tvec, newmtxdetect, dom,
                                   border_pix=borderpix)
        # Compose the matrices for showing a rectified frame with a border
        # of bp [px]:
        gamma = -500  # gamma adjusts the depth of field of the projection
        rv = np.array([0, 0, 0], np.float32)
        w, h = dim = processedFrame.shape[1::-1]
        mtx = np.array(np.diag([*(dom + 2 * bp)**-1, 1]) * np.diag([w, h, 1]),
                       np.float32)
        ax, ay = dom
        bx, by = dim - borderpix * 2
        # Scale world [mm] to window [px] on the diagonal, put the principal
        # point in the image center, and shift the plane to depth gamma:
        mtx[0, 0] = bx / ax * gamma
        mtx[1, 1] = by / ay * gamma
        mtx[2, 2] = 1
        mtx[0, 2] = cx = w / 2
        mtx[1, 2] = cy = h / 2
        tv = np.array([ax / bx * (bp[0] - cx), ay / by * (bp[1] - cy), gamma],
                      np.float32)
        self.showRvec, self.showTvec, self.showMtx, self.showDist = \
            rv, tv, mtx, None
        # Sanity check: project the world origin with the composed matrices.
        p, _ = cv2.projectPoints(np.array([0, 0, 0], np.float32).reshape((1, 1, 3)),
                                 self.showRvec, self.showTvec,
                                 self.showMtx, self.showDist)
        logger.debug(f"World origin projects to {p.ravel()}")
    else:
        processedFrame = self.imShow.np_data.copy()

    # Convert to color:
    if len(processedFrame.shape) == 2:
        proc_image = cv2.cvtColor(processedFrame, cv2.COLOR_GRAY2BGR)
    else:
        proc_image = processedFrame

    if self.chkCalMarkers.isChecked() or self.chkXvision.isChecked():
        # Check if there has been a detection step:
        if not self.detector.count:
            detection.addTextlines(self.imShow.np_data,
                                   ["No detection step done yet"])
        else:
            corners, ids, rej = (self.detector.rawCorners,
                                 self.detector.rawIds, self.detector.rawRej)
            if self.chkCalMarkers.isChecked():
                # Project the 3D marker corners with the display pose and keep
                # only the calibration markers:
                pCorners = [cv2.projectPoints(c3d, self.showRvec, self.showTvec,
                                              self.showMtx, self.showDist)[0]
                            .reshape((1, 4, 2)).astype(np.float32)
                            for c3d in self.detector.corners3D]
                calCorners, calIds, _ = detection.filterMarkers(
                    pCorners, self.detector.rawIds,
                    detection.calibration_marker_ids)
                cv2.aruco.drawDetectedMarkers(proc_image, calCorners, calIds)

    if self.chkGrid.isChecked():
        overlays.drawDomain(proc_image, self.showRvec, self.showTvec,
                            self.showMtx, self.showDist,
                            domain=self.detector.domain)
        logger.debug("Drew the domain grid")

    if self.chkCalstick.isChecked():
        detection.drawCalstick(proc_image, self.showRvec, self.showTvec,
                               self.showMtx, self.showDist)
        logger.debug("Drew the calibration stick")

    # if self.chkGamePad.isChecked():
    #     controllers.showGamePadStates(proc_image, self.GamePadThread.gamePad.states)

    if self.rbControlMouse.isChecked():
        PTS = np.array(mousepos3d, np.float64).reshape((1, 1, 3))
        impoints, jac = cv2.projectPoints(PTS, self.showRvec, self.showTvec,
                                          self.showMtx, self.showDist)
        x, y = impoints.ravel()
        cv2.drawMarker(proc_image, (int(x), int(y)), (255, 122, 122),
                       cv2.MARKER_TILTED_CROSS, 20, 2, cv2.LINE_AA)

    # if self.rbOverlayRotational.isChecked():
    #     pos = QtGui.QCursor.pos()
    #     poswin = self.imShow.parent().parent().parent().pos()
    #     posimg = self.imShow.parent().pos()
    #     posrel = pos - poswin - posimg
    #     posxy = (posrel.x(), posrel.y() - 30)
    #     try:
    #         pix = np.array(posxy, np.float32).reshape((1, 1, 2))
    #         pos3d = detection.correspondence2d3d(
    #             pix, self.showMtx, self.showDist, self.showRvec,
    #             self.showTvec, detection.calibration_markers_mm)
    #         angle = self.MouseAngle
    #         model.Agent(states=[*pos3d.ravel()[:2], angle, 0, 0]).show_position_pnp(
    #             proc_image, self.showRvec, self.showTvec, self.showMtx,
    #             self.showDist)
    #         pWithAngle = [*pos3d.ravel()[:2], angle]
    #         e_ti, vabs = control_mapping.controlfield(pWithAngle,
    #                                                   overlays.rotationalfield)
    #         self.lbCursorPos.setText(
    #             f"{posxy} | {pos3d} | omega_i {omega_i:6.3f} e_ti {e_ti:6.3f} | "
    #             f"zx {zx:6.3f} | zy {zy:6.3f}")
    #     except Exception as e:
    #         logger.error(e)

    if self.rbOverlayRotational.isChecked():
        overlays.drawRotation(proc_image, self.showRvec, self.showTvec,
                              self.showMtx, self.showDist,
                              self.detector.domain,
                              invert=self.chkVizInvert.isChecked())

    if self.rvOverlayCoverage.isChecked():
        overlays.drawCoverage(proc_image, self.showRvec, self.showTvec,
                              self.showMtx, self.showDist,
                              self.detector.domain,
                              viewnr=self.vizViewNumber,
                              invert=self.chkVizInvert.isChecked())
        for i in range(len(gctronic.elisa_ids)):
            max_idx = cc.goals_max_index[i]
            # Full path to the goal with the maximum coverage gain (blue):
            max_path = cc.goal_paths[i, max_idx, :]
            detection.drawPath(max_path, proc_image, self.showRvec,
                               self.showTvec, self.showMtx, self.showDist,
                               color=(255, 0, 0))
            # Remaining part of that path (green):
            max_path = cc.goal_paths[i, max_idx, cc.goals_index[i]:]
            detection.drawPath(max_path, proc_image, self.showRvec,
                               self.showTvec, self.showMtx, self.showDist,
                               color=(0, 255, 0))
            # Predicted DWA trajectory (red):
            traj = dwa.getPath(cc.states_all[i, :], dwa.config.predict_time, 4)
            trajpath = np.array(traj).T
            detection.drawPath(trajpath, proc_image, self.showRvec,
                               self.showTvec, self.showMtx, self.showDist,
                               color=(0, 0, 255), thickness=2)

    if self.chkXvision.isChecked():
        for agent in self.agents:
            agent.show_position_pnp(proc_image, self.showRvec, self.showTvec,
                                    self.showMtx, self.showDist)

    if self.chkXhat.isChecked():
        for agent in self.agents:
            agent.show_position_pnp(proc_image, self.showRvec, self.showTvec,
                                    self.showMtx, self.showDist,
                                    showOdometry=True)

    if self.chkXvision.isChecked():
        projectedcorners = [cv2.projectPoints(c3d, self.showRvec, self.showTvec,
                                              self.showMtx, self.showDist)[0]
                            .reshape((1, 4, 2)).astype(np.float32)
                            for c3d in self.detector.corners3D]
        cv2.aruco.drawDetectedMarkers(proc_image, projectedcorners,
                                      self.detector.ids)

    if self.chkReprojectionTest.isChecked():
        try:
            logger.debug("Testing if the corners are correctly used for PnP")
            # Sort the calibration markers from the config by id:
            sortedTpl2 = sorted(zip(detection.calibration_marker_ids,
                                    detection.calibration_marker_corners),
                                key=lambda x: x[0])
            for tpl in sortedTpl2:
                logger.debug(tpl)
            pnpIds2, pnpMarkerCorners = zip(*sortedTpl2)
            objpts = np.array(pnpMarkerCorners).reshape(16, 3)
            impoints, jac = cv2.projectPoints(objpts, self.showRvec,
                                              self.showTvec, self.showMtx,
                                              self.showDist)
            detection.drawGridPoints(proc_image, impoints.reshape(4, 4, 2),
                                     drawCount=True, color=(69, 111, 243),
                                     drawCoords=False)
            calCorners, calIds = self.detector.corners3D, self.detector.rawIds
            if len(calCorners):
                sortedTpl = sorted(zip(calIds, calCorners),
                                   key=lambda x: x[0].ravel())
                pnpIds, pnpCorners = zip(*sortedTpl)
                objpts = np.array(pnpCorners).reshape(len(calCorners) * 4, 3)
                impoints, jac = cv2.projectPoints(objpts - 50, self.showRvec,
                                                  self.showTvec, self.showMtx,
                                                  self.showDist)
                detection.drawGridPoints(proc_image,
                                         impoints.reshape(4, len(calCorners), 2),
                                         drawCount=True, color=(69, 255, 243),
                                         drawCoords=False)
        except Exception as e:
            logger.error(e)

    self.imShow.showProcessedData_slot(proc_image)
    # Restart the timer so an updated slider position takes effect:
    self.timer.stop()
    self.timer.start(int(self.frameRate**-1 * 1000))
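
# processFrame_slot() draws every overlay through the same primitive: project
# 3D world points with the currently selected pose (showRvec/showTvec) and
# camera model (showMtx/showDist), then draw the resulting pixel coordinates.
# A minimal standalone version of that pattern; `draw_world_point` is a
# hypothetical helper, not part of this module.
import cv2
import numpy as np

def draw_world_point(image, point_mm, rvec, tvec, mtx, dist=None):
    """Project a single 3D point (in mm) into the image and mark it."""
    pts = np.asarray(point_mm, np.float64).reshape((1, 1, 3))
    impoints, _ = cv2.projectPoints(pts, rvec, tvec, mtx, dist)
    x, y = impoints.ravel()
    cv2.drawMarker(image, (int(x), int(y)), (255, 122, 122),
                   cv2.MARKER_TILTED_CROSS, 20, 2, cv2.LINE_AA)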
videoname = os.path.abspath(os.path.join(planebots.packdir, 'stills',
                                         "500-300-1000-Highres.mp4"))
logger.info(f"Opening {videoname}")
stream = cv2.VideoCapture(videoname)
ret, frame = stream.read()
corners, ids, rej = cv2.aruco.detectMarkers(frame, planebots.MarkerDictionary,
                                            None, None,
                                            planebots.DetectionParameters, None)
mtxWindow, newMtxWindow = detection.getCameraMatrices(frame)
cv2.aruco.drawDetectedMarkers(frame, corners, ids, borderColor=(255, 122, 0))
detection.addTextlines(
    frame, [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")])
cv2.imwrite("testPnP.png", frame)
# Undistort once with the original camera matrix...
f2 = cv2.undistort(frame, mtxWindow, detection.dist, None, mtxWindow)
detection.addTextlines(
    f2, [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")])
cv2.imwrite("testPnP2.png", f2)
# ...and once with the optimal new camera matrix:
f2 = cv2.undistort(frame, mtxWindow, detection.dist, None, newMtxWindow)
detection.addTextlines(
    f2, [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")])
cv2.imwrite("testPnP3.png", f2)
pnpSucces, rvec, tvec = detection.findPnP(mtxWindow, detection.dist,
                                          corners, ids)  # assumption: findPnP also takes the detected ids
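
# A hedged sketch of what the PnP step above amounts to in plain OpenCV:
# estimate the pose from 3D/2D correspondences, reproject the object points,
# and measure the error. `obj_pts` (Nx3) and `img_pts` (Nx2) stand in for the
# calibration-marker correspondences; this is not the signature of
# detection.findPnP.
import cv2
import numpy as np

def reprojection_error(obj_pts, img_pts, mtx, dist):
    """Return the mean reprojection error of a solvePnP pose, or None."""
    ok, rvec_, tvec_ = cv2.solvePnP(obj_pts, img_pts, mtx, dist)
    if not ok:
        return None
    reproj, _ = cv2.projectPoints(obj_pts, rvec_, tvec_, mtx, dist)
    diff = reproj.reshape(-1, 2) - np.asarray(img_pts, np.float64).reshape(-1, 2)
    return float(np.linalg.norm(diff, axis=1).mean())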