def on_removeSubview(self, view):
    """Remove *view* from the managed view list and release its input flow.

    Clears focus if the removed view held it, releases the camera or robot
    backing the view's input flow via the matching controller, then drops
    the view and rebuilds the layout.

    Parameters:
        view: the subview to remove; must currently be in self.mViewList.
    """
    myDebug(self.__class__.__name__, get_current_function_name())
    if view.isFocused:
        self.setFocusedView(None)
    # Snapshot the previous list (same convention as slot_clearSubview).
    self.mLastViewList = self.mViewList.copy()
    view_ind = self.mViewList.index(view)
    input_flow = self.mViewList[view_ind].getInputFlow()
    camera_controller = MainController.getController().mCameraController
    robot_controller = MainController.getController().mRobotController
    # Exact-type checks kept deliberately: dispatch on the concrete flow type.
    if type(input_flow) is CameraInterface:
        camera_controller.releaseCamera(input_flow.getID())
    elif type(input_flow) is RobotEpuck:
        robot_controller.releaseRobot(input_flow)
    # Fix: reuse the index computed above instead of a second O(n) search.
    self.mViewList.pop(view_ind)
    self.reLayoutView()
def slot_add_EPuck(self):
    """Prompt for an e-puck robot IP address, connect to it and add a view.

    Opens a modal input dialog; on acceptance, registers a RobotEpuck with
    the robot controller, connects to the entered IP and wraps the robot in
    a RobotFrame subview.
    """
    dialog = QInputDialog(self)
    dialog.setModal(True)
    dialog.setStyleSheet("""
        background-color: rgba(0, 0, 0, 200);
        border:1px solid rgba(0, 200, 200, 150);
        """)
    dialog.setFixedSize(350, 250)
    # NOTE(review): title looks copy-pasted from the improcessor dialog —
    # confirm the intended wording before changing this user-facing string.
    dialog.setWindowTitle('Set Input Flow for Improcessor')
    dialog.setInputMode(QInputDialog.TextInput)
    dialog.setLabelText('请输入……(机器人的ip)')
    dialog.setTextValue('192.168.3.5')
    dialog.setOkButtonText('Ok')
    dialog.setCancelButtonText('Cancel')
    if dialog.exec_() == QDialog.Accepted:
        ip = dialog.textValue()
        robot_controller = MainController.getController().mRobotController
        robot_controller.addRobot(RobotEpuck)
        # getRobot(-1): the robot just appended above.
        robot = robot_controller.getRobot(-1)
        robot.connect(ip)
        view = RobotFrame(self)
        view.setRobot(robot)
        self.addSubview(view)
    else:
        print("dialog canceled")
    # Fix: removed the trailing dialog.show() — exec_() already displayed
    # and closed the modal dialog; show() afterwards re-opened a stale,
    # non-modal copy of it.
def mkQMenu(self):
    """Construct and return the context menu for this view area.

    The menu offers: a submenu with one entry per available camera, an
    image-processing-view action, an add-robot action and a clear-views
    action.
    """
    myDebug(self.__class__.__name__, get_current_function_name())
    menu = QMenu(self)
    menu.setStyleSheet("""
        color: rgb(0, 243, 255);
        background-color: rgba(255, 255, 255, 0);
        border:2px solid rgb(0, 108, 255);
        selection-background-color: rgba(183, 212, 255, 150);
        """)
    controller = MainController.getController()
    cam_names, cam_types = controller.mCameraController.getAvailableCameraNames()
    # One submenu entry per available camera, bound to its index and type.
    camera_menu = QMenu(menu)
    camera_menu.setTitle('新建相机视图')
    for idx, cam_name in enumerate(cam_names):
        entry = QAction(cam_name, camera_menu)
        entry.triggered.connect(partial(self.on_add_camera_view, idx, cam_types[idx]))
        camera_menu.addAction(entry)
    menu.addMenu(camera_menu)
    # Remaining flat actions, table-driven.
    for label, slot in (
            ('添加图像处理视图', self.slot_add_image_proc_view),
            ('添加机器人', self.slot_add_robot),
            ('清空视图', self.slot_clearSubview)):
        entry = QAction(label, menu)
        entry.triggered.connect(slot)
        menu.addAction(entry)
    return menu
def slot_clearSubview(self):
    """Destroy every subview, release all cameras and clear the focus."""
    myDebug(self.__class__.__name__, get_current_function_name())
    # Swap the live list out first, then tear down the old views.
    self.mLastViewList = self.mViewList.copy()
    self.mViewList = []
    self.reLayoutView()
    camera_controller = MainController.getController().mCameraController
    for old_view in self.mLastViewList:
        old_view.destroy()
    camera_controller.releaseAllCamera()
    # NOTE(review): robot input flows are not released here, unlike
    # on_removeSubview — confirm whether robots should also be freed.
    self.setFocusedView(None)
def main():
    """Application entry point: create the Qt app, load settings, run tests
    in debug mode, then start the main controller and enter the event loop.
    """
    app = QApplication(sys.argv)
    XSetting.XSetting.loadSettingFile()
    if XSetting.XSetting.isDebug.isPrintDebug:
        Test.UnitTest()
    print('Load mainController')
    controller = MainController.MainController()
    # Publish the singleton on the MainController module — presumably read
    # back by MainController.getController(); verify the attribute name.
    MainController.__controller = controller
    controller.start()
    sys.exit(app.exec_())
def __init__(self, *arg):
    """Set up the robot view: own and start a RobotController, and embed a
    DataHandle's image label in this widget's layout.
    """
    myDebug(self.__class__.__name__, get_current_function_name())
    super().__init__(*arg)
    self.controller = MainController.getController()
    # Each instance owns its RobotController and starts it immediately.
    self.robot_controller = RobotController.RobotController()
    self.robot_controller.start()
    self.mDataHandle = DataHandle()
    # Register the handle with the main controller so data reaches this view.
    self.controller.addDataHandle(self.mDataHandle)
    # assumes self.mLayout was created by the superclass __init__ — TODO confirm
    self.mLayout.addWidget(self.mDataHandle.image_label)
    self.mDataHandle.image_label.show()
    # No policy attached until one is set explicitly.
    self.mRobotPolicy = None
def on_add_camera_view(self, cam_id: int, cam_type: XCameraType):
    """Open camera *cam_id* (unless already open) and add a view frame for it.

    Parameters:
        cam_id: index of the camera in the controller's available-camera list.
        cam_type: backend type passed to startCamera when opening the camera.
    """
    myDebug(self.__class__.__name__, get_current_function_name())
    camera_controller = MainController.getController().mCameraController
    camera = camera_controller.getCamera(cam_id)
    if camera and camera.isOpen():
        print('该相机已经打开')
    else:
        camera_controller.startCamera(cam_id, cam_type)
    view = CameraViewFrame(self)
    # Idiom fix: negative index instead of len(...) - 1 for the newest camera.
    # NOTE(review): when the camera was already open, the last list entry may
    # not be the requested camera — confirm this path is intended.
    view.setCamera(camera_controller.mCameraList[-1])
    self.addSubview(view)
    print('打开第%d个相机' % cam_id)
def __init__(self, *args):
    """Initialize the image-processing view: focus border styles, an image
    save directory, and an ImageHandle registered with the main controller.
    """
    myDebug(self.__class__.__name__, get_current_function_name())
    # NOTE(review): super(ImageProcViewBase, self) starts MRO lookup *after*
    # ImageProcViewBase; if this class derives from ImageProcViewBase, that
    # base __init__ is skipped — confirm this is intentional.
    super(ImageProcViewBase, self).__init__(*args)
    # Border styles toggled by active()/deactive() to show focus state.
    self.mActiveBorderStyle = 'border:1px solid rgba(200, 50, 50, 255);'
    self.mUnactiveBorderStyle = 'border:1px solid rgba(200, 200, 200, 150);'
    self.deactive()
    self.mImageSaveDir = "./general_image_save"
    self.mImageHandle = ImageHandle()
    self.mImageHandle.setImageSaveDir(self.mImageSaveDir)
    self.controller = MainController.getController()
    # Register so the controller feeds frames to this view's handle.
    self.controller.addImageHandle(self.mImageHandle)
    # presumably self.mLayout is provided by a base class — verify
    self.mLayout.addWidget(self.mImageHandle.image_label)
    self.mImageHandle.image_label.show()
def __init__(self, *args):
    """Initialize the base image-processing view: a zero-margin grid layout,
    focus border styles, and an ImageHandle registered with the controller.
    """
    myDebug(self.__class__.__name__, get_current_function_name())
    # NOTE(review): confirm this class *is* ImageProcViewBase; if it is a
    # subclass, super(ImageProcViewBase, self) skips the base __init__.
    super(ImageProcViewBase, self).__init__(*args)
    self.mLayout = QGridLayout(self)
    self.mLayout.setContentsMargins(0, 0, 0, 0)
    self.setLayout(self.mLayout)
    # Border styles toggled by active()/deactive() to show focus state.
    self.mActiveBorderStyle = 'border:1px solid rgba(200, 50, 50, 255);'
    self.mUnactiveBorderStyle = 'border:1px solid rgba(200, 200, 200, 150);'
    self.deactive()
    self.mImageHandle = ImageHandle()
    self.controller = MainController.getController()
    # Register so the controller feeds frames to this view's handle.
    self.controller.addImageHandle(self.mImageHandle)
    self.mLayout.addWidget(self.mImageHandle.image_label)
    self.mImageHandle.image_label.show()
def process(self, image: np.ndarray) -> np.ndarray: ret = super().process(image) # print(self.channels(image)) if self.channels(image) == 1: pass elif self.channels(image) == 3: pass elif self.channels(image) == 4: image_tmp = image[:, :, 0:3] ''' 再此处添加算法,例如: image_tmp = 255 - image_tmp ''' image_tmp = image_tmp.astype(np.uint8) frame = None if not self.mapInited: ret = initMap(image_tmp, ratio=(self.mapheight / self.mapwidth)) if ret: self.mapBfTrans = ret[0] self.mapAfTrans = ret[1] self.transM = ret[2] self.mapInited = 1 else: if self.arucodetect: # 单个二维码测试耗时< 10 ms frame, self.carIds, self.posiCar, self.poseCar = qrDextbyArUco( image_tmp, self.transM, self.mapwidth, self.mapheight) if frame is not None: image[:, :, 0:3] = frame[:, :, :] else: image[:, :, 0:3] = image_tmp[:, :, :] ret = image controller = MainController.getController() if self.carIds: for i in range(len(self.carIds)): controller.mCameraData.setData( 'GlobalLocation', self.carIds[i][0], np.concatenate((self.posiCar[i], self.poseCar[i]))) # print(controller.mCameraData.data) else: raise Exception('图像类型不支持') return ret
def process(self, image: np.ndarray) -> np.ndarray: ret = super().process(image) # print(self.channels(image)) if self.channels(image) == 1: pass elif self.channels(image) == 3: pass elif self.channels(image) == 4: image_tmp = image[:, :, 0:3] ''' 再此处添加算法,例如: image_tmp = 255 - image_tmp ''' controller = MainController.getController() if controller.mCameraController.mModelYolo is not None: image_tmp = detect(image_tmp) else: print('Yolo model is not loaded') image[:, :, 0:3] = image_tmp[:, :, :] ret = image else: raise Exception('图像类型不支持') return ret
def queryGlobalLocation(self):
    """Fetch this robot's 'GlobalLocation' entry from the shared camera data
    and, when present, update the robot's own position estimate.
    """
    controller = MainController.getController()
    data_tmp = controller.mCameraData.getData('GlobalLocation', self.mId)
    if data_tmp is not None:
        # Fix: reuse the value already fetched (the original queried getData
        # twice), and use the builtin float — the np.float alias was
        # deprecated in NumPy 1.20 and removed in 1.24.
        data = np.array(data_tmp, dtype=float)
        self.setSelfPos(data)
def slot_return(self):
    """Handle the return action: release every camera and clear all subviews."""
    myDebug(self.__class__.__name__, get_current_function_name())
    main_controller = MainController.getController()
    main_controller.mCameraController.releaseAllCamera()
    self.mFrameViewArea.slot_clearSubview()
def detect(image: np.ndarray):
    """Run YOLO object detection on *image* and return it with labeled boxes.

    Uses the model preloaded on the camera controller (mModelYolo) and its
    color table (mModelYoloColors). Relies on module-level IMG_SIZE,
    conf_thres and iou_thres for the inference settings — presumably defined
    near the other YOLO helpers; verify.

    Parameters:
        image: input frame in BGR channel order (converted to RGB below).

    Returns:
        The input frame with one plotted box per detection.
    """
    img0 = image
    controller = MainController.getController()
    # Initialize
    device = select_device()
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    model = controller.mCameraController.mModelYolo  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(IMG_SIZE, s=stride)  # check img_size
    if half:
        model.half()  # to FP16
    # Letterbox-resize, then reorder channels for the network.
    img = letterbox(img0, imgsz, stride=stride)[0]
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
    img = np.ascontiguousarray(img)
    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = controller.mCameraController.mModelYoloColors
    # Run inference
    if device.type != 'cpu':
        # Warm-up pass with a zero tensor matching the model's dtype/device.
        model(
            torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(
                next(model.parameters())))  # run once
    ## main
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()
    img /= 255.0  # scale uint8 pixels to [0, 1]
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension
    # Inference
    pred = model(img, augment=False)[0]
    # Apply NMS
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None,
                               agnostic=False)
    # Process detections
    for i, det in enumerate(pred):  # detections per image
        im0 = img0
        gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
        if len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                      im0.shape).round()
            # Write results
            for *xyxy, conf, cls in reversed(det):
                label = f'{names[int(cls)]} {conf:.2f}'
                im0 = plot_one_box(xyxy, im0, label=label,
                                   color=colors[int(cls)], line_thickness=3)
    return im0