# -- Imports for the snippets in this file ------------------------------------
# pymaxwell is the Maxwell Render Python SDK (Cmaxwell, CmaxwellObjectIterator,
# readMXS, ...). Utils and utils are project-local helper modules; the
# mtcnn_detector and images_*_save_path globals used by the capture loops are
# defined elsewhere in the project.
import os
import time

import cv2
import numpy as np
from xml.etree.ElementTree import Element, SubElement

from pymaxwell import *

import Utils
import utils


def exportMxsObjInformation(scene, pzFile):
    info, ok = scene.getSceneInfo()

    # root element
    root = Element("scene")
    root.attrib["path"] = scene.getMxsPath()
    root.attrib["nObjects"] = repr(info.nObjects)
    root.attrib["nMeshes"] = repr(info.nMeshes)
    root.attrib["nTriangles"] = repr(info.nTriangles)

    objElemList = []
    objNameList = []

    # iterate all objects, attaching each one to its parent's element so the
    # XML mirrors the scene hierarchy
    it = CmaxwellObjectIterator()
    curObj = it.first(scene)
    while not curObj.isNull():
        objName, ok = curObj.getName()
        parent, ok = curObj.getParent()
        objNameList.append(objName)

        parentElem = root
        if not parent.isNull():
            parentName, ok = parent.getName()
            parentIdx = objNameList.index(parentName)
            parentElem = objElemList[parentIdx]

        objElem = SubElement(parentElem, "object", name=repr(objName))
        buildObjectElement(objElem, curObj)
        objElemList.append(objElem)

        curObj = it.next()

    # write the document to a file
    # ElementTree(root).write(pzFile)
    Utils.saveXML(root, pzFile)
    print("finished exporting MXS object information")
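# The two MXS exporters in this file call Utils.saveXML(root, path), which is not
# included here. Below is a minimal sketch of what that helper could look like,
# assuming it simply pretty-prints an ElementTree root to disk; it is shown under
# the name save_pretty_xml so it does not pretend to be the real Utils module.
from xml.dom import minidom
from xml.etree.ElementTree import tostring


def save_pretty_xml(root, path):
    """Pretty-print an ElementTree Element to an XML file (sketch of Utils.saveXML)."""
    raw = tostring(root, encoding="utf-8")
    pretty = minidom.parseString(raw).toprettyxml(indent="  ")
    with open(path, "w") as f:
        f.write(pretty)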
def testcamera_nir():
    camera_rgb = cv2.VideoCapture(0)
    camera_ir = cv2.VideoCapture(1)

    frameCount = 0
    prevSaveDetectedFaceFrameCount = 0  # frame index of the last save with a detected face

    while True:
        start = time.time()
        frameCount += 1

        grab_rgb, frame_rgb = camera_rgb.read()  # the first return value (grab) indicates whether a frame was read
        grab_ir, frame_ir = camera_ir.read()

        image_rgb = np.array(frame_rgb)
        boxes_c_rgb, landmarks_rgb = mtcnn_detector.detect(image_rgb)

        end = time.time()
        seconds = end - start
        fps = int(1 / seconds)

        if 0 == boxes_c_rgb.size:
            cv2.putText(frame_rgb, 'FPS:' + str(fps), (25, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
            cv2.imshow("detection result rgb", frame_rgb)
            cv2.imshow("detection result ir", frame_ir)
            cv2.waitKey(1)
            continue

        total_boxes_rgb = []
        for i in range(boxes_c_rgb.shape[0]):
            bbox = boxes_c_rgb[i, :4]
            score = boxes_c_rgb[i, 4]
            corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
            total_boxes_rgb.append(corpbbox)

        if (frameCount - prevSaveDetectedFaceFrameCount) >= int(fps * 2):  # save one frame roughly every 2 seconds
            rgb_img_filename = os.path.join(images_rgbs_orgs_save_path, 'rgb_org_%04d.png' % frameCount)
            ir_img_filename = os.path.join(images_nirs_orgs_save_path, 'nir_org_%04d.png' % frameCount)
            cv2.imwrite(rgb_img_filename, frame_rgb)
            cv2.imwrite(ir_img_filename, frame_ir)

            draw_rgb = frame_rgb.copy()
            draw_ir = frame_ir.copy()

            rgb_img_anns_filename = os.path.join(images_rgbs_anns_save_path, 'rgb_org_%04d.xml' % frameCount)
            ir_img_anns_filename = os.path.join(images_nirs_anns_save_path, 'nir_org_%04d.xml' % frameCount)
            # print('rgb_img_anns_filename===', rgb_img_anns_filename)
            utils.saveXML(rgb_img_anns_filename, total_boxes_rgb, 0, frame_rgb.shape[1], frame_rgb.shape[0])
            utils.saveXML(ir_img_anns_filename, total_boxes_rgb, 0, frame_ir.shape[1], frame_ir.shape[0], True)

            for b in total_boxes_rgb:
                p1 = (int(b[0]), int(b[1]))
                p2 = (int(b[2]), int(b[3]))
                cv2.rectangle(draw_rgb, p1, p2, (0, 255, 0))
                # p1_ir = (int(b[0]) + 20, int(b[1]) - 10)
                # p2_ir = (int(b[2]) + 20, int(b[3]) - 10)
                p1_ir = p1
                p2_ir = p2
                cv2.rectangle(draw_ir, p1_ir, p2_ir, (0, 255, 0))

            rgb_img_rect_filename = os.path.join(images_rgbs_rects_save_path, 'rgb_rect_%04d.png' % frameCount)
            ir_img_rect_filename = os.path.join(images_nirs_rects_save_path, 'nir_rect_%04d.png' % frameCount)
            cv2.imwrite(rgb_img_rect_filename, draw_rgb)
            cv2.imwrite(ir_img_rect_filename, draw_ir)

            prevSaveDetectedFaceFrameCount = frameCount

            cv2.putText(draw_rgb, 'FPS:' + str(fps), (25, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
            cv2.imshow("detection result rgb", draw_rgb)
            cv2.imshow("detection result ir", draw_ir)

        key = cv2.waitKey(1)
        if 'q' == chr(key & 255) or 'Q' == chr(key & 255) or 27 == key:  # 27: Esc
            break

    # release the VideoCapture objects
    camera_rgb.release()
    camera_ir.release()
    cv2.destroyAllWindows()
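# Both capture loops call utils.saveXML(filename, boxes, label, width, height[, flag])
# to write one annotation file per saved frame; that helper is not part of this file
# (and is distinct from Utils.saveXML sketched above). The sketch below assumes a
# Pascal VOC-style layout and keeps the trailing flag as an opaque, unused parameter,
# since its purpose is not visible here.
import os
from xml.etree.ElementTree import Element, SubElement, ElementTree


def save_annotation_xml(filename, boxes, label, width, height, is_ir=False):
    """Write [xmin, ymin, xmax, ymax] face boxes as a VOC-style XML annotation (sketch)."""
    root = Element("annotation")
    SubElement(root, "filename").text = os.path.basename(filename)
    size = SubElement(root, "size")
    SubElement(size, "width").text = str(width)
    SubElement(size, "height").text = str(height)
    SubElement(size, "depth").text = "3"
    for box in boxes:
        obj = SubElement(root, "object")
        SubElement(obj, "name").text = str(label)
        bndbox = SubElement(obj, "bndbox")
        SubElement(bndbox, "xmin").text = str(int(box[0]))
        SubElement(bndbox, "ymin").text = str(int(box[1]))
        SubElement(bndbox, "xmax").text = str(int(box[2]))
        SubElement(bndbox, "ymax").text = str(int(box[3]))
    ElementTree(root).write(filename)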
def exportMxCamerasInfo2XML(pzScene, pzXML):
    """Read an MXS scene file and export its camera information as XML."""
    scene = readMXS(pzScene)
    cameras = getCamerasInfo(scene)
    root = buildCameraElement(cameras)
    return Utils.saveXML(root, pzXML)
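# A hedged usage sketch tying the two MXS exporters together: read one scene and
# emit both the per-object XML and the camera XML next to it. The derived output
# file names are illustrative, not part of the original code, and the scene file
# is read twice because exportMxCamerasInfo2XML() takes a path rather than a
# scene object.
def exportMxsScene2XML(pzScene):
    scene = readMXS(pzScene)
    exportMxsObjInformation(scene, pzScene + ".objects.xml")
    return exportMxCamerasInfo2XML(pzScene, pzScene + ".cameras.xml")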
def testcamera_nir(detector):
    camera_rgb = cv2.VideoCapture(0)
    camera_ir = cv2.VideoCapture(1)

    frameCount = 0
    prevSaveDetectedFaceFrameCount = 0  # frame index of the last save with a detected face

    while True:
        start = time.time()
        frameCount += 1

        grab_rgb, frame_rgb = camera_rgb.read()  # the first return value (grab) indicates whether a frame was read
        grab_ir, frame_ir = camera_ir.read()

        detect_results_rgb = detector.detect_face(frame_rgb)

        end = time.time()
        seconds = end - start
        fps = 1 / seconds

        if detect_results_rgb is None:
            cv2.putText(frame_rgb, 'FPS:' + str(fps), (25, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255), 1)
            cv2.imshow("detection result rgb", frame_rgb)
            cv2.imshow("detection result ir", frame_ir)
            cv2.waitKey(1)
            continue

        if (frameCount - prevSaveDetectedFaceFrameCount) >= int(fps * 2):  # save one frame roughly every 2 seconds
            rgb_img_filename = os.path.join(images_rgbs_orgs_save_path, 'rgb_org_%04d.png' % frameCount)
            ir_img_filename = os.path.join(images_nirs_orgs_save_path, 'nir_org_%04d.png' % frameCount)
            cv2.imwrite(rgb_img_filename, frame_rgb)
            cv2.imwrite(ir_img_filename, frame_ir)

            total_boxes_rgb = detect_results_rgb[0]
            points_rgb = detect_results_rgb[1]

            draw_rgb = frame_rgb.copy()
            draw_ir = frame_ir.copy()

            rgb_img_anns_filename = os.path.join(images_rgbs_anns_save_path, 'rgb_org_%04d.xml' % frameCount)
            ir_img_anns_filename = os.path.join(images_nirs_anns_save_path, 'nir_org_%04d.xml' % frameCount)
            # print('rgb_img_anns_filename===', rgb_img_anns_filename)
            utils.saveXML(rgb_img_anns_filename, total_boxes_rgb, 0, frame_rgb.shape[1], frame_rgb.shape[0])
            utils.saveXML(ir_img_anns_filename, total_boxes_rgb, 0, frame_ir.shape[1], frame_ir.shape[0], True)

            for b in total_boxes_rgb:
                p1 = (int(b[0]), int(b[1]))
                p2 = (int(b[2]), int(b[3]))
                cv2.rectangle(draw_rgb, p1, p2, (0, 255, 0))
                # shift the RGB boxes by a fixed offset when drawing on the IR frame
                p1_ir = (int(b[0]) + 20, int(b[1]) - 10)
                p2_ir = (int(b[2]) + 20, int(b[3]) - 10)
                cv2.rectangle(draw_ir, p1_ir, p2_ir, (0, 255, 0))

            rgb_img_rect_filename = os.path.join(images_rgbs_rects_save_path, 'rgb_rect_%04d.png' % frameCount)
            ir_img_rect_filename = os.path.join(images_nirs_rects_save_path, 'nir_rect_%04d.png' % frameCount)
            cv2.imwrite(rgb_img_rect_filename, draw_rgb)
            cv2.imwrite(ir_img_rect_filename, draw_ir)

            prevSaveDetectedFaceFrameCount = frameCount

            cv2.putText(draw_rgb, 'FPS:' + str(fps), (25, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
            cv2.imshow("detection result rgb", draw_rgb)
            cv2.imshow("detection result ir", draw_ir)

        key = cv2.waitKey(1)
        if 'q' == chr(key & 255) or 'Q' == chr(key & 255) or 27 == key:  # 27: Esc
            break

    # release the VideoCapture objects
    camera_rgb.release()
    camera_ir.release()
    cv2.destroyAllWindows()
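# A hedged launch sketch for the capture loops above. The images_*_save_path
# globals and the detector are defined elsewhere in the project; the values and
# folder layout below are only illustrative placeholders so the sketch can run
# on its own.
import os

images_rgbs_orgs_save_path = 'captures/rgb/orgs'
images_nirs_orgs_save_path = 'captures/nir/orgs'
images_rgbs_anns_save_path = 'captures/rgb/anns'
images_nirs_anns_save_path = 'captures/nir/anns'
images_rgbs_rects_save_path = 'captures/rgb/rects'
images_nirs_rects_save_path = 'captures/nir/rects'


def prepare_save_dirs():
    """Create the output folders used by the capture loops if they do not exist."""
    for path in (images_rgbs_orgs_save_path, images_nirs_orgs_save_path,
                 images_rgbs_anns_save_path, images_nirs_anns_save_path,
                 images_rgbs_rects_save_path, images_nirs_rects_save_path):
        if not os.path.exists(path):
            os.makedirs(path)


if __name__ == '__main__':
    prepare_save_dirs()
    # Construct a real detector here (any object whose detect_face(frame) returns
    # (boxes, landmarks) or None), then start the capture loop:
    # testcamera_nir(detector)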