Example #1
0
def socketVideoProcces(nFrames=1, hand_svm_model=r"..\model\hand_svm_model.m",
                       face_cascade_path=r"..\model\haarcascade_frontalface_alt2.xml",saveVideo=None):
    """Video-processing loop fed by the socket frame buffer.

    Repeatedly pulls frames via readFrameBuffer(), mirrors each frame, and
    runs gesture recognition (frameProcess) on every nFrames-th frame until
    Esc is pressed in the OpenCV window.

    Parameters:
        nFrames: process one frame out of every nFrames; values < 1 are
            clamped to 1 (process every frame).
        hand_svm_model: path to a joblib-pickled hand-shape SVM; loaded only
            when the file exists, otherwise no SVM is used.
        face_cascade_path: path to a Haar face-cascade XML; None disables
            face detection.
        saveVideo: output video file name; when truthy, the processed
            (double-width, side-by-side) frames are recorded as MJPG.
    """
    print('Socket视频处理现成已启动')  # announce that the socket video-processing thread has started

    # Load the pre-trained hand-shape SVM, if a readable path was given.
    handsvm = None
    if hand_svm_model is not None and os.path.exists(hand_svm_model):
        handsvm = joblib.load(hand_svm_model)

    # Block until the frame buffer yields a first frame; Esc (key code 27) aborts.
    frame = readFrameBuffer()
    while frame is None:
        frame = readFrameBuffer()
        if cv2.waitKey(1) == 27:  # poll the keyboard for 1 ms
            return

    lastFrame = np.copy(frame)  # np.copy(frame) is faster than frame.copy()

    # Haar cascade face detector; the constructor already loads the file,
    # so the redundant extra .load() call was dropped.
    face_cascade = None
    if face_cascade_path is not None:
        face_cascade = cv2.CascadeClassifier(face_cascade_path)

    # Writer for the processed output video; frames are twice the source
    # width because frameProcess emits a side-by-side composite.
    vid_writer = None
    if saveVideo:
        vid_writer = cv2.VideoWriter(saveVideo, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 15,
                                     (frame.shape[1] * 2, frame.shape[0]))

    if nFrames <= 1:
        nFrames = 1
    k = 0  # number of frames seen so far
    while True:
        k = (k + 1) % 2592000  # reset once a day to keep the counter bounded
        currentFrame = readFrameBuffer()
        if currentFrame is None:
            cv2.waitKey(1)  # nothing available yet; yield briefly and retry
            continue

        currentFrame = cv2.flip(currentFrame, 1)  # mirror so motion matches the user

        # Skip-frame processing: recognize only every nFrames-th frame.
        if k % nFrames == 0:
            gesture, cg, lastFrame = frameProcess(currentFrame, lastFrame, handsvm, face_cascade,
                                                  useWaterShed=True, moveSeg=True, showVideo=True,
                                                  saveVideo=vid_writer)
            # Poll the keyboard for 1 ms; Esc exits the loop.
            key = cv2.waitKey(1)
            if key == 27:
                # text_save(handdata, 'data.txt')  # used when collecting training data
                break
    if saveVideo:
        vid_writer.release()
Example #2
0
def socketServerWithVideoProcess(ip='127.0.0.1', port=6666,BUFSIZE = None,Height=480,Width=640,showVideo=True,
                                 hand_svm_model=r"..\model\hand_svm_model.m",
                                 face_cascade_path=r"..\model\haarcascade_frontalface_alt2.xml",handSize=100,
                                 useWaterShed=True,moveSeg=True,saveRawVideo=None,saveVideo=None,collectHandData=None):
    """Socket server that receives video frames and runs gesture recognition.

    Listens on (ip, port). For each connected client it keeps receiving
    BUFSIZE-byte frame packets, decodes and mirrors each frame, feeds it to
    frameProcess, publishes the recognized gesture/confidence to the web
    module (under lock), and echoes the gesture name back to the client.
    Pressing Esc in the OpenCV window shuts everything down.

    Parameters:
        ip, port: server bind address.
        BUFSIZE: receive-buffer size in bytes; defaults to Height*Width*3
            (one uncompressed BGR frame).
        Height, Width: expected frame dimensions.
        showVideo: forwarded to frameProcess to control display.
        hand_svm_model: path to a joblib-pickled hand-shape SVM (optional).
        face_cascade_path: path to a Haar face-cascade XML (optional).
        handSize, useWaterShed, moveSeg: forwarded to frameProcess.
        saveRawVideo: base name for recording the raw incoming video (optional).
        saveVideo: base name for recording the processed video (optional).
        collectHandData: base name for hand-shape training-data collection;
            also creates a <name>_imgs directory for hand images.
    """
    # NOTE(review): these globals are declared but never assigned here — the
    # function writes web.gGESTURE / web.gConfidence instead; the
    # declarations look vestigial.
    global gGESTURE
    global gConfidence

    print('加载SVM手形识别模型...', end='')
    handsvm = None
    if hand_svm_model is not None and os.path.exists(hand_svm_model):
        handsvm = joblib.load(hand_svm_model)  # pre-trained hand-shape SVM
    print('OK')

    # Haar cascade face detector; the constructor already loads the file,
    # so the redundant extra .load() call was dropped.
    face_cascade = None
    if face_cascade_path is not None:
        print('加载人脸检测模型...', end='')
        face_cascade = cv2.CascadeClassifier(face_cascade_path)
        print('OK')

    # Writers for the raw and processed output videos.
    vidRaw_writer = None
    vid_writer = None
    if saveRawVideo:
        print('创建原始视频存储对象...', end='')
        saveRawVideoFileName = makeVideoFileName(filename=saveRawVideo)
        vidRaw_writer = cv2.VideoWriter(saveRawVideoFileName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                        15, (Width, Height))
        print('OK')
    if saveVideo:
        print('创建处理过的视频存储对象...', end='')
        saveRecVideoFileName = makeVideoFileName(filename=saveVideo)
        # Double width: frameProcess writes a side-by-side composite frame.
        vid_writer = cv2.VideoWriter(saveRecVideoFileName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                     15, (Width * 2, Height))
        print('OK')

    # Directory for collected hand-shape images (training-data capture).
    if collectHandData:
        if not os.path.exists(collectHandData + '_imgs'):
            print('创建存储手形图像的文件夹' + collectHandData + '_imgs' + '...', end='')
            os.makedirs(collectHandData + '_imgs')
            print('OK')

    print('启动Socket服务器...', end='')
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow quick rebinding of ip/port
    s.bind((ip, port))
    s.listen(1)
    print('OK')
    print('Socket服务器端已启动,开始侦听客户端连接......')
    lastFrame = None
    if BUFSIZE is None:
        BUFSIZE = Height * Width * 3  # one raw BGR frame per packet
    while True:
        conn, addr = s.accept()
        print('接到来自%s的Socket连接' % addr[0])
        while True:
            # Poll the keyboard for 1 ms; Esc shuts the server down cleanly.
            key = cv2.waitKey(1)
            if key == 27:
                print('正在关闭Socket连接...', end='')
                conn.close()
                print('OK')
                print('正在关闭Socket服务器...', end='')
                s.close()
                print('OK')
                if saveRawVideo:  # finalize the raw video file
                    print('正在保存原始视频...', end='')
                    vidRaw_writer.release()
                    print('OK')
                if saveVideo:  # finalize the processed video file
                    print('正在保存处理过的视频...', end='')
                    vid_writer.release()
                    print('OK')
                if collectHandData:
                    # handdata is presumably a module-level list filled by
                    # frameProcess during data collection — TODO confirm.
                    print('正在保存手形数据...', end='')
                    text_save(handdata, collectHandData + '_data.txt')
                    print('OK')
                return
            try:
                frame = conn.recv(BUFSIZE)
                r = 1
                # Keep reading until a whole frame has arrived; r bounds the
                # number of recv calls so a stalled client cannot hang us.
                while len(frame) < BUFSIZE and r < BUFSIZE:
                    frame += conn.recv(BUFSIZE - len(frame))
                    r += 1
                if r >= BUFSIZE:  # looped many times without completing a frame
                    print('长时间接收不到客户端数据,断开连接')
                    s.listen(1)
                    print('Socket服务器端重新开始侦听......')
                    break
            except Exception:
                print('客户端断开连接')
                s.listen(1)
                print('Socket服务器端重新开始侦听......')
                break
            if len(frame) > 0:
                # np.frombuffer replaces the deprecated np.fromstring (same
                # bytes -> uint8 view, consistent with receive_data below).
                frame = np.frombuffer(frame, dtype='uint8')
                if len(frame) == Height * Width * 3:
                    frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                    # frame = np.reshape(frame, (Height, Width, 3))  # enable if frames arrive un-encoded
                    frame = cv2.flip(frame, 1)  # mirror so motion matches the user
                    if saveRawVideo:  # record the raw incoming frame
                        vidRaw_writer.write(frame)
                    if lastFrame is None:
                        lastFrame = np.copy(frame)
                    gesture, cg, lastFrame = frameProcess(frame, lastFrame, handsvm=handsvm, face_cascade=face_cascade,
                                                          useWaterShed=useWaterShed, moveSeg=moveSeg, handSize=handSize,
                                                          showVideo=showVideo, saveVideo=vid_writer,
                                                          collectHandData=collectHandData)
                    # Publish the result for other threads (e.g. the web module).
                    lock.acquire()
                    web.gGESTURE = gesture  # gesture name
                    web.gConfidence = cg  # confidence
                    lock.release()
                    try:
                        conn.send(gesture.encode('utf-8'))
                    except Exception:  # narrowed from a bare except
                        print('客户端断开连接')
                        s.listen(1)
                        print('Socket服务器端重新开始侦听......')
                        break
Example #3
0
def localVideoProcces(
        cameraID=0,
        nFrames=2,
        hand_svm_model=r"..\model\hand_svm_model.m",
        face_cascade_path=r"..\model\haarcascade_frontalface_alt2.xml",
        handSize=100,
        useWaterShed=True,
        moveSeg=True,
        GestureSendMode='SOCKET',
        ip=None,
        port=8888,
        restURL=None,
        showVideo=True,
        saveVideo=None,
        collectHandData=None,
        demoMap=True,
        mapPath=None,
        chromeDriverPath=None,
        sensitivity=1):
    """Gesture recognition on a local camera, with optional result forwarding.

    Captures frames from the given camera, mirrors them, and runs gesture
    recognition (frameProcess) on every nFrames-th frame. Recognized
    gestures can drive the Baidu-map demo and/or be forwarded to a server
    over a raw socket or a REST endpoint. Two consecutive Esc presses exit.

    Parameters:
        cameraID: OpenCV camera index.
        nFrames: process one frame out of every nFrames (clamped to >= 1).
        hand_svm_model: path to a joblib-pickled hand-shape SVM (optional).
        face_cascade_path: path to a Haar face-cascade XML (optional).
        handSize, useWaterShed, moveSeg: forwarded to frameProcess.
        GestureSendMode: 'SOCKET' or 'REST' selects the forwarding channel.
        ip, port: socket server address (SOCKET mode).
        restURL: REST endpoint URL (REST mode).
        showVideo: forwarded to frameProcess to control display.
        saveVideo: base name for recording the processed video (optional).
        collectHandData: base name for training-data capture (optional).
        demoMap: when True, drive the baiduMap demo with recognized gestures.
        mapPath, chromeDriverPath, sensitivity: baiduMap configuration.
    """
    c = None  # socket object; stays None in REST mode so TellServerClose(socket=c) is safe
    baseurl = ''  # REST base URL; stays '' in socket mode for the same reason

    # Optionally connect to a server that consumes gesture results via socket.
    sendGesture2ServerViaSocket = False
    if GestureSendMode == 'SOCKET' and ip is not None:
        print('正在连接服务器(', ip, ':', port, ')......')
        c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn = c.connect_ex((ip, port))
        # connect_ex returns 0 on success and an errno on failure. The old
        # check compared against 10061 (Windows WSAECONNREFUSED) only, so any
        # other failure (timeout, unreachable, POSIX ECONNREFUSED) was wrongly
        # treated as a successful connection.
        if conn != 0:
            print('无法与服务器(', ip, ':', port,
                  ')建立Socket连接,手势识别结果无法通过Socket接口发送到服务器端。')
        else:
            sendGesture2ServerViaSocket = True
            print('已与服务器(', ip, ':', port, ')建立Socket连接,手势识别结果将同步发送到服务器端。')

    # Optionally forward gesture results to a REST endpoint.
    sendGesture2ServerViaREST = False
    if GestureSendMode == 'REST' and restURL is not None:
        print('将通过REST接口向', restURL, '发送手势识别结果.....')
        baseurl = restURL + '?'
        sendGesture2ServerViaREST = True

    # Pre-trained hand-shape SVM, if a readable path was given.
    handsvm = None
    if hand_svm_model is not None and os.path.exists(hand_svm_model):
        handsvm = joblib.load(hand_svm_model)

    if demoMap:
        mapDemo = baiduMap(mapPath=mapPath,
                           mapType='BMAP_NORMAL_MAP',
                           chromeDriverPath=chromeDriverPath,
                           sensitivity=sensitivity)
        demoMap = mapDemo.canDemo  # downgrade to False when the map demo cannot run

    # Open the camera.
    cap = cv2.VideoCapture(cameraID)

    # Force MJPG so YUV formats do not throttle the frame rate.
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
    print('FrameWidth:', cap.get(cv2.CAP_PROP_FRAME_WIDTH), ', FrameHeight:',
          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    hasFrame, frame = cap.read()
    lastFrame = np.copy(frame)  # np.copy(frame) is faster than frame.copy()

    # Haar cascade face detector; the constructor already loads the file,
    # so the redundant extra .load() call was dropped.
    face_cascade = None
    if face_cascade_path is not None:
        face_cascade = cv2.CascadeClassifier(face_cascade_path)

    # Writer for the processed output video (double width: side-by-side composite).
    vid_writer = None
    if saveVideo:
        videofilename = makeVideoFileName(filename=saveVideo)
        vid_writer = cv2.VideoWriter(
            videofilename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 15,
            (frame.shape[1] * 2, frame.shape[0]))

    # Directory for collected hand-shape images (training-data capture).
    if collectHandData:
        if not os.path.exists(collectHandData + '_imgs'):
            os.makedirs(collectHandData + '_imgs')

    if nFrames <= 1:
        nFrames = 1
    lastkey = -1  # previous key press; two consecutive Esc presses exit
    k = 0  # number of frames seen so far
    while cap.isOpened():
        k = (k + 1) % 2592000  # reset once a day to keep the counter bounded
        hasFrame, currentFrame = cap.read()
        if currentFrame is None:
            # waitKey(1), not waitKey() (which blocks until a key press), so
            # the loop keeps polling the camera — consistent with the other
            # processing loops in this module.
            cv2.waitKey(1)
            continue

        currentFrame = cv2.flip(currentFrame, 1)  # mirror so motion matches the user

        # Skip-frame processing: recognize only every nFrames-th frame.
        if k % nFrames == 0:
            gesture, cg, lastFrame = frameProcess(
                currentFrame,
                lastFrame,
                handsvm,
                face_cascade,
                handSize,
                useWaterShed,
                moveSeg,
                showVideo,
                saveVideo=vid_writer,
                collectHandData=collectHandData)
            if demoMap:
                try:
                    mapDemo.run(command=gesture)  # drive the map demo with the gesture
                    if mapDemo.confirmClose:
                        TellServerClose(
                            lastkey=27,
                            key=27,
                            sendGesture2ServerViaSocket=
                            sendGesture2ServerViaSocket,
                            sendGesture2ServerViaREST=sendGesture2ServerViaREST,
                            socket=c,
                            baseurl=baseurl)
                        break
                except Exception as e:
                    print('执行动作出错:', e)
            if sendGesture2ServerViaSocket:
                try:
                    c.send(gesture.encode("utf8"))
                except Exception:
                    print("服务器连接异常,可能服务器端已经关闭,正在尝试重新连接服务器......")
                    conn = c.connect_ex((ip, port))
                    print(conn)
                    # Same fix as above: any non-zero errno is a failure.
                    if conn != 0:
                        print('无法与服务器(', ip, ':', port,
                              ')建立Socket连接,手势识别结果无法通过Socket接口发送到服务器端。')
                        sendGesture2ServerViaSocket = False
                    else:
                        sendGesture2ServerViaSocket = True
                        print('已与服务器(', ip, ':', port,
                              ')重新建立Socket连接,手势识别结果将继续同步发送到服务器端。')
            if sendGesture2ServerViaREST:
                params = {'gesture': gesture, 'confidence': cg}
                res = requests.get(baseurl, params=params)
                # res.encoding = 'utf-8'
                # print(res.text)

            # Poll the keyboard for 1 ms; two consecutive Esc presses exit.
            key = cv2.waitKey(1)
            if lastkey == 27:
                TellServerClose(
                    lastkey=lastkey,
                    key=key,
                    sendGesture2ServerViaSocket=sendGesture2ServerViaSocket,
                    sendGesture2ServerViaREST=sendGesture2ServerViaREST,
                    socket=c,
                    baseurl=baseurl)
                # Save collected hand-shape data (training-data capture).
                if collectHandData:
                    text_save(handdata, collectHandData + '_data.txt')
                break
            lastkey = key
    cap.release()
    if saveVideo:
        vid_writer.release()
        print('手势识别结果视频已经记录在文件', videofilename, '中。')
Example #4
0
async def localVideoProcces(cameraID=0,
                            nFrames=2,
                            hand_svm_model=None,
                            dnn_hand_model=None,
                            face_cascade_path=None,
                            resFaceModel_path=None,
                            useDlibFace=True,
                            handAreaScope=[3000, 102400],
                            useWaterShed=True,
                            moveSeg=True,
                            useBackSeg=True,
                            GestureSendMode=None,
                            ip=None,
                            port=8888,
                            restURL=None,
                            ws=None,
                            showVideo=True,
                            saveVideo=None,
                            collectHandData=None,
                            demoMap=True,
                            mapPath=None,
                            chromeDriverPath=None,
                            sensitivity=1):
    """Async gesture recognition on a local camera with multi-channel forwarding.

    Captures frames from the camera (via the myVideoCapture wrapper), mirrors
    them, and runs gesture recognition (frameProcess) on every nFrames-th
    frame using an SVM and/or DNN hand model plus a face detector. Recognized
    gestures can drive the baiduMap demo and be forwarded over a raw socket,
    a REST endpoint, or a WebSocket (selected by GestureSendMode). Duplicate
    and 'invalid' gestures are not re-sent. Two consecutive Esc presses exit.

    NOTE(review): relies on several module-level names not defined here —
    handTrackLen, ProcessOneEverynFrames, RecTestData, frameBackGround,
    AsGestureRestServer, lock, web, handdata — verify they are initialized
    before this coroutine runs.

    Parameters:
        cameraID: camera index passed to myVideoCapture.
        nFrames: process one frame out of every nFrames (clamped to >= 1).
        hand_svm_model: path to a joblib-pickled hand-shape SVM (optional).
        dnn_hand_model: path to a DNN hand model, wrapped in handDNN (optional).
        face_cascade_path, resFaceModel_path, useDlibFace: face-detector config.
        handAreaScope: [min, max] hand area forwarded to frameProcess.
        useWaterShed, moveSeg, useBackSeg: segmentation switches.
        GestureSendMode: 'SOCKET', 'REST' or 'WEBSOCKET'.
        ip, port: socket server address (SOCKET mode).
        restURL: REST endpoint URL (REST mode).
        ws: connected WebSocket object (WEBSOCKET mode).
        showVideo: forwarded to frameProcess to control display.
        saveVideo: base name for recording the processed video (optional).
        collectHandData: base name for training-data capture (optional).
        demoMap, mapPath, chromeDriverPath, sensitivity: baiduMap demo config.
    """
    # Ring buffer of recent raw frames; used to dump a short test clip when a
    # new gesture is recognized (active only when RecTestData is set).
    rPoint = 0  # write pointer into recordFrames (circular)
    recordFramesLen = handTrackLen * ProcessOneEverynFrames  # buffer length in frames
    recordFrames = list([None] * recordFramesLen)  # history of raw frames
    testData = []  # labels for the recorded test clips

    c = None  # socket object; stays None in REST mode so TellServerClose(socket=c) is safe
    baseurl = ''  # REST base URL; stays '' in socket mode for the same reason

    sendGesture2ServerViaSocket = False  # forward results over a raw socket?
    if GestureSendMode == 'SOCKET' and ip is not None:
        print('正在连接服务器(', ip, ':', port, ')......')
        c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn = c.connect_ex((ip, port))
        # NOTE(review): 10061 is Windows WSAECONNREFUSED; connect_ex returns 0
        # on success, so any other non-zero error code falls into the
        # "connected" branch — looks like a latent bug, confirm.
        if conn == 10061:
            print('无法与服务器(', ip, ':', port,
                  ')建立Socket连接,手势识别结果无法通过Socket接口发送到服务器端。')
        else:
            sendGesture2ServerViaSocket = True
            print('已与服务器(', ip, ':', port, ')建立Socket连接,手势识别结果将同步发送到服务器端。')

    sendGesture2ServerViaREST = False  # forward results via REST?
    if GestureSendMode == 'REST' and restURL is not None:
        print('将通过REST接口向', restURL, '发送手势识别结果.....')
        baseurl = restURL + '?'
        sendGesture2ServerViaREST = True

    sendGesture2ServerViaWebSocket = False  # forward results via WebSocket?
    if GestureSendMode == 'WEBSOCKET' and ws is not None:
        sendGesture2ServerViaWebSocket = True

    handsvm = None
    if not hand_svm_model is None:
        if os.path.exists(hand_svm_model):
            handsvm = joblib.load(hand_svm_model)  # pre-trained hand-shape SVM
    dnn_model = None
    if not dnn_hand_model is None:
        if os.path.exists(dnn_hand_model):
            dnn_model = handDNN(dnn_hand_model)  # DNN hand classifier (project class)

    if demoMap:
        mapDemo = baiduMap(mapPath=mapPath,
                           mapType='BMAP_NORMAL_MAP',
                           chromeDriverPath=chromeDriverPath,
                           sensitivity=sensitivity)
        demoMap = mapDemo.canDemo  # downgrade to False when the map demo cannot run

    # Open the camera via the project wrapper.
    cap = myVideoCapture(cameraID=cameraID)

    # Force MJPG so YUV formats do not throttle the frame rate.
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
    print('FrameWidth:', cap.get(cv2.CAP_PROP_FRAME_WIDTH), ', FrameHeight:',
          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    hasFrame, frame = cap.read()
    lastFrame = np.copy(frame)  # np.copy(frame) is faster than frame.copy()

    # Face detector (Dlib / Haar / ResNet depending on the arguments).
    myFace = faceDection(useDlibFace=useDlibFace,
                         face_cascade_path=face_cascade_path,
                         resFaceModel_path=resFaceModel_path)

    # Writer for the processed output video (double width: side-by-side composite).
    vid_writer = None
    if saveVideo:
        videofilename = makeVideoFileName(filename=saveVideo)
        vid_writer = cv2.VideoWriter(
            videofilename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 15,
            (frame.shape[1] * 2, frame.shape[0]))

    # Directory for collected hand-shape images (training-data capture).
    if collectHandData:
        if not os.path.exists(collectHandData + '_imgs'):
            os.makedirs(collectHandData + '_imgs')

    if nFrames <= 1: nFrames = 1
    lastkey = -1  # previous key press; two consecutive Esc presses exit
    k = 0  # number of frames seen so far
    lastGesture = ''  # previous gesture, to suppress duplicate sends
    while cap.isOpened() and hasFrame:
        k = (k + 1) % 2592000  # reset once a day to keep the counter bounded
        t = time.time()
        hasFrame, currentFrame = cap.read()
        if k == 1:
            # NOTE(review): fgMask is never read; this call presumably primes
            # the module-level frameBackGround background subtractor — confirm.
            fgMask = frameBackGround.apply(currentFrame)
        if currentFrame is None:
            # NOTE(review): waitKey() with no timeout blocks until a key is
            # pressed — siblings use waitKey(1); confirm this is intended.
            cv2.waitKey()
            continue

        # Record the raw frame into the ring buffer (test-data generation).
        if RecTestData:
            recordFrames[rPoint] = currentFrame.copy()
            rPoint = (rPoint + 1) % len(recordFrames)

        currentFrame = cv2.flip(currentFrame, 1)

        # Skip-frame processing: recognize only every nFrames-th frame.
        if k % nFrames == 0:
            gesture, cg, lastFrame = frameProcess(
                currentFrame,
                lastFrame,
                handsvm=handsvm,
                handdnn=dnn_model,
                myFace=myFace,
                handAreaScope=handAreaScope,
                useWaterShed=useWaterShed,
                moveSeg=moveSeg,
                useBackSeg=useBackSeg,
                showVideo=showVideo,
                saveVideo=vid_writer,
                collectHandData=collectHandData)
            # Dump the ring buffer to a short test video when a new, valid
            # gesture appears.
            if RecTestData and gesture != 'invalid' and gesture != lastGesture:
                st = str(time.time())
                videofilename = makeVideoFileName(filename=RecTestData + st +
                                                  '_' + gesture)
                testData.append(['test' + st + ',' + gesture])
                vid_test_writer = cv2.VideoWriter(
                    videofilename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                    15, (frame.shape[1], frame.shape[0]))
                # Oldest-to-newest: from just past the write pointer to the
                # end, then from the start back up to the pointer.
                for i in range(rPoint + 1, len(recordFrames)):
                    if recordFrames[i] is not None:
                        vid_test_writer.write(recordFrames[i])
                for i in range(0, rPoint + 1):
                    if recordFrames[i] is not None:
                        vid_test_writer.write(recordFrames[i])
                vid_test_writer.release()

            if AsGestureRestServer:  # expose results through the REST server module
                lock.acquire()
                web.gGESTURE = gesture  # gesture name
                web.gConfidence = cg  # confidence
                lock.release()

            if demoMap:
                try:
                    mapDemo.run(command=gesture)  # drive the map demo with the gesture
                    if mapDemo.confirmClose:
                        await TellServerClose(
                            lastkey=27,
                            key=27,
                            sendGesture2ServerViaSocket=
                            sendGesture2ServerViaSocket,
                            sendGesture2ServerViaREST=sendGesture2ServerViaREST,
                            sendGesture2ServerViaWebSocket=
                            sendGesture2ServerViaWebSocket,
                            socket=c,
                            baseurl=baseurl,
                            ws=ws)
                        break
                except Exception as e:
                    print('执行动作出错:', e)
            if gesture != lastGesture and gesture != 'invalid':  # skip duplicate and invalid gestures
                if sendGesture2ServerViaSocket:
                    try:
                        c.send(gesture.encode("utf-8"))
                    except Exception as e:
                        print("服务器连接异常,可能服务器端已经关闭,正在尝试重新连接服务器......")
                        conn = c.connect_ex((ip, port))
                        print(conn)
                        # NOTE(review): same Windows-only 10061 check as above.
                        if conn == 10061:
                            print('无法与服务器(', ip, ':', port,
                                  ')建立Socket连接,手势识别结果无法通过Socket接口发送到服务器端。')
                            sendGesture2ServerViaSocket = False
                        else:
                            sendGesture2ServerViaSocket = True
                            print('已与服务器(', ip, ':', port,
                                  ')重新建立Socket连接,手势识别结果将继续同步发送到服务器端。')
                if sendGesture2ServerViaREST:
                    # NOTE(review): the actual REST call is commented out, so
                    # REST mode currently only builds params — confirm.
                    params = {'gesture': gesture, 'confidence': cg}
                    #res = requests.get(baseurl, params=params)
                    # res.encoding = 'utf-8'
                    # print(res.text)
                if sendGesture2ServerViaWebSocket:
                    try:
                        json_str = gesture2JSON(gesture, cg)
                        await ws.send(json_str)
                    except Exception as e:
                        sendGesture2ServerViaWebSocket = False
                        print(
                            "服务器连接异常,可能服务器端已经关闭,手势识别结果将不再通过WebSocket接口发送到服务器端!"
                        )
            lastGesture = gesture  # remember to avoid re-sending the same gesture

            # Poll the keyboard for 1 ms; two consecutive Esc presses exit.
            key = cv2.waitKey(1) & 0xFF
            if lastkey == 27:
                await TellServerClose(
                    lastkey=lastkey,
                    key=key,
                    sendGesture2ServerViaSocket=sendGesture2ServerViaSocket,
                    sendGesture2ServerViaREST=sendGesture2ServerViaREST,
                    sendGesture2ServerViaWebSocket=
                    sendGesture2ServerViaWebSocket,
                    socket=c,
                    baseurl=baseurl,
                    ws=ws)
                # Save collected hand-shape data (training-data capture).
                if collectHandData:
                    text_save(handdata, collectHandData + '_data.txt')
                # Save the test-clip label list.
                if RecTestData:
                    text_save(testData, RecTestData + '_data.csv')
                break
            lastkey = key
    cap.release()
    if saveVideo:
        vid_writer.release()
        print('手势识别结果视频已经记录在文件', videofilename, '中。')
Example #5
0
async def receive_data(videoWebsocket,
                       sendGestureWebsocket=None,
                       svm_hand_model=None,
                       dnn_hand_model=None,
                       myFace=None,
                       handAreaScope=[3000, 102400],
                       useWaterShed=True,
                       moveSeg=True,
                       useBackSeg=True,
                       saveRawVideo=None,
                       saveVideo=None):
    global gGESTURE
    global gConfidence
    lastframe = None
    lastGesture = ''  # 记录上一个手势
    sendGesture2ServerViaWebSocket = False  # 是否通过WebSocket向服务器传送手势识别结果的标识
    if GestureSendMode == 'WEBSOCKET' and sendGestureWebsocket is not None:
        sendGesture2ServerViaWebSocket = True

    # 保存视频处理结果用
    vidRaw_writer = None
    vid_writer = None
    Width, Height = 640, 480
    if saveRawVideo:
        print('创建原始视频存储对象...', end='')
        saveRawVideoFileName = makeVideoFileName(filename=saveRawVideo)
        vidRaw_writer = cv2.VideoWriter(
            saveRawVideoFileName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            15, (Width, Height))
        print('OK')
    if saveVideo:
        print('创建处理过的视频存储对象...', end='')
        saveRecVideoFileName = makeVideoFileName(filename=saveVideo)
        vid_writer = cv2.VideoWriter(
            saveRecVideoFileName, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            15, (Width * 2, Height))
        print('OK')

    # 测试用----------------------------------------------------------
    testFPS = False  # 测试标记,测试机器人传来图像速率时,设置为True,正常运行程序功能时设置为False
    testSeconds = 3  # 测试时长(秒数)
    recv_frames = 0  # 收到的帧数
    startTime = None  # 计时起点,用于测试机器人传来图像速率
    # 测试用----------------------------------------------------------

    recvDadaPackets = 0  # 记录收到数据包数量
    completePackets = 0  # 记录处理完的数据包数量
    k = 0  # 记录处理过的帧数
    print('WebSocket视频数据客户端等待从服务器接收数据...')
    while not videoWebsocket.closed:
        recv_data = await videoWebsocket.recv()
        recvDadaPackets += 1
        k = (k + 1) % 2592000  # 一天重置一次
        # print('WebSocket视频数据客户端收到数据包个数:',recvDadaPackets)
        if testFPS:  # 测试帧率
            if startTime is None:
                startTime = time.time()  # 第一次收到数据开始计时
                print('测试时间起点:', startTime)
            recv_frames += 1
            currentTime = time.time()
            if currentTime - startTime > 0:
                fps = ',平均FPS:' + str(
                    round(recv_frames / (currentTime - startTime), 1))
            else:
                fps = ''
            print('当前时间:', currentTime, '收到帧数:', recv_frames, fps)
            if currentTime - startTime >= testSeconds:  # 统计到设定秒数就结束,自动转为正常状态
                fps_recv = round(recv_frames / (currentTime - startTime), 1)
                print(testSeconds, '秒内共收到', recv_frames, '帧数据,平均FPS:',
                      fps_recv, '测试结束,转入正常工作状态。')
                testFPS = False
                # recvDadaPackets =0
        else:
            if recv_data == 'close':  #仅在自己搭的测试环境有用
                print('WebSocket视频数据客户端收到服务器关闭连接指令,中止接收数据')
                break
            else:
                # print('处理收到的第',recvDadaPackets,'帧数据...')
                #decom_recv_data = gzip.decompress(recv_data) # 解压缩
                jpg_frame = np.frombuffer(recv_data, dtype='uint8')
                img = cv2.imdecode(jpg_frame, cv2.IMREAD_COLOR)
                img = cv2.flip(img, 1)

                if saveRawVideo:  # 保存原始视频
                    vidRaw_writer.write(img)

                if lastframe is None:
                    lastframe = np.copy(img)
                tc = time.time()
                if k % ProcessOneEverynFrames == 0:
                    gesture, cg, lastframe = frameProcess(
                        img,
                        lastframe,
                        handsvm=svm_hand_model,
                        handdnn=dnn_hand_model,
                        myFace=myFace,
                        handAreaScope=handAreaScope,
                        useWaterShed=useWaterShed,
                        moveSeg=moveSeg,
                        useBackSeg=useBackSeg,
                        showVideo=True,
                        saveVideo=saveVideo,
                        collectHandData=None)
                    completePackets += 1
                    tc = time.time() - tc
                    fps_recg = round(completePackets / tc, 0)  # 每秒处理多少帧
                    # print('WebSocket数据处理客户端处理完:',completePackets,'帧,本次耗时',round(tc,0),'秒,平均处理速度:',fps_recg,'帧/秒')

                    # 如果作为REST服务器对外提供服务,则将识别结果写入全局变量
                    if AsGestureRestServer:
                        lock.acquire()
                        web.gGESTURE = gesture
                        web.gConfidence = cg
                        lock.release()

                    if gesture != lastGesture and gesture != 'invalid':
                        # 通过Websocket发送手势识别结果
                        if sendGesture2ServerViaWebSocket:
                            try:
                                gestureJSON = gesture2JSON(gesture, cg)
                                await sendGestureWebsocket.send(gestureJSON)
                            except Exception as e:
                                sendGesture2ServerViaWebSocket = False
                                print("连接异常,手势识别结果将不再通过WebSocket接口发送!")
                    lastGesture = gesture

                    key = cv2.waitKey(1) & 0xFF
                    if key == 27:
                        if saveRawVideo:  # 保存原始视频
                            print('正在保存原始视频...', end='')
                            vidRaw_writer.release()
                            print('OK')
                        if saveVideo:  # 保存处理过的视频
                            print('正在保存处理过的视频...', end='')
                            vid_writer.release()
                            print('OK')