def __init__(self,
                 model,
                 height=256,
                 device='CPU',
                 openvino=False,
                 tensorrt=False,
                 extrinsics_path="./data/extrinsics.json",
                 fx=1,
                 canvas_shape=(720, 1280, 3)) -> None:

        if openvino:
            from modules.inference_engine_openvino import InferenceEngineOpenVINO
            self.net = InferenceEngineOpenVINO(model, device)
        else:
            from modules.inference_engine_pytorch import InferenceEnginePyTorch
            self.net = InferenceEnginePyTorch(model, device, tensorrt)

        try:
            with open(extrinsics_path, 'r') as f:
                self.extrinsics = json.load(f)
        except Exception:
            # Report the failure, then fall back to the default extrinsics file
            traceback.print_exc()
            with open("./data/extrinsics.json", 'r') as f:
                self.extrinsics = json.load(f)

        self.base_height = height
        self.fx = fx
        self.canvas_3d = np.zeros(canvas_shape, dtype=np.uint8)
        self.plotter = Plotter3d(self.canvas_3d.shape[:2])

        # print("[INFO] plotter shape {}".format(self.plotter.shape))
        print("[INFO] canvas shape {}".format(self.canvas_3d.shape))
Code example #2
File: viz_raw.py Project: rtmtree/CSPS
    def updatefig(i):
        print("updatefig", i)
        ax0.cla()
        ax1.cla()
        ax2.cla()

        imageIdx = (i + startFrom) * 14  # advance 14 video frames per animation step

        if (imageIdx >= vidLength - 1):
            print(f'imageIdx {imageIdx} >= vidLength {vidLength}; closing!')
            plt.close(fig)
            return ax0, ax1, ax2

        print("imageIdx", imageIdx)

        frame = vid.get_data(imageIdx)
        ax1.imshow(frame)    

        if withPose:
            poseIdx = imageIdx
            poses_3dFromImage = np.array([poseList[poseIdx][1:].reshape(19, 3)])
            # poses_3dFromImage = stand3dmatrix

            edgesFromImage = (Plotter3d.SKELETON_EDGES +
                              19 * np.arange(poses_3dFromImage.shape[0]).reshape(
                                  (-1, 1, 1))).reshape((-1, 2))
            canvas_3d = np.zeros((450, 450, 3), dtype=np.uint8)
            plotter = Plotter3d(canvas_3d.shape[:2])
            plotter.plot(canvas_3d, poses_3dFromImage, edgesFromImage)
            ax0.imshow(canvas_3d)



        # Map imageIdx to the CSI sample indices covering the same time window
        csiIndices = imageIdx2csiIndices(duration_in_sec, imageIdx, tsList, vidLength)
        if (len(csiIndices) > 0):
            startCSIIdx = csiIndices[0]
            endCSIIdx = csiIndices[-1]
            print(startCSIIdx, '-', endCSIIdx)
            print(endCSIIdx - startCSIIdx + 1)

            for j in range(0, 64):
                # Skip null/guard subcarriers (band edges and the center index 32)
                if (6 <= j < 32 or 33 <= j < 59):
                    textX = []
                    textY = []
                    for k in csiIndices:
                        textX.append(tsList[k] / (10**6))
                        textY.append(csiList[k][j])
                    ax2.plot(textX,
                             gaussian_filter(textY, sigma=1),
                             label='CSI subcarrier')
            print("added")
            print('lastTS', tsList[endCSIIdx])
        ax2.set_ylim([-10, +40])
        # ax2.xlabel("Frame")
        # ax2.ylabel("Amplitude(dB)")
        return ax0, ax1, ax2
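
For context, updatefig callbacks like the one above are typically driven by matplotlib.animation.FuncAnimation. A minimal self-contained sketch, with a dummy callback standing in for the real one and made-up frame count and interval:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 4))

def updatefig(i):
    # Dummy stand-in for the real callback: clear and redraw one axis per frame.
    ax2.cla()
    t = np.linspace(0, 2 * np.pi, 200)
    ax2.plot(t, np.sin(t + 0.1 * i), label='CSI subcarrier')
    return ax0, ax1, ax2

# interval is in milliseconds; ~33 ms approximates a 30 fps video.
anim = FuncAnimation(fig, updatefig, frames=100, interval=33, blit=False)
plt.show()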
Code example #3
File: test_realtime.py Project: rtmtree/CSPS
def updatefig(line):
    sLine = line % seqLen
    print("animate", sLine)

    if (sLine == seqLen - 1):
        fileIdx = 0
        print("============filePath1", filePaths[fileIdx])
        print("============color CSI", colorCSIs[fileIdx])
        valueSTA = 0.1
        valueAP = 0.1
        try:
            curFileSTA = pd.read_csv(filePaths[fileIdx])
            if (len(Channels) == 1):
                curFileSTA = curFileSTA[(
                    curFileSTA['mac'] == my_filter_address)]
                curFileSTA = curFileSTA[(curFileSTA['len'] == 384)]
                curFileSTA = curFileSTA[(curFileSTA['stbc'] == 0)]
                curFileSTA = curFileSTA[(curFileSTA['rx_state'] == 0)]
                curFileSTA = curFileSTA[(curFileSTA['sig_mode'] == 1)]
                curFileSTA = curFileSTA[(curFileSTA['bandwidth'] == 1)]
                curFileSTA = curFileSTA[(curFileSTA['secondary_channel'] == 1)]

            # tail = len(Channels) * 2  # unused: overridden just below
            tail = 500 + 1
            curCSI = curFileSTA['CSI_DATA'].tail(tail)
            RTcsiList = list(curCSI)
            curRSSI = curFileSTA['rssi'].tail(tail)
            RTrssiList = list(curRSSI)
            curTS = curFileSTA['local_timestamp'].tail(tail)
            tsSTAList = list(curTS)
            curChannel = curFileSTA['channel'].tail(tail)
            channelSTAList = list(curChannel)
            # print("timestamp",datetime.fromtimestamp(tsSTAList[0]+1611841591.5))
            if (plotBySubcarrier):
                valueSTA = []
                rssiSTA = []
                tsSTA = []
                startIndex = curRSSI.index[0]
                # Skip the last row, which may still be incomplete in the live CSV
                for i in range(len(RTcsiList) - 1):
                    valueSTA.append(parseCSI(RTcsiList[i]))
                    rssiSTA.append(RTrssiList[i])
                    tsSTA.append(tsSTAList[i])
        except Exception:
            print("catch animate RT")
            return [ax, ax0, ax1]
            # return [ax,ax0]

        if (shows[fileIdx] and not isinstance(valueSTA, float)
                and len(valueSTA) > 0):
            csiAll = valueSTA
            tsAll = tsSTA
            rssiAll = rssiSTA
            print("last rssiAll", rssiAll[-1])
            lastTS = tsAll[-1] / (10**6)
            lastTSbf = (tsAll[-1] / (10**6)) - (seqLen / 30)
            # print("last TS",lastTS)
            # print("last TSbf",lastTSbf)

            csiInRange = []
            for i in range(len(csiAll)):
                if (tsAll[i] / (10**6) > lastTSbf):
                    # if(True):
                    csiInRange.append([tsAll[i] / (10**6)] + csiAll[i])

            amplitudesAll = [
                filterNullSC(rawCSItoAmp(csiInRange[j][1:]))
                for j in range(len(csiInRange))
            ]
            csiIndices = list(range(len(csiInRange)))
            poseIndices = list(range(seqLen))
            poseList = []
            stepTS = (lastTS - lastTSbf) / seqLen
            for j in range(0, seqLen):
                poseList.append([lastTSbf + ((j + 1) * (stepTS))])
            samplingedAmp, expectedTS = samplingCSI(csiInRange,
                                                    csiIndices,
                                                    poseList,
                                                    poseIndices,
                                                    paddingTo=seqLen)

            # Plotting start
            for j in range(0, 52):
                textX = []
                textY = []
                for k in range(len(amplitudesAll)):
                    curCsi = amplitudesAll[k][j]
                    textX.append(csiInRange[k][0])
                    textY.append(curCsi)
                ax.plot(textX,
                        gaussian_filter(textY, sigma=0),
                        label='CSI subcarrier')
                textXSP = []
                textYSP = []
                for k in range(len(samplingedAmp)):
                    curCsi = samplingedAmp[k][j]
                    textXSP.append(expectedTS[k])
                    textYSP.append(curCsi)
                ax0.plot(textXSP,
                         gaussian_filter(textYSP, sigma=0),
                         label='CSI subcarrier')
            ax.set_xlim([textX[0], textX[-1]])
            ax0.set_xlim([textXSP[0], textXSP[-1]])

            sdSum = 0
            for j in range(0, 52):
                subClist = []
                for k in range(len(samplingedAmp)):
                    subClist.append(samplingedAmp[k][j])
                sdAmp = stdev(subClist)
                sdSum += sdAmp
            print("sum_diff", sdSum)

            if (sdSum > isActSDthreshold):
                # if(False):
                X = np.array([samplingedAmp])
                X = featureEngineer(X)
                print(X.shape)
                global y_pred
                y_pred = model.predict(X)
            else:
                y_pred = False
                ax1.cla()

    if not isinstance(y_pred, bool):
        print("have pose")
        poses_3dFromImage = PAMtoPose(y_pred[0][sLine].reshape(3, 19, 19))
        # poses_3dFromImage = stand3dmatrix  # debug alternative: fixed standing pose
        edgesFromImage = (Plotter3d.SKELETON_EDGES +
                          19 * np.arange(poses_3dFromImage.shape[0]).reshape(
                              (-1, 1, 1))).reshape((-1, 2))
        canvas_3d = np.zeros((450, 450, 3), dtype=np.uint8)
        plotter = Plotter3d(canvas_3d.shape[:2])
        plotter.plot(canvas_3d, poses_3dFromImage, edgesFromImage)
        ax1.imshow(canvas_3d)

    # return [ax,ax0]
    return [ax, ax0, ax1]
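
parseCSI, rawCSItoAmp, and filterNullSC are project helpers that are not shown in these excerpts. Judging from the call sites, rawCSItoAmp turns interleaved (imaginary, real) integer pairs into per-subcarrier amplitudes; a hedged, self-contained sketch of that presumed contract:

import numpy as np

def raw_csi_to_amp_sketch(raw):
    # Hypothetical stand-in for rawCSItoAmp: ESP32-style CSI arrives as
    # interleaved (imag, real) integers; amplitude is sqrt(im^2 + re^2).
    pairs = np.asarray(raw, dtype=np.float32).reshape(-1, 2)
    return np.hypot(pairs[:, 0], pairs[:, 1]).tolist()

print(raw_csi_to_amp_sketch([3, 4, 6, 8]))  # [5.0, 10.0]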
Code example #4
    args.add_argument('--no_show',
                      help='Optional. Do not display output.',
                      action='store_true')
    args.add_argument("-u",
                      "--utilization_monitors",
                      default='',
                      type=str,
                      help="Optional. List of monitors to show initially.")
    args = parser.parse_args()

    cap = open_images_capture(args.input, args.loop)

    stride = 8
    inference_engine = InferenceEngine(args.model, args.device, stride)
    canvas_3d = np.zeros((720, 1280, 3), dtype=np.uint8)
    plotter = Plotter3d(canvas_3d.shape[:2])
    canvas_3d_window_name = 'Canvas 3D'
    if not args.no_show:
        cv2.namedWindow(canvas_3d_window_name)
        cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback)

    file_path = args.extrinsics_path
    if file_path is None:
        file_path = Path(__file__).parent / 'data/extrinsics.json'
    with open(file_path, 'r') as f:
        extrinsics = json.load(f)
    R = np.array(extrinsics['R'], dtype=np.float32)
    t = np.array(extrinsics['t'], dtype=np.float32)

    is_video = cap.get_type() in ('VIDEO', 'CAMERA')
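
The R and t loaded here feed rotate_poses in the later examples, which maps camera-space poses into the world frame defined by the extrinsics. A minimal sketch of that transform, assuming each pose row packs 19 joints as flat (x, y, z, score) quadruples and t has shape (3, 1):

import numpy as np

def rotate_poses_sketch(poses_3d, R, t):
    # p_world = R^-1 (p_camera - t), applied joint by joint;
    # the fourth (score) channel is left untouched.
    R_inv = np.linalg.inv(R)
    for pose_id in range(len(poses_3d)):
        pose = poses_3d[pose_id].reshape((-1, 4)).transpose()
        pose[0:3, :] = np.dot(R_inv, pose[0:3, :] - t)
        poses_3d[pose_id] = pose.transpose().reshape(-1)
    return poses_3d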
Code example #5
def main():
    # model files check and download
    check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)
    check_file_existance(FILE_PATH)

    # prepare input data
    canvas_3d = np.zeros((720, 1280, 3), dtype=np.uint8)
    plotter = Plotter3d(canvas_3d.shape[:2])
    canvas_3d_window_name = 'Canvas3D'
    cv2.namedWindow(canvas_3d_window_name)
    cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback)

    with open(FILE_PATH, 'r') as f:
        extrinsics = json.load(f)

    R = np.array(extrinsics['R'], dtype=np.float32)
    t = np.array(extrinsics['t'], dtype=np.float32)

    if args.video is None:
        frame_provider = ImageReader([args.input])
        is_video = False
    else:
        frame_provider = VideoReader(args.video)
        is_video = True

    fx = -1  # focal length; negative means "estimate from frame width" below
    delay = 1
    esc_code = 27  # Esc key
    p_code = 112  # ord('p'): pause
    space_code = 32  # space bar
    mean_time = 0
    img_mean = np.array([128, 128, 128], dtype=np.float32)
    base_width_calculated = False

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # inference
    for frame_id, frame in enumerate(frame_provider):
        current_time = cv2.getTickCount()
        if frame is None:
            break

        if not base_width_calculated:
            IMAGE_WIDTH = frame.shape[1] * (IMAGE_HEIGHT / frame.shape[0])
            IMAGE_WIDTH = int(IMAGE_WIDTH / STRIDE) * STRIDE
            net.set_input_shape((1, 3, IMAGE_HEIGHT, IMAGE_WIDTH))
            base_width_calculated = True

        input_scale = IMAGE_HEIGHT / frame.shape[0]
        scaled_img = cv2.resize(frame,
                                dsize=None,
                                fx=input_scale,
                                fy=input_scale)
        # better to pad, but cut out for demo
        scaled_img = scaled_img[:, 0:scaled_img.shape[1] -
                                (scaled_img.shape[1] % STRIDE)]

        if fx < 0:  # Focal length is unknown
            fx = np.float32(0.8 * frame.shape[1])

        normalized_img = (scaled_img.astype(np.float32) - img_mean) / 255.0
        normalized_img = np.expand_dims(normalized_img.transpose(2, 0, 1),
                                        axis=0)

        # execution
        if is_video:
            input_blobs = net.get_input_blob_list()
            net.set_input_blob_data(normalized_img, input_blobs[0])
            net.update()
            features, heatmaps, pafs = net.get_results()

        else:
            print('Start inference...')
            if args.benchmark:
                print('BENCHMARK mode')
                for i in range(5):
                    start = int(round(time.time() * 1000))
                    features, heatmaps, pafs = net.predict([normalized_img])
                    end = int(round(time.time() * 1000))
                    print(f'\tailia processing time {end - start} ms')
            else:
                features, heatmaps, pafs = net.predict([normalized_img])

        inference_result = (features[-1].squeeze(), heatmaps[-1].squeeze(),
                            pafs[-1].squeeze())

        poses_3d, poses_2d = parse_poses(inference_result, input_scale, STRIDE,
                                         fx, is_video)
        edges = []
        if len(poses_3d):
            poses_3d = rotate_poses(poses_3d, R, t)
            poses_3d_copy = poses_3d.copy()
            x = poses_3d_copy[:, 0::4]
            y = poses_3d_copy[:, 1::4]
            z = poses_3d_copy[:, 2::4]
            poses_3d[:, 0::4], poses_3d[:, 1::4], poses_3d[:, 2::4] = -z, x, -y

            poses_3d = poses_3d.reshape(poses_3d.shape[0], 19, -1)[:, :, 0:3]
            edges = (Plotter3d.SKELETON_EDGES +
                     19 * np.arange(poses_3d.shape[0]).reshape(
                         (-1, 1, 1))).reshape((-1, 2))
        plotter.plot(canvas_3d, poses_3d, edges)

        if is_video:
            cv2.imshow(canvas_3d_window_name, canvas_3d)
        else:
            cv2.imwrite(f'Canvas3D_{frame_id}.png', canvas_3d)

        draw_poses(frame, poses_2d)
        current_time = (cv2.getTickCount() -
                        current_time) / cv2.getTickFrequency()
        if mean_time == 0:
            mean_time = current_time
        else:
            mean_time = mean_time * 0.95 + current_time * 0.05
        cv2.putText(frame, 'FPS: {}'.format(int(1 / mean_time * 10) / 10),
                    (40, 80), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))

        if is_video:
            cv2.imshow('ICV 3D Human Pose Estimation', frame)
        else:
            cv2.imwrite(args.savepath, frame)

        key = cv2.waitKey(delay)
        if key == esc_code:
            break
        if key == p_code:
            if delay == 1:
                delay = 0
            else:
                delay = 1

        if delay == 0 and args.rotate3d:
            key = 0
            while (key != p_code and key != esc_code and key != space_code):
                plotter.plot(canvas_3d, poses_3d, edges)
                cv2.imshow(canvas_3d_window_name, canvas_3d)
                key = cv2.waitKey(33)
            if key == esc_code:
                break
            else:
                delay = 1

    print('Script finished successfully.')
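
The stride-4 slicing in the block above works because parse_poses returns each pose as a flat array of 19 joints with (x, y, z, score) per joint; the assignment swaps camera axes into the plotter's frame. A tiny demonstration on dummy data:

import numpy as np

pose = np.arange(19 * 4, dtype=np.float32).reshape(1, -1)  # one dummy pose
x = pose[:, 0::4].copy()
y = pose[:, 1::4].copy()
z = pose[:, 2::4].copy()
# Same remap as above: x' = -z, y' = x, z' = -y (scores untouched).
pose[:, 0::4], pose[:, 1::4], pose[:, 2::4] = -z, x, -y
joints = pose.reshape(pose.shape[0], 19, -1)[:, :, 0:3]  # drop the score column
print(joints.shape)  # (1, 19, 3)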
Code example #6
File: trainPose3d.py Project: rtmtree/CSPS
        def updatefig(i):

            global y_pred
            global y_test
            global checkIndex
            # print("updatefig",i)
            i = i % seqLen
            if i == seqLen - 1:  # finished one sequence; advance to the next sample
                checkIndex = checkIndex + 1
            if checkIndex == len(y_pred):  # wrap around at the end of the predictions
                checkIndex = 0
            ax0.cla()
            ax1.cla()
            ax2.cla()

            poseIdx = i

            # if (poseIdx == len(csiList)-1):
            #     print('close')
            #     plt.close(fig)

            csiIdx = i

            # Plot 3D Pose
            poses_3dFromCSI = PAMtoPose(y_pred[checkIndex][poseIdx].reshape(
                3, 19, 19))

            edgesFromCSI = (Plotter3d.SKELETON_EDGES +
                            19 * np.arange(poses_3dFromCSI.shape[0]).reshape(
                                (-1, 1, 1))).reshape((-1, 2))
            canvas_3d = np.zeros((450, 450, 3), dtype=np.uint8)
            plotter = Plotter3d(canvas_3d.shape[:2])
            plotter.plot(canvas_3d, poses_3dFromCSI, edgesFromCSI)
            ax0.imshow(canvas_3d)

            poses_3dFromImage = PAMtoPose(y_test[checkIndex][poseIdx].reshape(
                3, 19, 19))
            # poses_3dFromImage[0][9]=np.array([ 0,0,0]) # right shoulder
            # poses_3dFromImage[0][6]=np.array([ 0,0,0]) # left hip

            edgesFromImageGT = (
                Plotter3d.SKELETON_EDGES +
                19 * np.arange(poses_3dFromImage.shape[0]).reshape(
                    (-1, 1, 1))).reshape((-1, 2))
            canvas_3dGT = np.zeros((450, 450, 3), dtype=np.uint8)
            plotterGT = Plotter3d(canvas_3dGT.shape[:2])
            plotterGT.plot(canvas_3dGT, poses_3dFromImage, edgesFromImageGT)
            ax1.imshow(canvas_3dGT)

            # Plot CSI
            for j in range(0, 52):
                textX = []
                textY = []
                for k in range(len(x_test[checkIndex])):
                    curCsi = x_test[checkIndex][k]
                    textX.append(k)
                    textY.append(curCsi[j])
                ax2.plot(textX,
                         gaussian_filter(textY, sigma=0),
                         label='CSI subcarrier')

            return ax0, ax1, ax2
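
The SKELETON_EDGES + 19 * np.arange(...) expression that recurs throughout these examples builds one flat edge list for all detected poses: each pose owns 19 consecutive keypoint slots, so its edges are the single-pose template shifted by 19 * pose_index. A small demonstration with a stub template (the real one is Plotter3d.SKELETON_EDGES):

import numpy as np

SKELETON_EDGES_STUB = np.array([[0, 1], [1, 2]])  # stand-in for the 19-joint template
num_poses = 2
edges = (SKELETON_EDGES_STUB +
         19 * np.arange(num_poses).reshape((-1, 1, 1))).reshape((-1, 2))
print(edges)  # pose 0 -> [[0 1] [1 2]], pose 1 -> [[19 20] [20 21]]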
Code example #7
        def updatefig(i, noPlot=False):
            print("updatefig", i, label)
            # ax0.cla()
            # ax1.cla()
            # ax2.cla()

            imageIdx = i + startFrom
            print("imageIdx", imageIdx)

            if (not noPlot and imageIdx == vidLength):
                print(
                    f'imageIdx {imageIdx} == vidLength {vidLength}; closing!')
                plt.close(fig)
            try:
                frame = vid.get_data(imageIdx)
                input_scale = base_height / frame.shape[0]
                fx = np.float32(0.8 * frame.shape[1])
                scaled_img = cv2.resize(frame,
                                        dsize=None,
                                        fx=input_scale,
                                        fy=input_scale)
                scaled_img = scaled_img[:, 0:scaled_img.shape[1] - (
                    scaled_img.shape[1] %
                    stride)]  # better to pad, but cut out for demo

                inference_result = net.infer(scaled_img)
                poses_3dFromImage, poses_2d = parse_poses(
                    inference_result, input_scale, stride, fx, is_video)
            except Exception:
                poses_3dFromImage = []
                poses_2d = []

            if (len(poses_3dFromImage) == 0 or len(poses_2d) == 0
                    or np.array_equal(poses_3dFromImage, nullPose3D)):
                print("No pose detected")
                # return False if noPlot==True else ax0,ax1,ax2
                # return False
                poses_3dFromImage = np.array([np.zeros((19, 3))])
            else:
                poses_3dFromImage = rotate_poses(poses_3dFromImage, R, t)
                poses_3dFromImage = reshape_poses(poses_3dFromImage)
                # poses_3dFromImage = stand3dmatrix  # debug alternative: fixed pose

            if not noPlot:
                edgesFromImage = (
                    Plotter3d.SKELETON_EDGES +
                    19 * np.arange(poses_3dFromImage.shape[0]).reshape(
                        (-1, 1, 1))).reshape((-1, 2))
                canvas_3d = np.zeros((450, 450, 3), dtype=np.uint8)
                plotter = Plotter3d(canvas_3d.shape[:2])
                plotter.plot(canvas_3d, poses_3dFromImage, edgesFromImage)
                ax0.imshow(canvas_3d)
                draw_poses(frame, poses_2d)
                ax1.imshow(frame)

            # Setting the values for all axes.
            csiIndices, parsedTimeInVid = imageIdx2csiIndicesPrecise(
                duration_in_sec, imageIdx, tsList, vidLength, lastsec)

            if noPlot:
                print("parsedTimeInVid", parsedTimeInVid)
                parsedPoses_3dFromImage = np.array(
                    poses_3dFromImage[0]).reshape(3 * 19)
                parsedTimeInVid_array = np.array([parsedTimeInVid])
                pose3D_value.append(
                    np.concatenate(
                        (parsedTimeInVid_array, parsedPoses_3dFromImage)))
                if (len(csiIndices) > 0):
                    startCSIIdx = csiIndices[0]
                    endCSIIdx = csiIndices[len(csiIndices) - 1]
                    print(startCSIIdx, '-', endCSIIdx)
                    print(endCSIIdx - startCSIIdx + 1)
                    for k in csiIndices:
                        curParseCSI = parseCSI(csiList[k])
                        print("adding ", curParseCSI)
                        if (curParseCSI is not False):
                            print("len check")
                            print(k, len(curParseCSI), tsList[k])
                            if (len(curParseCSI) != 384):
                                print("len not 384")
                                continue
                            print("isInt check")
                            if not all(isinstance(v, int) for v in curParseCSI):
                                print("found a non-int value; skipping")
                                continue
                            csi_value.append([tsList[k]] + curParseCSI)
                            print("added ", k)
                        else:
                            csi_value.append([tsList[k]] +
                                             [0 for l in range(384)])
                            print("added ", k, 'as 0s')
            else:
                for j in range(0, 64):
                    if (6 <= j < 32 or 33 <= j < 59):
                        textX = []
                        textY = []
                        for k in csiIndices:
                            textX.append(tsList[k] / (10**6))
                            # assumption: the raw CSI row for sample k is csiList[k]
                            textY.append(rawCSItoAmp(parseCSI(csiList[k]), 128)[j])
                        ax2.plot(textX,
                                 gaussian_filter(textY, sigma=1),
                                 label='CSI subcarrier')
                print("added")

            # print(tsList[csiIdx])
            return False  #if noPlot==True else ax0,ax1,ax2
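
imageIdx2csiIndices and imageIdx2csiIndicesPrecise are project helpers not shown here; their job, judging from the call sites, is to map a video frame index onto the CSI samples whose timestamps fall inside that frame's time window. A hedged sketch of that presumed contract (the name and signature are assumptions):

import numpy as np

def image_idx_to_csi_indices_sketch(duration_s, image_idx, ts_list, vid_length):
    # Window covered by this frame, in seconds from the start of the video.
    frame_start = duration_s * image_idx / vid_length
    frame_end = duration_s * (image_idx + 1) / vid_length
    ts = np.asarray(ts_list, dtype=np.float64) / 1e6  # timestamps in µs -> s
    return np.nonzero((ts >= frame_start) & (ts < frame_end))[0].tolist()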
Code example #8
File: inference.py Project: jasongedev/robot-teleop
def run_inference(args):
    from modules.inference_engine_pytorch import InferenceEnginePyTorch

    socket_server = SocketServer(args.port)
    joint_angle_calculator = JointAngleCalculator()

    stride = 8

    model_path = os.path.join('models', 'human-pose-estimation-3d.pth')
    net = InferenceEnginePyTorch(model_path, "GPU")

    canvas_3d = np.zeros((720, 1280, 3), dtype=np.uint8)
    plotter = Plotter3d(canvas_3d.shape[:2])
    canvas_3d_window_name = 'Canvas 3D'
    cv2.namedWindow(canvas_3d_window_name)
    cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback)

    file_path = os.path.join('data', 'extrinsics.json')
    with open(file_path, 'r') as f:
        extrinsics = json.load(f)
    R = np.array(extrinsics['R'], dtype=np.float32)
    t = np.array(extrinsics['t'], dtype=np.float32)

    frame_provider = ImageReader(args.images)
    is_video = False
    if args.video != '':
        frame_provider = VideoReader(args.video)
        is_video = True
    base_height = args.height_size
    fx = 1  # focal length (positive, so the fx < 0 auto-estimate below never fires)

    delay = 1
    esc_code = 27
    p_code = 112
    space_code = 32
    mean_time = 0

    for frame in frame_provider:
        current_time = cv2.getTickCount()
        if frame is None:
            break
        input_scale = base_height / frame.shape[0]
        scaled_img = cv2.resize(frame, dsize=None, fx=input_scale, fy=input_scale)
        scaled_img = scaled_img[:, 0:scaled_img.shape[1] - (scaled_img.shape[1] % stride)]  # better to pad, but cut out for demo
        if fx < 0:  # Focal length is unknown
            fx = np.float32(0.8 * frame.shape[1])

        inference_result = net.infer(scaled_img)
        poses_3d, poses_2d = parse_poses(inference_result, input_scale, stride, fx, is_video)
        edges = []

        if len(poses_3d):
            poses_3d = rotate_poses(poses_3d, R, t)
            poses_3d_copy = poses_3d.copy()
            x = poses_3d_copy[:, 0::4]
            y = poses_3d_copy[:, 1::4]
            z = poses_3d_copy[:, 2::4]
            poses_3d[:, 0::4], poses_3d[:, 1::4], poses_3d[:, 2::4] = -z, x, -y

            poses_3d = poses_3d.reshape(poses_3d.shape[0], 19, -1)[:, :, 0:3]
            edges = (Plotter3d.SKELETON_EDGES + 19 * np.arange(poses_3d.shape[0]).reshape((-1, 1, 1))).reshape((-1, 2))

        plotter.plot(canvas_3d, poses_3d, edges)
        cv2.imshow(canvas_3d_window_name, canvas_3d)

        draw_poses(frame, poses_2d)
        current_time = (cv2.getTickCount() - current_time) / cv2.getTickFrequency()
        if mean_time == 0:
            mean_time = current_time
        else:
            mean_time = mean_time * 0.95 + current_time * 0.05
        cv2.putText(frame, 'FPS: {}'.format(int(1 / mean_time * 10) / 10),
                    (40, 80), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
        cv2.imshow('ICV 3D Human Pose Estimation', frame)

        key = cv2.waitKey(delay)
        if key == esc_code:
            break
        if key == p_code:
            if delay == 1:
                delay = 0
            else:
                delay = 1
        if delay == 0 or not is_video:  # allows rotating the 3D canvas while paused
            key = 0
            while (key != p_code
                   and key != esc_code
                   and key != space_code):
                plotter.plot(canvas_3d, poses_3d, edges)
                cv2.imshow(canvas_3d_window_name, canvas_3d)
                key = cv2.waitKey(33)
            if key == esc_code:
                break
            else:
                delay = 1
        
        joint_angles = joint_angle_calculator.calculate_angles(poses_3d)
        if joint_angles:
            socket_server.send_data(joint_angles)
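
JointAngleCalculator.calculate_angles is not shown in this excerpt; a plausible core of such a calculator is the angle at a joint formed by three 3D keypoints. A hedged sketch (the function name and the a-b-c convention are assumptions, not the project's API):

import numpy as np

def joint_angle_deg(a, b, c):
    # Angle at keypoint b between segments b->a and b->c, in degrees.
    v1 = np.asarray(a, dtype=np.float64) - np.asarray(b, dtype=np.float64)
    v2 = np.asarray(c, dtype=np.float64) - np.asarray(b, dtype=np.float64)
    cos = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-9)
    return float(np.degrees(np.arccos(np.clip(cos, -1.0, 1.0))))

print(joint_angle_deg([1, 0, 0], [0, 0, 0], [0, 1, 0]))  # 90.0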