def video_to_images(videos, inDir, outDir):
    for video in videos:
        new_directory = os.path.join(outDir, os.path.splitext(video)[0])
        if not os.path.isdir(new_directory):
            os.mkdir(new_directory)
        videocap = cv.VideoCapture(os.path.join(inDir, video))
        success, image = videocap.read()
        count = 0

        while success:
            #code.interact(local=dict(globals(), **locals()))
            if "frame{}.jpg".format(count) in os.listdir(new_directory) and "frame{}.json".format(count) in os.listdir(new_directory):
                print("frame{}.jpg and frame{}.json already in directory".format(count, count))
                success, image = videocap.read()
                count += 1
                continue

            cv.imwrite(os.path.join(new_directory, frame_name), image)
            print("{}: Saved frame {}".format(video, count))

            datum = op.Datum()
            datum.cvInputData = image
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))

            json_file = os.path.join(new_directory, json_name)
            if datum.poseKeypoints is not None:
                with open(json_file, "w") as f:
                    json.dump(datum.poseKeypoints.tolist(), f)

            success, image = videocap.read()
            count += 1
        videocap.release()
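The function above relies on module-level op, opWrapper, cv and json objects. A minimal setup sketch, assuming a standard pyopenpose build (the models path is hypothetical):

import json
import os

import cv2 as cv
from openpose import pyopenpose as op

params = dict()
params["model_folder"] = "/path/to/openpose/models/"  # hypothetical path
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()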
Example #2
    def getOpenposeDataFrom(self, frame=None):
        if frame is None:
            raise Exception("NoneType passed into OP wrapper class")
        datum = op.Datum()
        datum.cvInputData = frame
        self.opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        return datum
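For context, a sketch of how the enclosing wrapper class might be constructed (class name and model path are assumptions, not from the original source):

class OpenPoseWrapper:
    def __init__(self, model_folder="/path/to/openpose/models/"):  # hypothetical path
        self.opWrapper = op.WrapperPython()
        self.opWrapper.configure({"model_folder": model_folder})
        self.opWrapper.start()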
Example #3
    def get_sample_heatmaps():
        # These parameters are set globally; unset them before creating a new OpenPose object. See *
        params = dict()
        params["model_folder"] = "../../../models/"
        params["heatmaps_add_parts"] = True
        params["heatmaps_add_bkg"] = True
        params["heatmaps_add_PAFs"] = True
        params["heatmaps_scale"] = 3
        params["upsampling_ratio"] = 1
        params["body"] = 1

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image and get heatmap
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        poseHeatMaps = datum.poseHeatMaps.copy()
        opWrapper.stop()

        return poseHeatMaps
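A minimal sketch for browsing the maps this returns, assuming poseHeatMaps comes back as a (num_maps, height, width) float array with the body-part maps first, then the background and PAF channels:

heatmaps = get_sample_heatmaps()
for i in range(heatmaps.shape[0]):
    # normalize each map to 8-bit for display, whatever heatmaps_scale was requested
    vis = cv2.normalize(heatmaps[i], None, 0, 255, cv2.NORM_MINMAX).astype('uint8')
    cv2.imshow("heatmap {}".format(i), vis)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()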
Example #4
def generate_keypoints(infile, show):

    datum = op.Datum()
    imageToProcess = cv2.imread(infile)
    datum.cvInputData = imageToProcess
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))

    # print("Body keypoints: \n" + str(datum.poseKeypoints))
    print('Body keypoints shape:', datum.poseKeypoints.shape)
    # print('Image shape:', datum.cvOutputData.shape)

    image = datum.cvOutputData
    data = {}

    # show the keypoints of the FIRST person only
    # for x, y, score in datum.poseKeypoints[0]:
    #     if score != 0:
    #         image = cv2.circle(image, (int(x), int(y)), radius=5, color=(0, 255, 255), thickness=-1)

    if show:
        cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    else:
        outfile_data, outfile_pix = generate_outfile(infile=infile)

        data['keypoints'] = datum.poseKeypoints
        data['dimension'] = datum.cvOutputData.shape

        np.save(outfile_data, data)
        cv2.imwrite(outfile_pix, image)

        print('Saving data to', outfile_data)
        print('Saving pix to', outfile_pix)
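Since the saved object is a dict, NumPy pickles it, so loading it back needs allow_pickle=True; a sketch (file name hypothetical):

data = np.load("clip0_data.npy", allow_pickle=True).item()  # hypothetical file name
keypoints = data['keypoints']        # (num_people, num_joints, 3): x, y, confidence
height, width, channels = data['dimension']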
Example #5
    def posemodl(image):
        datum = op.Datum()
        # imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = image
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Display Image
        # print("Body keypoints: \n" + str(datum.poseKeypoints))
        # cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
        # cv2.waitKey(0)
        kp = datum.poseKeypoints
        return kp, datum.cvOutputData
Example #6
    def body_from_image(self, image: np.ndarray) -> op.Datum:
        """
        画像から骨格を推定します.

        :param image: 画像
        :return: 推定データ
        """
        # 推定
        datum = op.Datum()
        datum.cvInputData = image
        self.op_wrapper.emplaceAndPop(op.VectorDatum([datum]))

        return datum
Example #7
def opFrameRun(frame, opWrapper):
    """Applique openpose sur une image et retourne l'object datum contenant :
    	- l'image avec le squelette
    	- les keypoints en numpy
    """
    datum = op.Datum()
    datum.cvInputData = frame
    #old version of pyopenpose library
    #opWrapper.emplaceAndPop([datum])

    #new version of pyopenpose library
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
    return datum
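A usage sketch for opFrameRun, assuming a wrapper configured and started as in the other examples (model path and input file are hypothetical):

opWrapper = op.WrapperPython()
opWrapper.configure({"model_folder": "/path/to/openpose/models/"})  # hypothetical path
opWrapper.start()

frame = cv2.imread("frame.jpg")  # hypothetical input image
datum = opFrameRun(frame, opWrapper)
print(datum.poseKeypoints)       # None when no person is detected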
Example #8
def get_body_points(image_path):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath('__file__'))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../../x64/Release;' + dir_path + '/../../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../../models/"

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Display Image
        print("Body keypoints: \n" + str(datum.poseKeypoints))
        # cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", datum.cvOutputData)
        # cv2.waitKey(0)
        return datum.poseKeypoints
    except Exception as e:
        print(e)
        sys.exit(-1)


# get_body_points("C:/Users/lab/Desktop/taskey/action_video_test/2.jpg")
Example #9
def getkey(img):  # get the keypoint coordinates
    # rotate 90 degrees (needed for phone-recorded video)
    dst_im = cv2.flip(
        img, 0
    )  # prototype: cv2.flip(src, flipCode[, dst]) -> dst; flipCode is the axis: 0 = x-axis, 1 = y-axis, -1 = both
    dst_im = cv2.transpose(dst_im)
    im = cv2.resize(dst_im, (360, 640))  # resize to a fixed resolution
    datum = op.Datum()
    #cv2.imshow('img',img)
    #cv2.imshow('dst_im',dst_im)
    #cv2.imshow('im',im)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    datum.cvInputData = im
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
    return datum.cvOutputData, datum.poseKeypoints  # returns the skeleton image and the keypoint coordinates
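As an aside, the flip-then-transpose pair above amounts to a single 90-degree clockwise rotation, so one call gives the same result:

dst_im = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)  # equivalent to flip(img, 0) + transpose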
Example #10
def create_clips(file_name=None):
    # Starting OpenPose
    params = dict()
    params["model_folder"] = "/home/rkaufman/dev/openpose/models"
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    out_dir = "/home/rkaufman/Downloads/restaurant_footage_openposified"
    #while True:
    #    file_name = input("Next clip please.\n\t-->")
    for file_name in clip_paths[3:]:
        #while(True):
        #    file_name=input("next video please:\n\t-->")
        cap = cv2.VideoCapture(file_name)
        base, tail = os.path.split(file_name)
        codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        outfile_name = os.path.join(
            out_dir, tail[:tail.find('.')] + "_openpose" + ".avi")
        input_fps = 30
        #        input_fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        print("Writing to {}".format(outfile_name))
        out_vid = cv2.VideoWriter(outfile_name, codec, input_fps,
                                  (width, height))
        i = 0
        while (cap.isOpened()):
            ret, frame = cap.read()
            print("\titeration: {}".format(i))
            if (not ret): break
            datum = op.Datum()
            datum.cvInputData = frame
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            op_image = datum.cvOutputData
            # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            out_vid.write(op_image)
            #cv2.imshow('frame', frame)
            #cv2.imshow('openpose', op_image)
            i += 1
        #   if cv2.waitKey(1) & 0xFF == ord('q'):
        #       break
        print("Successfully wrote to {}\n".format(outfile_name))

        cap.release()
        out_vid.release()
Example #11
def get_skeleton(bbox,frame, player_index, offset=15):

    x1,y1,x2,y2 = bbox.astype(int)
    height, width, _  = frame.shape
    # extract skeleton from bounding box
    if y1-offset>=0 and x1-offset>=0 and y2+offset<height and x2+offset<width:
        frame_ = frame[y1-offset:(y2)+offset,x1-offset:x2+offset, :]
    else:
        frame_ = frame[y1:y2,x1:x2, :]
    scale_percent = 8
    width = int(frame_.shape[1] * scale_percent / 10)
    height = int(frame_.shape[0] * scale_percent / 10)
    dim = (width, height)
    src_img = cv2.resize(frame_, dim, interpolation=cv2.INTER_AREA)
    datum.cvInputData = src_img
    #print(datum.cvOutputData.shape)
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
    #if datum.poseKeypoints is not None: print( datum.poseKeypoints[0,:,:])

    if datum.poseKeypoints is not None:
        cv2.imwrite("./results/test" + str(player_index) +"_"+ str("ss") + ".jpg",datum.cvOutputData)
        keypoints = datum.poseKeypoints[0,:,:].tolist()
        return keypoints
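get_skeleton returns None implicitly when OpenPose detects nobody in the crop, so callers should guard; a usage sketch (the bbox values are hypothetical, assumed to be an [x1, y1, x2, y2] numpy array):

bbox = np.array([50, 40, 210, 400])  # hypothetical box
keypoints = get_skeleton(bbox, frame, player_index=0)
if keypoints is not None:
    print(len(keypoints), "joints found")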
Example #12
try:
    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"

    # Add others in path?
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1:
            next_item = args[1][i + 1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = next_item

    # Construct it from system arguments
    # op.init_argv(args[1])
    # oppython = op.OpenposePython()

    # Starting OpenPose
    opWrapper = op.WrapperPython(op.ThreadManagerMode.AsynchronousOut)
    opWrapper.configure(params)
    opWrapper.start()

    # Main loop
    userWantsToExit = False
    while not userWantsToExit:
        # Pop frame
        datumProcessed = op.VectorDatum()
        if opWrapper.waitAndPop(datumProcessed):
            if not args[0].no_display:
                # Display image
                userWantsToExit = display(datumProcessed)
            printKeypoints(datumProcessed)
        else:
            break
except Exception as e:
    print(e)
    sys.exit(-1)
Example #13
    def run_op(self, frame):
        self.datum.cvInputData = frame
        self.opWrp.emplaceAndPop(op.VectorDatum([self.datum]))
        return self.datum.cvOutputData
Example #14
def upload_file():
    if request.method == 'POST':
        # if 'file' not in request.files:
        #     flash('No file part')
        #     return redirect(request.url)
        #
        print(request.files.keys())
        print(request.files['data'])

        file = request.files['data']
        print(dir(file))
        print(file.filename)

        file.save(os.path.join(os.getcwd(), 'uploads.mp4'))

        try:
            try:
                import pyopenpose as op

            except ImportError as e:
                raise e

            try:
                import cv2

            except ImportError as e:
                raise e

            try:
                from test_video import SampleVideo

            except ImportError as e:
                raise e

            params = dict()
            params["model_folder"] = "C:\\openpose\\models\\"
            params["number_people_max"] = 1

            opWrapper = op.WrapperPython()
            opWrapper.configure(params)
            opWrapper.start()

            datum = op.Datum()
            video_path = os.path.join(os.getcwd(), 'uploads.mp4')

            cap = cv2.VideoCapture(video_path)
            print(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            out_path = os.path.join(os.getcwd(), 'static/output.mp4')
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(out_path, fourcc, 20.0,
                                  (int(cap.get(3)), int(cap.get(4))))

            for pos in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
                ret, frame = cap.read()
                if not ret:
                    break

                datum.cvInputData = frame
                opWrapper.emplaceAndPop(op.VectorDatum([datum]))

                out.write(datum.cvOutputData)

                print(datum.poseKeypoints)
                print(cap.get(cv2.CAP_PROP_POS_FRAMES))

            cap.release()
            out.release()
            print('end')

        except Exception as e:
            print(dir(e))
            print(e)
            sys.exit(-1)
        # if file.filename == '':
        #     flash('No selected file')
        #     return redirect(request.url)

        # if file and allowed_file(file.filename):
        # filename = secure_filename(file.filename)
        # file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # return redirect(url_for('upload_file',
        #                         filename=filename))

        # return '''
        # <!doctype html>
        # <title>Upload new File</title>
        # <h1>Upload new File</h1>
        # <form method=post enctype=multipart/form-data>
        #     <input type=file name=file>
        #     <input type=submit value=Upload>
        # </form>
        # '''
        return "good"

    if request.method == 'GET':
        return '''
Example #15
def train(trainloader, validationloader):
    ### Initialization
    params = list(cnn2.parameters()) + list(cnn1.parameters())
    optimizer = torch.optim.Adam(params, lr=0.0001)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150], gamma=0.1)
    dict1 = {'loss': [], 'valLoss': [], 'valPSNR': [], 'epoch': -1}

    start = time.time()
    cLoss = dict1['loss']
    valLoss = dict1['valLoss']
    valPSNR = dict1['valPSNR']

    checkpoint_counter = 0

    for epoch in range(dict1['epoch'] + 1, 3):
        print("Epoch: ", epoch)

        # Append and reset
        cLoss.append([])
        valLoss.append([])
        valPSNR.append([])
        iLoss = 0

        # Increment scheduler count
        scheduler.step()

        for trainIndex, (trainData, trainFrameIndex) in enumerate(trainloader, 0):

            ## Getting the input and the target from the training set
            frame0, frameT, frame1 = trainData

            I0 = frame0.to(device)
            I1 = frame1.to(device)
            IFrame = frameT.to(device)

            optimizer.zero_grad()

            # Calculate flow between reference frames I0 and I1
            flowOut = cnn1(torch.cat((I0, I1), dim=1))

            # Extracting flows between I0 and I1 - F_0_1 and F_1_0
            F_0_1 = flowOut[:, :2, :, :]
            F_1_0 = flowOut[:, 2:, :, :]

            fCoeff = model.getFlowCoeff(trainFrameIndex, device)

            # Calculate intermediate flows
            F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
            F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0

            # Get intermediate frames from the intermediate flows
            g_I0_F_t_0 = trainFlowBackWarp(I0, F_t_0)
            g_I1_F_t_1 = trainFlowBackWarp(I1, F_t_1)

            # Calculate optical flow residuals and visibility maps
            intrpOut = cnn2(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))

            # Extract optical flow residuals and visibility maps
            F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
            F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
            V_t_0 = torch.sigmoid(intrpOut[:, 4:5, :, :])
            V_t_1 = 1 - V_t_0

            # Get intermediate frames from the intermediate flows
            g_I0_F_t_0_f = trainFlowBackWarp(I0, F_t_0_f)
            g_I1_F_t_1_f = trainFlowBackWarp(I1, F_t_1_f)

            wCoeff = model.getWarpCoeff(trainFrameIndex, device)

            # Calculate final intermediate frame
            Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (
                        wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)

            interpolated_keypoints = []
            intermediate_keypoints = []

            for batchNum in range(2):
                try:
                    TP(IFrame[batchNum].cpu().detach()).save("intermediate{}.jpg".format(batchNum))
                    datum = op.Datum()
                    image = cv.imread("./intermediate{}.jpg".format(batchNum))
                    datum.cvInputData = image
                    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
                    if datum.poseKeypoints is None:
                        keypoints = np.zeros([25, 3], dtype=float)
                    else:
                        keypoints = datum.poseKeypoints[0]

                    intermediate_keypoints.append([keypoints])
                except:
                    pass

            for batchNum in range(2):
                try:
                    TP(Ft_p[batchNum].cpu().detach()).save("interpolated{}.jpg".format(batchNum))
                    datum = op.Datum()
                    image = cv.imread("./interpolated{}.jpg".format(batchNum))
                    datum.cvInputData = image
                    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
                    if datum.poseKeypoints is None:
                        keypoints = np.zeros([25, 3], dtype=float)
                    else:
                        keypoints = datum.poseKeypoints[0]

                    interpolated_keypoints.append([keypoints])
                except:
                    pass

            intermediate_keypoints = torch.tensor(intermediate_keypoints).float().to(device)
            interpolated_keypoints = torch.tensor(interpolated_keypoints).float().to(device)

            # Loss
            recnLoss = L1_lossFn(Ft_p, IFrame)

            prcpLoss = MSE_LossFn(vgg16_conv(Ft_p), vgg16_conv(IFrame))

            warpLoss = L1_lossFn(g_I0_F_t_0, IFrame) + L1_lossFn(g_I1_F_t_1, IFrame) + L1_lossFn(
                trainFlowBackWarp(I0, F_1_0), I1) + L1_lossFn(trainFlowBackWarp(I1, F_0_1), I0)

            loss_smooth_1_0 = torch.mean(torch.abs(F_1_0[:, :, :, :-1] - F_1_0[:, :, :, 1:])) + torch.mean(
                torch.abs(F_1_0[:, :, :-1, :] - F_1_0[:, :, 1:, :]))
            loss_smooth_0_1 = torch.mean(torch.abs(F_0_1[:, :, :, :-1] - F_0_1[:, :, :, 1:])) + torch.mean(
                torch.abs(F_0_1[:, :, :-1, :] - F_0_1[:, :, 1:, :]))
            loss_smooth = loss_smooth_1_0 + loss_smooth_0_1

            keypointsLoss = MSE_LossFn(interpolated_keypoints, intermediate_keypoints)
            # code.interact(local=dict(globals(), **locals()))
            # Total loss - coefficients 163 and 82 play the role of the paper's 0.8 and 0.4,
            # scaled up because the loss in the paper is computed on pixels in range 0-255
            # while the input to our network is in range 0-1
            loss = 163 * recnLoss + 82 * warpLoss + 0.004 * prcpLoss + 0.8 * loss_smooth + 0.001 * keypointsLoss
            print("trainIndex: {}, loss: {}".format(trainIndex, loss))

            # Backpropagate
            loss.backward()
            optimizer.step()
            iLoss += loss.item()

            # Validation and progress every 50 iterations
            if ((trainIndex % 50) == 49):
                end = time.time()

                psnr, vLoss, valImg1, valImg2 = validate(validationloader)

                valPSNR[epoch].append(psnr)
                valLoss[epoch].append(vLoss)

                # Tensorboard
                itr = trainIndex + epoch * (len(trainloader))

                writer.add_scalars('Loss', {'trainLoss': iLoss / 50,
                                            'validationLoss': vLoss}, itr)
                writer.add_scalar('PSNR', psnr, itr)

                writer.add_image('Validation1', valImg1, itr)
                writer.add_image('Validation2', valImg2, itr)

                #####

                endVal = time.time()

                print(
                    " Loss: %0.6f  Iterations: %4d/%4d  TrainExecTime: %0.1f  ValLoss:%0.6f  ValPSNR: %0.4f  ValEvalTime: %0.2f LearningRate: %f" % (
                    iLoss / 50, trainIndex, len(trainloader), end - start, vLoss, psnr, endVal - end,
                    get_lr(optimizer)))

                cLoss[epoch].append(iLoss / 50)
                iLoss = 0
                start = time.time()

        # Create checkpoint after every 3 epochs
        if (not os.path.isdir('./checkpoints')):
            os.mkdir('./checkpoints')
        if ((epoch % 3) == 2):
            dict1 = {
                'Detail': "End to end Super SloMo.",
                'epoch': epoch,
                'timestamp': datetime.datetime.now(),
                'trainBatchSz': 2,
                'validationBatchSz': 2,
                'learningRate': get_lr(optimizer),
                'loss': cLoss,
                'valLoss': valLoss,
                'valPSNR': valPSNR,
                'state_dictFC': cnn1.state_dict(),
                'state_dictAT': cnn2.state_dict(),
            }
            torch.save(dict1, "./checkpoints/Interpolation" + str(checkpoint_counter) + ".ckpt")
            checkpoint_counter += 1
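A minimal sketch for resuming training from one of these checkpoints (the checkpoint index is hypothetical):

ckpt = torch.load("./checkpoints/Interpolation0.ckpt")  # hypothetical index
cnn1.load_state_dict(ckpt['state_dictFC'])
cnn2.load_state_dict(ckpt['state_dictAT'])
dict1 = {'loss': ckpt['loss'], 'valLoss': ckpt['valLoss'],
         'valPSNR': ckpt['valPSNR'], 'epoch': ckpt['epoch']}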
Example #16
def upload_file():
    # verify identity via JWT so the result can be stored in the DB
    current_user = get_jwt_identity()
    print(current_user, 'requested an analysis')

    # current time
    # used when updating the data in MongoDB
    now = datetime.now()
    # use the current timestamp (XXXX.XXXX) as the image save name
    image_save_name = datetime.timestamp(now)

    file = request.files['video']
    video_path = os.path.join(VIDEO_SAVE_PATH, f"{str(image_save_name)}.mp4")
    # print('current path: ', os.getcwd())
    # print('video file save path: ', video_path)
    file.save(video_path)
    print('video saved')
    """
    -----------------------
    Load the model from golfdb
    -----------------------
    """
    print('starting golfdb')
    ds = SampleVideo(video_path,
                     transform=transforms.Compose([
                         ToTensor(),
                         Normalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])
                     ]))

    dl = DataLoader(ds, batch_size=1, shuffle=False, drop_last=False)

    model = EventDetector(pretrain=True,
                          width_mult=1.,
                          lstm_layers=1,
                          lstm_hidden=256,
                          bidirectional=True,
                          dropout=False)

    save_dict = torch.load('models/golfdb/weight/swingnet_1800.pth.tar')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using device:', device)
    model.load_state_dict(save_dict['model_state_dict'])
    model.to(device)
    model.eval()
    print("Loaded model weights")

    seq_length = 64

    print('Testing...')
    for sample in dl:
        images = sample['images']
        # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches
        batch = 0
        while batch * seq_length < images.shape[1]:
            if (batch + 1) * seq_length > images.shape[1]:
                image_batch = images[:, batch * seq_length:, :, :, :]
            else:
                image_batch = images[:, batch * seq_length:(batch + 1) *
                                     seq_length, :, :, :]
            logits = model(image_batch.cuda())
            if batch == 0:
                probs = F.softmax(logits.data, dim=1).cpu().numpy()
            else:
                probs = np.append(probs,
                                  F.softmax(logits.data, dim=1).cpu().numpy(),
                                  0)
            batch += 1

    events = np.argmax(probs, axis=0)[:-1]
    print('Predicted event frames: {}'.format(events))
    """
    cv2 비디오 캡쳐 오픈
    """
    cap = cv2.VideoCapture(video_path)

    confidence = []
    for i, e in enumerate(events):
        confidence.append(probs[e, i])
    print('Condifence: {}'.format([np.round(c, 3) for c in confidence]))
    """
        openpose 객체 오픈
        및 파라미터 설정
        """
    params = dict()
    params["model_folder"] = "C:\\openpose\\models\\"
    params["number_people_max"] = 1
    params["net_resolution"] = "-1x240"

    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    datum = op.Datum()

    # collect each frame's keypoints in a dict
    key_data = dict()

    for i, e in enumerate(events):
        cap.set(cv2.CAP_PROP_POS_FRAMES, e)
        _, img = cap.read()
        # cv2.putText(img, '{:.3f}'.format(confidence[i]), (20, 20), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 0, 255))

        # feed the frame selected by golfdb into the OpenPose object
        datum.cvInputData = img
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # add the club-head data detected with YOLO
        club_head_points, confidences = yolo(frame=img,
                                             size=416,
                                             score_threshold=0.3,
                                             nms_threshold=0.3)
        x = club_head_points[0]
        y = club_head_points[1]
        w = club_head_points[2]
        h = club_head_points[3]
        club_head_list = np.array([x, y, confidences])

        _kp = datum.poseKeypoints[0]
        key_data[i] = np.append(_kp, [club_head_list], axis=0)

        # also draw the club-head box on the OpenPose output image
        output_image = cv2.rectangle(datum.cvOutputData, (x, y),
                                     (x + w, y + h), (0, 0, 255), 2)

        cv2.imwrite(f'{IMAGE_SAVE_PATH}{image_save_name}_{str(i)}.png',
                    output_image)

    cap.release()

    # analyze the extracted keypoints
    swing_anal = Anal.Anal(key_data, face_on=True)

    result = swing_anal.check_all()

    # put the path where the images are saved into the result
    result["filePath"] = image_save_name

    # add the upload date to the result data
    upload_date = now.strftime('%Y-%m-%d')
    result["date"] = upload_date
    # col.update_one(
    #     { "email": current_user},
    #     { "$push": {
    #             f"swingData": json.dumps(result, ensure_ascii=False)
    #         }
    #     }
    # )
    col.update_one(
        {"email": current_user},
        {"$push": {
            "swingData": json.dumps(result, ensure_ascii=False)
        }})
    # print(json.dumps(result))
    print(f'{current_user} : {upload_date}, swing analysis updated')

    # issue a badge once the analysis count passes certain thresholds
    current_user_doc = col.find_one({"email": current_user})
    counts = len(current_user_doc["swingData"])
    print('analysis count: ', counts)
    if counts == 1:
        col.update_one({'email': current_user},
                       {'$push': {
                           'badges': 'analysis_1'
                       }})
    if counts == 10:
        col.update_one({"email": current_user},
                       {"$push": {
                           "badges": "analysis_10"
                       }})
    if counts == 30:
        col.update_one({"email": current_user},
                       {"$push": {
                           "badges": "analysis_50"
                       }})
    if counts == 50 and 'analysis_50' not in current_user_doc["badges"]:
        col.update_one({"email": current_user},
                       {"$push": {
                           "badges": "analysis_50"
                       }})

    # return json.dumps(result, cls=MyEncoder)
    return result
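Each key_data entry above stacks the 25 BODY_25 joints plus the appended club-head row; a sketch of unpacking one entry (assuming the default BODY_25 model, so shapes are an assumption):

entry = key_data[0]                  # (26, 3): 25 joints + club head, each (x, y, confidence)
joints, club_head = entry[:25], entry[25]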
Example #17
    def start(self):
        # # load openpose python api
        # if self.arg.openpose is not None:
        #     sys.path.append('{}/python'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python/openpose'.format(self.arg.openpose))
        #     sys.path.append('{}/build/example/openpose'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python/openpose/Release'.format(self.arg.openpose))
        #     sys.path.append('{}/build/x64/Release'.format(self.arg.openpose))
        #     sys.path.append('{}/build/bin'.format(self.arg.openpose))
        # try:
        #     #from openpose import pyopenpose as op
        #     import pyopenpose as op
        # except:
        #     print('Can not find Openpose Python API.')
        #     return
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            sys.path.append(dir_path +
                            '/../../openpose/build/python/openpose/Release')
            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + dir_path + '/../../openpose/build/x64/Release;' + dir_path + '/../../openpose/build/bin;'
            import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        video_name = self.arg.video.split('/')[-1].split('.')[0]
        label_name_path = './resource/kinetics_skeleton/label_name.txt'
        with open(label_name_path) as f:
            label_name = f.readlines()
            label_name = [line.rstrip() for line in label_name]
            self.label_name = label_name

        # initiate
        opWrapper = op.WrapperPython()
        params = dict(model_folder='./models', model_pose='COCO')
        opWrapper.configure(params)
        opWrapper.start()
        self.model.eval()
        pose_tracker = naive_pose_tracker()

        if self.arg.video == 'camera_source':
            video_capture = cv2.VideoCapture(0)
        else:
            video_capture = cv2.VideoCapture(self.arg.video)

        # start recognition
        start_time = time.time()
        frame_index = 0
        while (True):

            tic = time.time()

            # get image
            ret, orig_image = video_capture.read()
            if orig_image is None:
                break
            source_H, source_W, _ = orig_image.shape
            orig_image = cv2.resize(orig_image,
                                    (256 * source_W // source_H, 256))
            H, W, _ = orig_image.shape

            # pose estimation
            datum = op.Datum()
            datum.cvInputData = orig_image
            # opWrapper.emplaceAndPop([datum])
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            multi_pose = datum.poseKeypoints  # (num_person, num_joint, 3) keypoint data
            # if (multi_pose is None) | (len(multi_pose.shape) != 3):  # is it a 3-D array?
            if multi_pose is None:  # skip frames where nobody is detected
                continue

            # normalization
            multi_pose[:, :, 0] = multi_pose[:, :, 0] / W
            multi_pose[:, :, 1] = multi_pose[:, :, 1] / H
            multi_pose[:, :, 0:2] = multi_pose[:, :, 0:2] - 0.5
            multi_pose[:, :, 0][multi_pose[:, :, 2] == 0] = 0
            multi_pose[:, :, 1][multi_pose[:, :, 2] == 0] = 0

            # pose tracking
            if self.arg.video == 'camera_source':
                frame_index = int(
                    (time.time() - start_time) * self.arg.model_fps)
            else:
                frame_index += 1
            pose_tracker.update(multi_pose, frame_index)
            data_numpy = pose_tracker.get_skeleton_sequence()
            data = torch.from_numpy(data_numpy)
            data = data.unsqueeze(0)
            data = data.float().to(
                self.dev).detach()  # (1, channel, frame, joint, person)

            # model predict
            voting_label_name, video_label_name, output, intensity = self.predict(
                data)

            # visualization
            app_fps = 1 / (time.time() - tic)
            image = self.render(data_numpy, voting_label_name,
                                video_label_name, intensity, orig_image,
                                app_fps)
            cv2.imshow("ST-GCN", image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Example #18
def detect_skeleton(file_name):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../openpose/build/python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + '../../openpose/build/x64/Release;' + '../../openpose/build/bin'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../openpose/build/python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        parser.add_argument("--video_path",
                            default=f"../media/{file_name}.mp4",
                            help="Read input video (avi, mp4).")
        parser.add_argument("--no_display",
                            default=False,
                            help="Enable to disable the visual display.")
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../openpose/models/"
        params["disable_multi_thread"] = "false"
        numberGPUs = 1

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1:
                next_item = args[1][i + 1]
            else:
                next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item

        # Construct it from system arguments
        # op.init_argv(args[1])
        # oppython = op.OpenposePython()

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Video
        datum = op.Datum()
        cap = cv2.VideoCapture(args[0].video_path)
        set_frame_size(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                       int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        # create a video with the skeleton overlaid on it
        # fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
        # out = cv2.VideoWriter(f'../output/output/video/{file_name}.avi', fourcc, 20.0, get_frame_size())

        frame_data = []
        frame_id = -1

        if not cap.isOpened():
            print('cannot open the file')

        while cap.isOpened():
            frame_id += 1

            grabbed, frame = cap.read()

            if frame is None or not grabbed:
                print("Finish reading video frames...")
                break

            datum.cvInputData = frame
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            # print("Body keypoints: \n" + str(datum.poseKeypoints))

            # save the video
            #out.write(datum.cvOutputData)

            # cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
            # cv2.waitKey(1)

            # save the keypoint as a list
            frame_data.append(make_json(datum, frame_id))


        # write the collected keypoint list as JSON
        with open(f'../output/json/output{file_name}.json',
                  'w',
                  encoding="utf-8") as make_file:
            json.dump(frame_data, make_file, ensure_ascii=False, indent="\t")

        cap.release()
        # out.release()
        # cv2.destroyAllWindows()
    except Exception as e:
        print(e)
        sys.exit(-1)

    return frame_data
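make_json is used above but not shown; a hypothetical sketch (name and fields are assumptions) that flattens one frame's detections into a JSON-friendly dict:

def make_json(datum, frame_id):  # hypothetical helper, not from the original source
    people = []
    if datum.poseKeypoints is not None:
        for person in datum.poseKeypoints:
            people.append(person.tolist())  # (num_joints, 3) -> nested lists
    return {'frame_id': frame_id, 'people': people}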
Example #19
def video_start():
    global SSF_Wait, SLL_Wait, Pose, n, db, canvas, video_flag, cap, sx, sy, cursor, fill_line, canvas3, V
    '''configure the model'''
    params = dict()  # create a parameter dict
    params["model_folder"] = model  # model path
    params["model_pose"] = "BODY_25"  # choose the pose model
    params["number_people_max"] = 2  # maximum number of people to detect
    params["display"] = 2  # 2D mode

    opWrapper = op.WrapperPython()
    opWrapper.configure(params)  # apply the parameters above
    opWrapper.start()
    '''configure the model'''
    cap = cv2.VideoCapture(0)  # open the camera
    Initial = 0
    video_flag = True
    week = str(strftime("%A", localtime()))
    while video_flag:  # loop while the camera is enabled
        SSF_Wait = 5
        SLL_Wait = 2  # speed up the video
        cap.open(0)
        ret, Frame = cap.read()
        H = round(0.63 * sy)
        W = round(0.906 * sy)
        Frame = cv2.resize(Frame, (W, H))  # resize the image
        imageToProcess = cv2.cvtColor(Frame, cv2.COLOR_RGB2BGR)  # convert RGB to BGR
        BGR = imageToProcess

        '''compute the joint keypoints'''
        datum = op.Datum()
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # print("Body keypoints: \n" + str(datum.poseKeypoints))
        Pose = datum.poseKeypoints  # the keypoints; None if no person is in the image
        # BGR = datum.cvOutputData  # imageToProcess after recognition
        '''compute the joint keypoints'''

        '''initial values'''
        if Initial == 0:  # assign initial values
            '''SSF'''
            n = 0  # default user = 0
            j = 0
            j1 = 0
            j2 = 0
            j3 = 0  # four states
            SSF = 0  # counter
            SSF_Angle = 270  # actual angle
            SSF_Max = 270  # maximum arm-raise value
            SSF_Time = 0  # measurement interval
            SSF_Num = 3  # number of intervals
            SSF_Hold = 0  # hold time
            SSF_Flag = 0  # timing gate closed
            SSF_start = 0  # start time
            SSF_Deviation = 0  # angle delta per 0.18 s
            SSF_Sum = 0  # sum of angle deltas
            hip_angle = 90  # hip angle
            helper = 'None'
            string2 = 'None'
            comps = 0  # compensation angle
            SSF_Row = 1  # start recording at row 1
            '''SLL'''
            i = 0
            i1 = 0
            i2 = 0
            i3 = 0
            SLL = 0
            SLL_Angle = 120  # by default 120 marks the start of a leg lift
            SLL_Max = 90
            SLL_Time = 0  # measurement interval
            SLL_Num = 3  # number of intervals
            SLL_Hold = 0  # hold time
            SLL_Flag = 0  # timing gate closed
            SLL_start = 0  # start time
            SLL_Deviation = 0  # angle delta per 0.18 s
            SLL_Sum = 0  # sum of angle deltas
            SLL_Row = 1  # start recording at row 1
            SLL_string2 = 'None'
            # actual angle
            V = 1
            Initial = 1  # initialization done
        '''initial values'''

        '''model measurement'''
        if Pose is not None:

            '''Helper'''
            if Pose.shape[0] == 1:
                n = 0  # only one person
                helper = 'Yes!'
            else:
                if Pose[1][8][2] != 0 and Pose[1][9][2] != 0 and Pose[1][12][2] != 0:
                    if Pose[0][8][2] != 0 and Pose[0][9][2] != 0 and Pose[0][12][2] != 0:
                        if Pose[0][8][1] > Pose[1][8][1] and Pose[0][9][1] > Pose[1][9][1] and Pose[0][12][1] > \
                                Pose[1][12][1]:
                            n = 0  # person 0 is the seated one
                        else:
                            n = 1
                    else:
                        n = 1
                else:
                    n = 0
                helper = 'No'
            '''Helper'''  # n = user

            '''Compensation'''
            # neck_angle = round(Angle(5, 1, 8), 0)
            comps = np.arctan(abs((Pose[n][1][0] - Pose[n][8][0])) / (Pose[n][8][1] - Pose[n][1][1])) * 180 / np.pi
            comps = round(comps, 0)
            if comps < 10:
                string2 = 'Straight!'
                comps = 0
            else:
                if Pose[n][1][0] > Pose[n][8][0]:
                    string2 = 'Left Comps! ' + str(comps)
                else:
                    string2 = 'Right Comps! ' + str(abs(comps))

            if SSF_Max > Angle(1, 2, 3) and j == 1 and j1 == 1 and j2 == 1 and j3 == 1:
                SSF_Max = round(Angle(1, 2, 3), 0)  # track the smallest angle while the arm is raised
                # real_angle > Angle(1, 2, 3) - (180 - hip_angle - neck_angle)
            '''Compensation'''

            '''SSF'''
            if Pose[n][4][2] != 0 and Pose[n][2][2] != 0 and Pose[n][1][2] != 0 and Pose[n][3][2] != 0:
                '''Num'''
                SSF_Deviation = abs(SSF_Angle - Angle(1, 2, 3))
                SSF_Angle = Angle(1, 2, 3)
                if Pose[n][4][1] < Pose[n][2][1]:  # hands up
                    j3 = 1
                else:  # hands down
                    j3 = 0
                if j == 0 and j1 == 0 and j2 == 1 and j3 == 1:  # 0011: arm raised
                    SSF = SSF + 1
                j = j1
                j1 = j2
                j2 = j3
                '''Num'''

                '''Hold time'''
                if j == 1 and j1 == 1 and j2 == 1 and j3 == 1:  # 1111: hands up
                    SSF_Time = SSF_Time + 1  # unit: 0.18 s
                    if SSF_Time % SSF_Num == 0:  # start the check
                        if SSF_Sum / SSF_Num < 6:  # is it a hold?
                            if SSF_Flag == 0:
                                SSF_start = time()
                                SSF_Flag = 1
                        else:
                            SSF_Flag = 0
                        SSF_Sum = 0
                    else:  # accumulate the angle deltas
                        SSF_Sum = SSF_Deviation + SSF_Sum
                else:
                    SSF_Flag = 0
                if SSF_Flag == 1 and round(time() - SSF_start, 2) > 1:  # capture the final hold value
                    SSF_Hold = round(time() - SSF_start, 2)
                '''Hold time'''

                '''Fill in database'''
                if j == 1 and j1 == 1 and j2 == 0 and j3 == 0:  # 1100: arm lowered
                    sql = "INSERT INTO " + week + "(SSF_Times, SSF_Angle, SSF_Hold, SSF_Comps ,SSF_DIY) VALUES (%s, %s, " \
                                                  "%s, %s, %s); "
                    cursor.execute(sql, (SSF_Row, 270 - SSF_Max, SSF_Hold, comps, helper))
                    db.commit()
                    SSF_Row = SSF_Row + 1
                    SSF_Max = 270
                    SSF_Hold = 0
                '''Fill in database'''

            else:
                BGR = cv2.putText(BGR, 'Move left', (5, H - 90), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
            '''SSF'''
            ###################################################################################################
            '''Sitting Leg Lift'''
            if Pose[n][9][2] != 0 and Pose[n][10][2] != 0 and Pose[n][11][2] != 0:

                '''COMPS'''
                SLL_comps = np.arctan(
                    abs((Pose[n][9][1] - Pose[n][10][1]) / (Pose[n][10][0] - Pose[n][9][0]))) * 180 / np.pi
                SLL_comps = round(SLL_comps, 0)
                if SLL_comps < 15:
                    SLL_string2 = 'Horizontal'
                    SLL_comps = 0
                else:
                    SLL_string2 = 'Comps! ' + str(abs(SLL_comps))
                '''COMPS'''
                '''Num'''
                SLL_Deviation = SLL_Angle - Angle(9, 10, 11)
                SLL_Angle = Angle(9, 10, 11)
                if SLL_Max < SLL_Angle:
                    SLL_Max = round(SLL_Angle, 0)
                if SLL_Angle > 120:  # leg up
                    i3 = 1
                else:  # leg down
                    i3 = 0
                if i == 0 and i1 == 0 and i2 == 1 and i3 == 1:  # 0011: leg raised
                    SLL = SLL + 1
                i = i1
                i1 = i2
                i2 = i3
                '''Num'''

                '''Hold time'''
                if i == 1 and i1 == 1 and i2 == 1 and i3 == 1:  # 1111: leg up
                    SLL_Time = SLL_Time + 1  # unit: 0.18 s
                    if SLL_Time % SLL_Num == 0:  # start the check
                        if SLL_Sum / SLL_Num < 6:  # is it a hold?
                            if SLL_Flag == 0:
                                SLL_start = time()
                                SLL_Flag = 1
                        else:
                            SLL_Flag = 0
                        SLL_Sum = 0
                    else:  # accumulate the angle deltas
                        SLL_Sum = SLL_Deviation + SLL_Sum
                else:
                    SLL_Flag = 0
                if SLL_Flag == 1 and round(time() - SLL_start, 2) > 1:  # capture the final hold value
                    SLL_Hold = round(time() - SLL_start, 2)
                '''Hold time'''

                '''Fill in database'''
                if i == 1 and i1 == 1 and i2 == 0 and i3 == 0:  # 1100: leg lowered
                    sql = "INSERT INTO " + week + "(SLL_Times, SLL_Angle, SLL_Hold, SLL_Comps ,SLL_DIY) VALUES (%s, %s, " \
                                                  "%s, %s, %s); "
                    cursor = db.cursor()
                    cursor.execute(sql, (SLL_Row, SLL_Max, SLL_Hold, SLL_comps, helper))
                    db.commit()
                    SLL_Row = SLL_Row + 1
                    SLL_Max = 90
                    SLL_Hold = 0
                '''Fill in database'''

            else:
                BGR = cv2.putText(BGR, 'Move left', (5, H - 90), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)

            '''SLL'''
        else:
            BGR = cv2.putText(BGR, 'Move into camera', (5, H - 60), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)

        '''mirror_feedback'''
        string0 = 'SSF: ' + str(SSF) + ' Hold:' + str(SSF_Hold) + 's'
        BGR = cv2.putText(BGR, string0, (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        BGR = cv2.putText(BGR, 'Angle: ' + str(270 - SSF_Max), (5, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        BGR = cv2.putText(BGR, string2, (5, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        BGR = cv2.putText(BGR, helper, (5, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        ############################################################################################
        SLL_string0 = 'SLL: ' + str(SLL) + ' Hold:' + str(SLL_Hold) + 's'
        BGR = cv2.putText(BGR, SLL_string0, (W - 350, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
        BGR = cv2.putText(BGR, 'Angle: ' + str(SLL_Max), (W - 350, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
        BGR = cv2.putText(BGR, SLL_string2, (W - 350, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
        BGR = cv2.putText(BGR, helper, (W - 350, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)

        '''widget display'''
        # mirror feedback
        RGB = Image.fromarray(BGR)
        RGB = ImageTk.PhotoImage(RGB)
        canvas.create_image(0, 0, anchor='nw', image=RGB)  # refresh the image; 'nw' anchors it at the north-west corner
        canvas.bg = RGB
        text2.set('Camera Open!')
        # times
        print(SSF, V)
        if int(SSF) < int(V):
            text3.set(str(SSF) + ' / ' + str(V))
        else:
            text3.set('Finish!')
        # progress bar
        if j == 1 and j1 == 1 and j2 == 1 and j3 == 1:  # hands up
            k = 0.4 * sx / 90  # total bar length is 0.4 * sx
            n = k * (180 - SSF_Max)
        else:
            n = 0
        canvas3.coords(fill_line, (0, 0, n, 60))
        window.update()
        cv2.waitKey(1)
Example #20
    def pose_estimation(self):
        # load openpose python api
        if self.arg.openpose is not None:
            # sys.path.append('{}/python'.format(self.arg.openpose))
            # sys.path.append('{}/build/python'.format(self.arg.openpose))
            # print(self.arg.openpose + '/build/python/openpose/Release')
            sys.path.append(self.arg.openpose +
                            '/build/python/openpose/Release')
            # sys.path.append(self.arg.openpose + '\\build\\x64\\Release')
            # sys.path.append(self.arg.openpose + '\\build\\bin')

            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + self.arg.openpose + '/build/x64/Release;' + self.arg.openpose + '/build/bin;'
            print(sys.path)
        try:
            # from openpose import pyopenpose as op
            import pyopenpose as op
        except ImportError:
            print('Cannot find the OpenPose Python API.')
            return

        video_name = self.arg.video.split('/')[-1].split('.')[0]

        # initiate
        opWrapper = op.WrapperPython()
        params = dict(model_folder='./models', model_pose='COCO')
        opWrapper.configure(params)
        opWrapper.start()
        self.model.eval()
        video_capture = cv2.VideoCapture(self.arg.video)
        video_length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        pose_tracker = naive_pose_tracker(data_frame=video_length)

        # pose estimation
        start_time = time.time()
        frame_index = 0
        video = list()
        pose_list = []
        pbar = tqdm.tqdm(total=video_length)
        while (True):

            # get image
            ret, orig_image = video_capture.read()
            if orig_image is None:
                break
            source_H, source_W, _ = orig_image.shape
            orig_image = cv2.resize(orig_image,
                                    (self.arg.shortedge, self.arg.shortedge *
                                     source_H // source_W))
            H, W, _ = orig_image.shape
            video.append(orig_image)

            # pose estimation
            datum = op.Datum()
            datum.cvInputData = orig_image
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            multi_pose = datum.poseKeypoints  # (num_person, num_joint, 3)
            if multi_pose is None or len(multi_pose.shape) != 3:
                continue
            my_pose = multi_pose.copy()
            my_pose = self.__get_prior_person(my_pose)
            pose_list.append(my_pose)
            # normalization
            multi_pose[:, :, 0] = multi_pose[:, :, 0] / W
            multi_pose[:, :, 1] = multi_pose[:, :, 1] / H
            multi_pose[:, :, 0:2] = multi_pose[:, :, 0:2] - 0.5
            multi_pose[:, :, 0][multi_pose[:, :, 2] == 0] = 0
            multi_pose[:, :, 1][multi_pose[:, :, 2] == 0] = 0

            # pose tracking
            pose_tracker.update(multi_pose, frame_index)
            frame_index += 1
            pbar.update(1)
            # print('Pose estimation ({}/{}).'.format(frame_index, video_length))

        data_numpy = pose_tracker.get_skeleton_sequence()
        pbar.close()
        # np.save('stgcn_data_numpy_original.npy', data_numpy)
        return video, data_numpy, pose_list
Example #21
def toCsv(file_path):
    # Import Openpose (Windows/Ubuntu/OSX)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    try:
        # Windows Import
        if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append(dir_path + '/../../python/openpose/Release')
            os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + \
                '/../../x64/Release;' + dir_path + '/../../bin;'
            import pyopenpose as op
        else:
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append('/home/aleix/openpose/build/python')
            # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
            # sys.path.append('/usr/local/python')
            from openpose import pyopenpose as op
    except ImportError as e:
        print(
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )
        raise e
    # Flags
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--video_path",
        default=file_path,
        help=
        "Process an image. Read all standard formats (jpg, png, bmp, etc.).")
    args = parser.parse_known_args()

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "/home/aleix/openpose/models/"

    # Add others in path?
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1:
            next_item = args[1][i + 1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = next_item

    try:

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.VideoCapture(args[0].video_path)
        arrayNoNorm = np.array(0)
        totalFrames = int(imageToProcess.get(cv2.CAP_PROP_FRAME_COUNT))
        target = cutFps(totalFrames)
        counter = 0
        framesRet = 0

        # 1. Store every pose in the video in an array.
        while imageToProcess.isOpened():
            if counter == target:
                ret, frame = imageToProcess.read()
                if ret:
                    datum.cvInputData = frame
                    opWrapper.emplaceAndPop(op.VectorDatum([datum]))

                    # Display Image
                    kp_array = np.array(datum.poseKeypoints)

                    # remove the outlier joints
                    nou_array = np.delete(kp_array[0], [3, 4, 15, 17, 22, 23],
                                          0)

                    arrayNoNorm = np.append(arrayNoNorm, nou_array)

                    # print("Body keypoints: \n" + str(datum.poseKeypoints))
                    # cv2.imshow("OpenPose 1.7.0 - Tutorial Python API",
                    #     datum.cvOutputData)
                    cv2.waitKey(1)
                    counter = 0
                    framesRet += 1
                else:
                    break
            else:
                imageToProcess.grab()
                counter += 1

        label = file_path.split('/')[7]
        label = label.split('_')[0]
        label = label.split('.')[0]
        print(label)
        #label = label.join([c for c in label if c.isalpha()])
        pos = 0

        # print("Frames sencers: ",totalFrames)
        # print("Frames retallats: ", framesRet)
        for i in range(framesRet):
            # 2. Normalitzem dades frame per frame (cada 75 posicions ja que te x, y i accuracy)
            posini = pos
            pos += 57
            arrayFrame = arrayNoNorm[posini:pos]
            # print(len(arrayFrame))
            # if len(arrayFrame) == 1:
            #     print(arrayFrame)
            arrayFrame = normValues(arrayFrame, label)

            # 3. Save them in a csv
            with open(
                    '/home/aleix/Escriptori/coses_tfg/tfg-correct-exercise/api/files/keypoints.csv',
                    'a') as file:
                writer = csv.writer(file)
                writer.writerow(arrayFrame)

        # Pad up to 75 frames, since that is the maximum frame count.
        padding = [label]
        for i in range(38):
            padding.append(-1000.0)
        while (framesRet != 75):
            with open(
                    '/home/aleix/Escriptori/coses_tfg/tfg-correct-exercise/api/files/keypoints.csv',
                    'a') as file:
                writer = csv.writer(file)
                writer.writerow(padding)
            framesRet += 1

    except Exception as e:
        print(e)
        sys.exit(-1)
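
# The loop above calls a `cutFps` helper that is not shown in this example.
# A minimal sketch of what it presumably does, assuming it returns how many
# frames to grab-and-skip between processed frames so that at most 75 frames
# (the padding target used in the example above) are kept per video:
def cutFps(totalFrames, maxFrames=75):
    # Each cycle of the loop above does `target` grab-only iterations and then
    # decodes one frame, so about totalFrames / (target + 1) frames are kept.
    # Pick target so the kept count stays at or below maxFrames.
    if totalFrames <= maxFrames:
        return 0
    return totalFrames // maxFrames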
def toCsv(file_path):
    # Import Openpose (Windows/Ubuntu/OSX)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    try:
        # Windows Import
        if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append(dir_path + '/../../python/openpose/Release')
            os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + \
                '/../../x64/Release;' + dir_path + '/../../bin;'
            import pyopenpose as op
        else:
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append('../../python')
            # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
            # sys.path.append('/usr/local/python')
            from openpose import pyopenpose as op
    except ImportError as e:
        print(
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )
        raise e
    # Flags
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--image_path",
        default=file_path,
        help=
        "Process an image. Read all standard formats (jpg, png, bmp, etc.).")
    args = parser.parse_known_args()

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"

    # Add others in path?
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1:
            next_item = args[1][i + 1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = next_item

    try:

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Display Image
        kp_array = np.array(datum.poseKeypoints)

        # Normalize the keypoints of this single image.
        arrayFrame = normValues(kp_array)

        # print(arrayFrame)
        with open(
                '/home/aleix/Escriptori/coses_tfg/tfg-correct-exercise/dataset/train_dataset.csv',
                'a') as file:
            writer = csv.writer(file)
            writer.writerow(arrayFrame)

    except Exception as e:
        print(e)
        sys.exit(-1)
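
# Both snippets above rely on a `normValues` helper that is not shown. A
# sketch of one plausible implementation, assuming it drops the per-keypoint
# confidence, min-max normalizes x/y per frame, and prepends the class label
# when one is given (the exact normalization used originally is unknown):
def normValues(keypoints, label=None):
    import numpy as np
    arr = np.asarray(keypoints, dtype=float).reshape(-1, 3)
    xy = arr[:, :2]
    mins = xy.min(axis=0)
    span = xy.max(axis=0) - mins
    span[span == 0] = 1.0  # avoid division by zero on degenerate frames
    xy = (xy - mins) / span
    row = xy.flatten().tolist()
    return ([label] + row) if label is not None else row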
def main():
    buildPath = os.path.dirname(os.path.realpath(__file__))
    
    sys.path.append(buildPath + "/python/openpose/Release")
    os.environ['PATH'] += ';' + (buildPath + "/x64/Release") + ';' + (buildPath + "/bin;")
    import pyopenpose as op
    
    params = dict()
    params["model_folder"] = "../models/"
    params["model_pose"] = "BODY_25"
    params["net_resolution"] = "-1x224"
    params["hand"] = True
    params["hand_detector"] = 0
    params["hand_net_resolution"] = "224x224"
    openposeHandle = op.WrapperPython()
    openposeHandle.configure(params)
    openposeHandle.start()
    
    
    datum = op.Datum()
    cam = cv2.VideoCapture(0)
    
    frameElapsedTime = 1
    totalTimeElapsed = 0
    sampleIndex = 0
    OutputStr = ""
    while True:
        frameStart = time.time()
        ret, frame = cam.read()
        frame = cv2.flip(frame, 1)
        datum.cvInputData = frame
        openposeHandle.emplaceAndPop(op.VectorDatum([datum]))
        if datum.poseKeypoints is not None:
            if totalTimeElapsed > samplePeriod:
                totalTimeElapsed -= samplePeriod
                focusIndex = getFocusBodyIndex(datum.poseKeypoints, len(datum.poseKeypoints))
                body = datum.poseKeypoints[focusIndex]
                leftHand = datum.handKeypoints[0][focusIndex]
                rightHand = datum.handKeypoints[1][focusIndex]
                            
                # Feature vector: body joints 0-7 and 15-18 (x, y) plus the
                # left-hand wrist point, matching the training CSV columns.
                X = [body[0][0], body[0][1], body[1][0], body[1][1],
                     body[2][0], body[2][1], body[3][0], body[3][1],
                     body[4][0], body[4][1], body[5][0], body[5][1],
                     body[6][0], body[6][1], body[7][0], body[7][1],
                     body[15][0], body[15][1], body[16][0], body[16][1],
                     body[17][0], body[17][1], body[18][0], body[18][1],
                     leftHand[0][0], leftHand[0][1]]
                Xdict = {'a': X}
                X = pd.DataFrame(Xdict)
                
                
                #X=preprocessing.scale(X)
                #print(X)
                preX=detect.predict(X.T)
                max_a = np.argmax(preX, axis=1)
                
                if max_a == 0:
                    OutputStr = "studying"
                elif max_a == 1:
                    OutputStr = "using smartphone"
                elif max_a == 2:
                    OutputStr = "sleeping"
        
        datum.cvOutputData = cv2.putText(datum.cvOutputData, OutputStr, (2, 34), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0))
        datum.cvOutputData = cv2.putText(datum.cvOutputData,OutputStr, (0, 32), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255))
        #datum.cvOutputData = cv2.putText(datum.cvOutputData, "FPS: %.02f Target FPS: %.02f" % (1.0 / frameElapsedTime, frameRate), (2, 34), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0))
        #datum.cvOutputData = cv2.putText(datum.cvOutputData,"FPS: %.02f Target FPS: %.02f" % (1.0 / frameElapsedTime, frameRate), (0, 32), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255))
        cv2.imshow(windowName, datum.cvOutputData)
        frameEnd = time.time()
        remainTime = (1.0 / frameRate) - (frameEnd - frameStart)
        if remainTime > 0:
            key = cv2.waitKey(int(remainTime*1000) & 0xffffffff)
        else:
            key = cv2.waitKey(1)
        frameEnd = time.time()
        frameElapsedTime = frameEnd - frameStart
        totalTimeElapsed += frameElapsedTime
        if cv2.getWindowProperty(windowName, cv2.WND_PROP_ASPECT_RATIO) == -1:
            break
    cam.release()
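
# This example relies on module-level names defined elsewhere: `windowName`,
# `frameRate`, `samplePeriod`, `getFocusBodyIndex` (see the sketch after the
# last example) and a trained classifier `detect`. Plausible values, with the
# classifier load left as an assumption (hypothetical file name):
windowName = "OpenPose 1.7.0 - Tutorial Python API"
frameRate = 15.0      # assumed target display frame rate
samplePeriod = 0.5    # assumed seconds between classified samples
# from tensorflow.keras.models import load_model
# detect = load_model("activity_model.h5")  # hypothetical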
Exemple #24
def image_preprocess(file_path):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--image_path",
            default=file_path,
            help=
            "Process an image. Read all standard formats (jpg, png, bmp, etc.)."
        )
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"
        params["write_images"] = "./src/temp"
        params["write_images_format"] = "png"

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1:
                next_item = args[1][i + 1]
            else:
                next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Calculate Angles
        data = {
            'kneeHipAngle':
            calculate_angle(datum.poseKeypoints[0][12],
                            datum.poseKeypoints[0][13],
                            datum.poseKeypoints[0][14]),
            'hipChestAngle':
            calculate_angle(datum.poseKeypoints[0][5],
                            datum.poseKeypoints[0][12],
                            datum.poseKeypoints[0][13]),
            'chestArmAngle':
            calculate_angle(datum.poseKeypoints[0][12],
                            datum.poseKeypoints[0][5],
                            datum.poseKeypoints[0][6]),
            'armAngleDiff':
            calculate_angle(datum.poseKeypoints[0][4],
                            datum.poseKeypoints[0][5],
                            datum.poseKeypoints[0][7]),
            'kneeAngleDiff':
            calculate_angle(datum.poseKeypoints[0][11],
                            datum.poseKeypoints[0][13],
                            datum.poseKeypoints[0][14])
        }
        return data

    except Exception as e:
        print(e)
        sys.exit(-1)
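
# `image_preprocess` calls a `calculate_angle` helper that is not shown. A
# standard three-point joint angle (in degrees at the middle keypoint b),
# which is presumably what the original computes:
import numpy as np

def calculate_angle(a, b, c):
    # Each keypoint is (x, y, confidence); only x and y are used here.
    ba = np.asarray(a[:2], dtype=float) - np.asarray(b[:2], dtype=float)
    bc = np.asarray(c[:2], dtype=float) - np.asarray(b[:2], dtype=float)
    denom = np.linalg.norm(ba) * np.linalg.norm(bc)
    if denom == 0:
        return 0.0  # undetected keypoint; no meaningful angle
    cosine = np.clip(np.dot(ba, bc) / denom, -1.0, 1.0)
    return float(np.degrees(np.arccos(cosine)))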
Exemple #25
    for imageBaseId in range(0, len(imagePaths), numberGPUs):

        # Create datums
        images = []

        # Read and push images into OpenPose wrapper
        for gpuId in range(0, numberGPUs):

            imageId = imageBaseId + gpuId
            if imageId < len(imagePaths):

                imagePath = imagePaths[imageBaseId + gpuId]
                datum = op.Datum()
                images.append(cv2.imread(imagePath))
                datum.cvInputData = images[-1]
                opWrapper.waitAndEmplace(op.VectorDatum([datum]))

        # Retrieve processed results from OpenPose wrapper
        for gpuId in range(0, numberGPUs):

            imageId = imageBaseId + gpuId
            if imageId < len(imagePaths):

                datums = op.VectorDatum()
                opWrapper.waitAndPop(datums)
                datum = datums[0]

                print("Body keypoints: \n" + str(datum.poseKeypoints))

                if not args[0].no_display:
                    cv2.imshow("OpenPose 1.7.0 - Tutorial Python API",
Exemple #26
def jsonify(file_path):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + \
                    '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--video_path",
            default=file_path,
            help=
            "Process a video. Reads all standard video formats (mp4, avi, etc.)."
        )
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1:
                next_item = args[1][i + 1]
            else:
                next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params:
                    params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params:
                    params[key] = next_item

        base = os.path.basename(file_path)
        nom = os.path.splitext(base)[0]
        result_dir = "/home/aleix/Escriptori/coses_tfg/tfg-correct-exercise/dataset/"
        nom_fitxer = result_dir + nom + ".json"
        kplist = []  # List of keypoints
        kpdict = {}  # Dictionary of keypoints
        numLinia = 1  # Current line number within the output file
        vegada = 0  # Which body point we are currently at
        nextFrame = False  # Set when we move on to the next frame
        totalFrames = 0

        try:

            # Starting OpenPose
            opWrapper = op.WrapperPython()
            opWrapper.configure(params)
            opWrapper.start()

            # Process Image
            datum = op.Datum()
            imageToProcess = cv2.VideoCapture(args[0].video_path)

            while (imageToProcess.isOpened()):
                ret, frame = imageToProcess.read()
                if ret:
                    datum.cvInputData = frame
                    opWrapper.emplaceAndPop(op.VectorDatum([datum]))

                    totalFrames += 1

                    # Display Image
                    kpdict['body keypoint'] = np.array(
                        datum.poseKeypoints).tolist()
                    kplist.append(kpdict.copy())
                    # print("Body keypoints: \n" + str(datum.poseKeypoints))
                    cv2.imshow("OpenPose 1.7.0 - Tutorial Python API",
                               datum.cvOutputData)
                    cv2.waitKey(1)

                else:
                    break

        except Exception as e:
            print(e)
            sys.exit(-1)

        kplist.append("Frames: " + str(totalFrames))

        with open(nom_fitxer, "w+") as f:
            json.dump(kplist, f, indent=4)

        with open(nom_fitxer) as fp:
            linies = fp.read().splitlines()
        with open(nom_fitxer, "w") as fp:
            for linia in linies:

                if nextFrame:
                    print(linia, file=fp)
                    if numLinia == 10:
                        nextFrame = False
                        numLinia = 5
                        vegada = 0

                    else:
                        numLinia += 1

                elif numLinia % 5 == 0:
                    # BODY_25 keypoint names, kept in Catalan as written to
                    # the annotated JSON (nose, neck, right/left shoulder,
                    # elbow and wrist, hips, knees, ankles, eyes, ears,
                    # toes and heels).
                    noms = [
                        "Nas", "Clatell", "Espatlla dreta", "Colze dret",
                        "Canell dret", "Espatlla esquerra", "Colze esquerre",
                        "Canell esquerre", "Cadera centre",
                        "Cadera banda dreta", "Genoll dret", "Turmell dret",
                        "Cadera banda esquerra", "Genoll esquerre",
                        "Turmell esquerre", "Ull dret", "Ull esquerre",
                        "Orella dreta", "Orella esquerre", "Dit gros esquerre",
                        "Dit petit esquerre", "Taló esquerre", "Dit gros dret",
                        "Dit petit dret", "Taló dret"
                    ]
                    print(linia + "\n" + "\t" * 5 + '"' + noms[vegada] + '",',
                          file=fp)
                    if vegada == len(noms) - 1:  # last keypoint of the frame
                        numLinia = 1
                        nextFrame = True
                    else:
                        vegada += 1
                        numLinia += 1

                else:
                    print(linia, file=fp)
                    numLinia += 1


    except Exception as e:
        print(e)
        sys.exit(-1)
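
# Note on the rewriting pass above: with json.dump(..., indent=4), each
# keypoint serializes as a 3-element list (x, y, confidence) spanning five
# lines, e.g.:
#         [
#             123.4,
#             567.8,
#             0.9
#         ],
# which is why a body-part name is injected after every fifth line
# (numLinia % 5 == 0) and the counters reset after all 25 names of one
# BODY_25 skeleton have been written.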
Exemple #27
    def pose_estimation(self):  # Use OpenPose to detect the joint keypoints
        # load openpose python api
        # if self.arg.openpose is not None:
        #     sys.path.append('{}/python'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python/openpose'.format(self.arg.openpose))
        #     sys.path.append('{}/build/example/openpose'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python/openpose/Release'.format(self.arg.openpose))
        #     sys.path.append('{}/build/x64/Release'.format(self.arg.openpose))
        #     sys.path.append('{}/build/bin'.format(self.arg.openpose))
        # try:
        #     #from openpose import pyopenpose as op
        #     import pyopenpose as op
        # except:
        #     print('Can not find Openpose Python API.')
        #     return
        #     dir_path = os.path.dirname(os.path.realpath(__file__))

        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            # if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            # sys.path.append('D:/PycharmProject/openpose/build/python/openpose/Release')
            # os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + 'D:/PycharmProject/openpose/build/x64/Release' + dir_path + 'D:/PycharmProject/openpose/build/bin'
            sys.path.append(dir_path +
                            '/../../openpose/build/python/openpose/Release')
            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + dir_path + '/../../openpose/build/x64/Release;' + dir_path + '/../../openpose/build/bin;'
            import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        video_name = self.arg.video.split('/')[-1].split('.')[0]

        # initiate
        opWrapper = op.WrapperPython()
        params = dict(model_folder='./models', model_pose='COCO')
        opWrapper.configure(params)
        opWrapper.start()
        self.model.eval()
        video_capture = cv2.VideoCapture(self.arg.video)
        # video_capture = cv2.VideoCapture(0)  # 0 opens the webcam
        video_length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        pose_tracker = naive_pose_tracker(data_frame=video_length)

        # pose estimation
        start_time = time.time()
        frame_index = 0
        video = list()
        while (True):

            # get image
            ret, orig_image = video_capture.read()
            if orig_image is None:
                break
            source_H, source_W, _ = orig_image.shape
            orig_image = cv2.resize(orig_image,
                                    (256 * source_W // source_H, 256))
            H, W, _ = orig_image.shape
            video.append(orig_image)

            # pose estimation
            datum = op.Datum()
            datum.cvInputData = orig_image
            # opWrapper.emplaceAndPop([datum])
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            multi_pose = datum.poseKeypoints  # (num_person, num_joint, 3)

            # if len(multi_pose.shape) != 3:  # check for a 3-D array
            if multi_pose is None:  # no person detected in this frame
                continue

            # normalization
            multi_pose[:, :, 0] = multi_pose[:, :, 0] / W
            multi_pose[:, :, 1] = multi_pose[:, :, 1] / H
            multi_pose[:, :, 0:2] = multi_pose[:, :, 0:2] - 0.5
            multi_pose[:, :, 0][multi_pose[:, :, 2] == 0] = 0
            multi_pose[:, :, 1][multi_pose[:, :, 2] == 0] = 0

            # pose tracking
            pose_tracker.update(multi_pose, frame_index)
            frame_index += 1

            print('Pose estimation ({}/{}).'.format(frame_index, video_length))

        data_numpy = pose_tracker.get_skeleton_sequence()
        return video, data_numpy
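
# `pose_estimation` depends on a `naive_pose_tracker` class defined elsewhere
# (the name matches the helper shipped with the st-gcn demos). A minimal
# sketch of the interface it must provide, under the assumption that it
# accumulates per-frame detections into a (C, T, V, M) skeleton array:
import numpy as np

class naive_pose_tracker:
    def __init__(self, data_frame, num_joint=18, num_person=1):
        # (channels, frames, joints, people): x, y, confidence per joint;
        # 18 joints matches the COCO model configured above.
        self.data = np.zeros((3, data_frame, num_joint, num_person),
                             dtype=np.float32)
        self.num_person = num_person

    def update(self, multi_pose, frame_index):
        # Keep the first `num_person` detections of this frame; a real
        # tracker would match people across frames instead.
        for m, pose in enumerate(multi_pose[:self.num_person]):
            self.data[:, frame_index, :, m] = pose.transpose(1, 0)

    def get_skeleton_sequence(self):
        return self.data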
def main():
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                # sys.path.append('/usr/local/python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "/home/medrobotics/openpose/models/"
        params["net_resolution"] = "-1x352"
        #params["face"] = True
        #params["face_detector"] = 2

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1: next_item = args[1][i + 1]
            else: next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item

        # Construct it from system arguments
        # op.init_argv(args[1])
        # oppython = op.OpenposePython()

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
        # Process Image
        for i in range(10000):
            datum = op.Datum()
            # Raw 1080p 4-channel frames dumped as .bin; keep the first
            # three (BGR) channels for OpenCV/OpenPose.
            string = '/media/medrobotics/STARS_B/2021_12_20_A/color/' + str(
                i) + '.bin'
            imageToProcess = np.fromfile(string, dtype=np.uint8)
            imageToProcess = np.reshape(imageToProcess, (1080, 1920, 4))
            imageToProcess = imageToProcess[:, :, :3].copy()
            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            out = datum.cvOutputData
            if datum.poseKeypoints is not None:
                for j in range(datum.poseKeypoints.shape[0]):
                    # Mean and variance of the head keypoints
                    # (indices 0, 14-17) detected for this person.
                    mean = np.zeros(shape=(2, ))
                    variance = np.zeros(shape=(2, ))
                    count = 0
                    for k in [0, 14, 15, 16, 17]:
                        if datum.poseKeypoints[j][k][2] != 0.0:
                            count += 1
                    if count == 0:
                        # Note: `break` also skips the remaining people.
                        break
                    for k in [0, 14, 15, 16, 17]:
                        if datum.poseKeypoints[j][k][2] != 0.0:
                            mean += datum.poseKeypoints[j][k][:2] / count
                    for k in [0, 14, 15, 16, 17]:
                        if datum.poseKeypoints[j][k][2] != 0.0:
                            variance += (datum.poseKeypoints[j][k][:2] -
                                         mean) * (datum.poseKeypoints[j][k][:2]
                                                  - mean) / count
                    std_dev = np.sqrt(variance)
                    # Override the computed spread with a fixed 10 px value.
                    std_dev[0] = 10
                    std_dev[1] = 10
                    x1 = min(max(int(mean[0] - 3 * std_dev[0]), 0), 1920)
                    x2 = min(max(int(mean[0] + 3 * std_dev[0]), 0), 1920)
                    y1 = min(max(int(mean[1] - 3 * std_dev[1]), 0), 1080)
                    y2 = min(max(int(mean[1] + 3 * std_dev[1]), 0), 1080)
                    face = out[y1:y2, x1:x2]
                    #if face is not None and face.size != 0:
                    #face = cv2.GaussianBlur(face, (101, 101), 100)
                    #out[y1:y2, x1:x2] = face
            cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", out)
            if cv2.waitKey(1) == ord('q'):
                sys.exit(1)
    except Exception as e:
        print(e)
        sys.exit(-1)
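
# To anonymize the detected faces, the commented-out block above can be
# re-enabled with a guard against empty crops, e.g.:
#     face = out[y1:y2, x1:x2]
#     if face is not None and face.size != 0:
#         face = cv2.GaussianBlur(face, (101, 101), 100)
#         out[y1:y2, x1:x2] = face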
Exemple #29
                # Make the face rectangle square (use `side` rather than
                # shadowing the built-in `len`).
                if lrdiff < btdiff:
                    side = lrdiff
                    y = y + (btdiff - lrdiff) / 2
                else:
                    side = btdiff
                    x = x + (lrdiff - btdiff) / 2

                faceRectangles = [
                    op.Rectangle(x, y, side, side),
                ]

                # Create new datum
                datum = op.Datum()
                datum.cvInputData = imageToProcess
                datum.faceRectangles = faceRectangles

                # Process and display image
                opWrapper.emplaceAndPop(op.VectorDatum([datum]))
                # print("Face keypoints: \n" + str(datum.faceKeypoints))

                # save openpose data
                # rootForFacepos = root.replace('iphone_pictures', 'openpose')
                # if not isdir(rootForFacepos):
                #     os.makedirs(rootForFacepos)
                # fileForFacepos = file.replace('jpg', 'npy')
                # fileSave = rootForFacepos + '/' + fileForFacepos
                # np.save(fileSave, datum.faceKeypoints)
                imS = cv2.resize(datum.cvOutputData, (540, 960))
                cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", imS)
                cv2.waitKey(0)
def main():
    buildPath = os.path.dirname(os.path.realpath(__file__))
    t = datetime.datetime.now()
    samplePath = buildPath + "/my_samples_%04d_%02d_%02d_%02d_%02d_%02d" % (
        t.year, t.month, t.day, t.hour, t.minute, t.second)

    sys.path.append(buildPath + "/python/openpose/Release")
    os.environ['PATH'] += ';' + (buildPath +
                                 "/x64/Release") + ';' + (buildPath + "/bin;")
    import pyopenpose as op
    params = dict()
    params["model_folder"] = "../models/"
    params["model_pose"] = "BODY_25"
    params["net_resolution"] = "-1x160"
    params["hand"] = True
    params["hand_detector"] = 0
    params["hand_net_resolution"] = "224x224"
    openposeHandle = op.WrapperPython()
    openposeHandle.configure(params)
    openposeHandle.start()
    os.makedirs(samplePath)

    datum = op.Datum()
    cam = cv2.VideoCapture(0)
    with open(samplePath + "/sample_coords.csv", 'w', newline='') as csvFile:
        csvWriter = csv.writer(csvFile)
        csvWriter.writerow([
            "focusing", "nose_x", "nose_y", "neck_x", "neck_y", "rShoulder_x",
            "rShoulder_y", "rElbow_x", "rElbow_y", "rWrist_x", "rWrist_y",
            "lShoulder_x", "lShoulder_y", "lElbow_x", "lElbow_y", "lWrist_x",
            "lWrist_y", "rEye_x", "rEye_y", "lEye_x", "lEye_y", "rEar_x",
            "rEar_y", "lEar_x", "lEar_y", "lHand_x", "lHand_y", "lThumb0_x",
            "lThumb0_y", "lThumb1_x", "lThumb1_y", "lThumb2_x", "lThumb2_y",
            "lThumb3_x", "lThumb3_y", "lIndexFinger0_x", "lIndexFinger0_y",
            "lIndexFinger1_x", "lIndexFinger1_y", "lIndexFinger2_x",
            "lIndexFinger2_y", "lIndexFinger3_x", "lIndexFinger3_y",
            "lMiddleFinger0_x", "lMiddleFinger0_y", "lMiddleFinger1_x",
            "lMiddleFinger1_y", "lMiddleFinger2_x", "lMiddleFinger2_y",
            "lMiddleFinger3_x", "lMiddleFinger3_y", "lRingFinger0_x",
            "lRingFinger0_y", "lRingFinger1_x", "lRingFinger1_y",
            "lRingFinger2_x", "lRingFinger2_y", "lRingFinger3_x",
            "lRingFinger3_y", "lLittleFinger0_x", "lLittleFinger0_y",
            "lLittleFinger1_x", "lLittleFinger1_y", "lLittleFinger2_x",
            "lLittleFinger2_y", "lLittleFinger3_x", "lLittleFinger3_y",
            "rHand_x", "rHand_y", "rThumb0_x", "rThumb0_y", "rThumb1_x",
            "rThumb1_y", "rThumb2_x", "rThumb2_y", "rThumb3_x", "rThumb3_y",
            "rIndexFinger0_x", "rIndexFinger0_y", "rIndexFinger1_x",
            "rIndexFinger1_y", "rIndexFinger2_x", "rIndexFinger2_y",
            "rIndexFinger3_x", "rIndexFinger3_y", "rMiddleFinger0_x",
            "rMiddleFinger0_y", "rMiddleFinger1_x", "rMiddleFinger1_y",
            "rMiddleFinger2_x", "rMiddleFinger2_y", "rMiddleFinger3_x",
            "rMiddleFinger3_y", "rRingFinger0_x", "rRingFinger0_y",
            "rRingFinger1_x", "rRingFinger1_y", "rRingFinger2_x",
            "rRingFinger2_y", "rRingFinger3_x", "rRingFinger3_y",
            "rLittleFinger0_x", "rLittleFinger0_y", "rLittleFinger1_x",
            "rLittleFinger1_y", "rLittleFinger2_x", "rLittleFinger2_y",
            "rLittleFinger3_x", "rLittleFinger3_y"
        ])

        frameElapsedTime = 1
        totalTimeElapsed = 0
        sampleIndex = 0
        while True:

            frameStart = time.time()
            ret, frame = cam.read()
            frame = cv2.flip(frame, 1)
            datum.cvInputData = frame
            openposeHandle.emplaceAndPop(op.VectorDatum([datum]))
            if datum.poseKeypoints is not None:
                if totalTimeElapsed > samplePeriod:
                    totalTimeElapsed -= samplePeriod
                    focusIndex = getFocusBodyIndex(datum.poseKeypoints,
                                                   len(datum.poseKeypoints))
                    body = datum.poseKeypoints[focusIndex]
                    leftHand = datum.handKeypoints[0][focusIndex]
                    rightHand = datum.handKeypoints[1][focusIndex]
                    cv2.imwrite(samplePath + "/%08d.jpg" % sampleIndex,
                                datum.cvOutputData)
                    csvWriter.writerow([
                        0, body[0][0], body[0][1], body[1][0], body[1][1],
                        body[2][0], body[2][1], body[3][0], body[3][1],
                        body[4][0], body[4][1], body[5][0], body[5][1],
                        body[6][0], body[6][1], body[7][0], body[7][1],
                        body[15][0], body[15][1], body[16][0], body[16][1],
                        body[17][0], body[17][1], body[18][0], body[18][1],
                        leftHand[0][0], leftHand[0][1], leftHand[1][0],
                        leftHand[1][1], leftHand[2][0], leftHand[2][1],
                        leftHand[3][0], leftHand[3][1], leftHand[4][0],
                        leftHand[4][1], leftHand[5][0], leftHand[5][1],
                        leftHand[6][0], leftHand[6][1], leftHand[7][0],
                        leftHand[7][1], leftHand[8][0], leftHand[8][1],
                        leftHand[9][0], leftHand[9][1], leftHand[10][0],
                        leftHand[10][1], leftHand[11][0], leftHand[11][1],
                        leftHand[12][0], leftHand[12][1], leftHand[13][0],
                        leftHand[13][1], leftHand[14][0], leftHand[14][1],
                        leftHand[15][0], leftHand[15][1], leftHand[16][0],
                        leftHand[16][1], leftHand[17][0], leftHand[17][1],
                        leftHand[18][0], leftHand[18][1], leftHand[19][0],
                        leftHand[19][1], leftHand[20][0], leftHand[20][1],
                        rightHand[0][0], rightHand[0][1], rightHand[1][0],
                        rightHand[1][1], rightHand[2][0], rightHand[2][1],
                        rightHand[3][0], rightHand[3][1], rightHand[4][0],
                        rightHand[4][1], rightHand[5][0], rightHand[5][1],
                        rightHand[6][0], rightHand[6][1], rightHand[7][0],
                        rightHand[7][1], rightHand[8][0], rightHand[8][1],
                        rightHand[9][0], rightHand[9][1], rightHand[10][0],
                        rightHand[10][1], rightHand[11][0], rightHand[11][1],
                        rightHand[12][0], rightHand[12][1], rightHand[13][0],
                        rightHand[13][1], rightHand[14][0], rightHand[14][1],
                        rightHand[15][0], rightHand[15][1], rightHand[16][0],
                        rightHand[16][1], rightHand[17][0], rightHand[17][1],
                        rightHand[18][0], rightHand[18][1], rightHand[19][0],
                        rightHand[19][1], rightHand[20][0], rightHand[20][1]
                    ])
                    print("sample %08d saved." % sampleIndex)
                    sampleIndex += 1

            datum.cvOutputData = cv2.putText(
                datum.cvOutputData, "FPS: %.02f Target FPS: %.02f" %
                (1.0 / frameElapsedTime, frameRate), (2, 34),
                cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0))
            datum.cvOutputData = cv2.putText(
                datum.cvOutputData, "FPS: %.02f Target FPS: %.02f" %
                (1.0 / frameElapsedTime, frameRate), (0, 32),
                cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255))
            cv2.imshow(windowName, datum.cvOutputData)
            frameEnd = time.time()
            remainTime = (1.0 / frameRate) - (frameEnd - frameStart)
            if remainTime > 0:
                key = cv2.waitKey(int(remainTime * 1000) & 0xffffffff)
            else:
                key = cv2.waitKey(1)
            frameEnd = time.time()
            frameElapsedTime = frameEnd - frameStart
            totalTimeElapsed += frameElapsedTime
            if cv2.getWindowProperty(windowName,
                                     cv2.WND_PROP_ASPECT_RATIO) == -1:
                break
    cam.release()
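
# Both webcam examples call `getFocusBodyIndex` to pick which detected person
# to sample. A plausible sketch, assuming it returns the body whose keypoints
# have the highest total confidence (the original criterion is not shown):
import numpy as np

def getFocusBodyIndex(poseKeypoints, numBodies):
    scores = [float(np.sum(poseKeypoints[i][:, 2])) for i in range(numBodies)]
    return int(np.argmax(scores))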