Code Example #1
File: app.py  Project: muu86/video_upload_flask
def upload_file():
    if request.method == 'POST':
        # if 'file' not in request.files:
        #     flash('No file part')
        #     return redirect(request.url)
        #
        print(request.files.keys())
        print(request.files['data'])

        file = request.files['data']
        print(dir(file))
        print(file.filename)

        file.save(os.path.join(os.getcwd(), 'uploads.mp4'))

        try:
            # These imports raise ImportError if OpenPose or OpenCV is missing;
            # the outer try/except below reports the error and exits.
            import pyopenpose as op
            import cv2
            from test_video import SampleVideo

            params = dict()
            params["model_folder"] = "C:\\openpose\\models\\"
            params["number_people_max"] = 1

            opWrapper = op.WrapperPython()
            opWrapper.configure(params)
            opWrapper.start()

            datum = op.Datum()
            video_path = os.path.join(os.getcwd(), 'uploads.mp4')

            cap = cv2.VideoCapture(video_path)
            print(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            out_path = os.path.join(os.getcwd(), 'static/output.mp4')
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(out_path, fourcc, 20.0,
                                  (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                   int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

            for pos in range(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))):
                ret, frame = cap.read()
                if not ret:  # guard against a short or corrupt video
                    break

                datum.cvInputData = frame
                opWrapper.emplaceAndPop(op.VectorDatum([datum]))

                out.write(datum.cvOutputData)

                print(datum.poseKeypoints)
                print(cap.get(cv2.CAP_PROP_POS_FRAMES))

            cap.release()
            out.release()  # without this the output .mp4 is never finalized
            print('end')

        except Exception as e:
            print(dir(e))
            print(e)
            sys.exit(-1)  # note: this exits the whole Flask process, not just the request
        # if file.filename == '':
        #     flash('No selected file')
        #     return redirect(request.url)

        # if file and allowed_file(file.filename):
        # filename = secure_filename(file.filename)
        # file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # return redirect(url_for('upload_file',
        #                         filename=filename))

        # return '''
        # <!doctype html>
        # <title>Upload new File</title>
        # <h1>Upload new File</h1>
        # <form method=post enctype=multipart/form-data>
        #     <input type=file name=file>
        #     <input type=submit value=Upload>
        # </form>
        # '''
        return "good"

    if request.method == 'GET':
        # GET branch reconstructed from the commented-out form above;
        # the field is named 'data' to match request.files['data'].
        return '''
        <!doctype html>
        <title>Upload new File</title>
        <h1>Upload new File</h1>
        <form method=post enctype=multipart/form-data>
            <input type=file name=data>
            <input type=submit value=Upload>
        </form>
        '''
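
The core pattern in this example, configure the wrapper once and then push frames through an op.Datum, can be distilled as below. This is a minimal sketch, assuming an OpenPose build (1.7+, where emplaceAndPop takes an op.VectorDatum) is already importable and a models directory exists at model_folder; it is not the project's actual code.

import cv2
import pyopenpose as op

def process_video(in_path, out_path, model_folder):
    """Run OpenPose on every frame of in_path and write the rendered video."""
    opWrapper = op.WrapperPython()
    opWrapper.configure({"model_folder": model_folder, "number_people_max": 1})
    opWrapper.start()

    cap = cv2.VideoCapture(in_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0  # fall back if FPS is unknown
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, size)

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        datum = op.Datum()
        datum.cvInputData = frame
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        out.write(datum.cvOutputData)

    cap.release()
    out.release()  # finalize the output container
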
Code Example #2
def process_picture(picture_path):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if sys.platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        # Strip the fixed-length absolute prefix from picture_path and remap
        # the file name under examples/media.
        png_path = ("../../../examples/media/traffic_police_gesture/" +
                    picture_path[79:])
        #parser.add_argument("--image_path", default="../../../examples/media/COCO_val2014_000000000192.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
        parser.add_argument(
            "--image_path",
            default=png_path,
            help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")

        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1: next_item = args[1][i + 1]
            else: next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item

        # Construct it from system arguments
        # op.init_argv(args[1])
        # oppython = op.OpenposePython()

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop([datum])

        # Display Image
        path = '/Users/zephyryau/Documents/study/INF552/Project/input_picture/'

        with open(path + 'result.txt', 'w', newline='') as txtfile:
            spamwriter = csv.writer(txtfile)
            spamwriter.writerow(datum.poseKeypoints)
        #print("Body keypoints: \n" + datum.poseKeypoints)
        cv2.imwrite(path + 'result.png', datum.cvOutputData)
        cv2.waitKey(0)
    except Exception as e:
        print(e)
        sys.exit(-1)
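
The "Add others in path?" loop above, which also recurs in examples #5, #6, #7, #8 and #11 below, folds leftover command-line flags into the OpenPose params dict. A standalone version of that same logic, as a sketch:

def extra_args_to_params(extra, params):
    """Fold unparsed argparse tokens (e.g. ['--face', '--net_resolution', '320x192'])
    into the OpenPose params dict, mirroring the loop used in these examples."""
    for i, curr_item in enumerate(extra):
        next_item = extra[i + 1] if i != len(extra) - 1 else "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            params.setdefault(key, "1")          # bare flag -> "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            params.setdefault(key, next_item)    # flag followed by a value
    return params

Note that replace('-', '') also strips hyphens inside the flag name; that quirk is inherited from the original loop and is harmless for OpenPose flags, which use underscores.
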
Code Example #3
File: app.py  Project: coloriz/pose-detector
    def __init__(self, opt):
        self.session_id = opt.session_id
        # Initialize camera and window
        self.cap = cv.VideoCapture(opt.input_device,
                                   getattr(cv, opt.input_api))
        assert self.cap.isOpened(), 'Failed to initialize video capture!'

        self.width, self.height = opt.video_width, opt.video_height
        self.cap.set(cv.CAP_PROP_FRAME_WIDTH, self.width)
        self.cap.set(cv.CAP_PROP_FRAME_HEIGHT, self.height)
        self.window_name = 'tutorial'
        if opt.fullscreen:
            cv.namedWindow(self.window_name, cv.WINDOW_NORMAL)
            cv.setWindowProperty(self.window_name, cv.WND_PROP_FULLSCREEN,
                                 cv.WINDOW_FULLSCREEN)
        else:
            cv.namedWindow(self.window_name)
        self.width_qr = self.width // 4
        self.width_half = self.width // 2
        self.width_3qr = self.width // 4 * 3
        self.border_margin = round(max(self.width, self.height) * 0.025)

        # Initialize openpose library
        import os
        import sys
        sys.path.append(os.fspath(opt.op_path / 'python/openpose/Release'))
        os.environ['PATH'] += f';{os.fspath(opt.op_path/"x64/Release")}'
        os.environ['PATH'] += f';{os.fspath(opt.op_path/"bin")}'

        try:
            import pyopenpose as op
        except ImportError as e:
            print('Error: OpenPose library could not be found.')
            raise e

        self._op_wrapper = op.WrapperPython()
        self._op_wrapper.configure({
            'model_folder': os.fspath(opt.op_path / '../models/'),
            'model_pose': 'BODY_25',
            'number_people_max': 1,
        })
        self._op_wrapper.start()
        self._datum = op.Datum()

        # Raw data recorder
        if not opt.no_save:
            self.recorder = DataRecorder(
                (self.width, self.height), 24,
                f'records/{opt.session_id}/{int(time())}')
        else:
            self.recorder = DummyDataRecorder()

        # Handler of each state
        self.handlers = {
            State.Ready: self.handle_ready,
            State.PoseReady: self.handle_pose_ready,
            State.PoseMeasuring: self.handle_pose_measuring,
            State.Finish: self.handle_finish,
        }

        # Load assets
        self.assets = Assets('assets/')
        self.poses: List[Posture] = [
            Pose_01, Pose_02, Pose_03, Pose_04, Pose_05, Pose_06, Pose_08,
            Pose_09, Pose_10
        ]
        self.poses_ui: List[InformationLayer] = generate_info_layers(
            self.poses)

        self.running: bool = True
        self.state: State = State.Ready
        self.pose_index_iter: Iterator[int] = iter(range(len(self.poses)))
        self.current_pose_i: int = 0
        self.t_start: float = perf_counter()
        self.fail_counter = deque([False] * opt.fail_tolerance,
                                  maxlen=opt.fail_tolerance)
        self.angle: float = np.pi
        self.confidence: float = 0
        self.score: int = 0

        self.keypoints: np.ndarray = np.array([], np.float32)
        self.frame: np.ndarray = np.array([], np.uint8)
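
Example #3 inlines the Windows-specific path setup before importing pyopenpose; examples #2, #4, #5 and others repeat the same boilerplate. It can be factored into one helper. A sketch, assuming the standard OpenPose build layout (build/python/openpose/Release plus the x64/Release and bin DLL folders on Windows, build/python on Linux):

import os
import sys

def import_openpose(op_root):
    """Make the OpenPose Python bindings importable and return the module."""
    if sys.platform == "win32":
        sys.path.append(os.path.join(op_root, "build", "python", "openpose", "Release"))
        os.environ["PATH"] += ";" + os.path.join(op_root, "build", "x64", "Release")
        os.environ["PATH"] += ";" + os.path.join(op_root, "build", "bin")
        import pyopenpose as op
    else:
        sys.path.append(os.path.join(op_root, "build", "python"))
        from openpose import pyopenpose as op
    return op
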
Code Example #4
File: initial_set.py  Project: gatherheart/POSE
def initialize(image, identity):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                # (note: this branch appends a Linux build path, apparently a leftover)
                sys.path.append('/home/ubuntu/openpose/build/python')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('/home/ubuntu/openpose/build/python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "/home/ubuntu/openpose/models/"
        params["face"] = True
        params["hand"] = False
        params["number_people_max"] = 1
        #params["write_json"] = "/home/ubuntu/server/tmp/"

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(image)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop([datum])

        # BODY_25 indices: 0 = nose, 1 = neck, 2 = RShoulder, 5 = LShoulder
        # (the original variable names swap left and right; kept as-is)
        nose = datum.poseKeypoints[0][0]
        chest = datum.poseKeypoints[0][1]
        left_shoulder = datum.poseKeypoints[0][2]
        right_shoulder = datum.poseKeypoints[0][5]

        data = {}
        data['quantitative'] = []
        data['quantitative'].append({
            'shoulder_width': str(distance(left_shoulder, right_shoulder)),
            'shoulder_slope': str(slope(right_shoulder, left_shoulder)),
            'head_slope': str(slope(nose, chest)),
            'left_shoulder_neck': str(slope(chest, left_shoulder)),
            'right_shoulder_neck': str(slope(chest, right_shoulder)),
            'eye_distance': str(distance(datum.faceKeypoints[0][36],
                                         datum.faceKeypoints[0][45])),
        })

        print("Done " + image)
        return data

    except (Exception, Warning) as e:
        print("Initial IMAGE: " + str(e))
Code Example #5
def image_preprocess(file_path):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--image_path",
            default=file_path,
            help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"
        params["write_images"] = "./src/temp"
        params["write_images_format"] = "png"

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1:
                next_item = args[1][i + 1]
            else:
                next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.imread(args[0].image_path)
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Calculate Angles
        data = {
            'kneeHipAngle': calculate_angle(datum.poseKeypoints[0][12],
                                            datum.poseKeypoints[0][13],
                                            datum.poseKeypoints[0][14]),
            'hipChestAngle': calculate_angle(datum.poseKeypoints[0][5],
                                             datum.poseKeypoints[0][12],
                                             datum.poseKeypoints[0][13]),
            'chestArmAngle': calculate_angle(datum.poseKeypoints[0][12],
                                             datum.poseKeypoints[0][5],
                                             datum.poseKeypoints[0][6]),
            'armAngleDiff': calculate_angle(datum.poseKeypoints[0][4],
                                            datum.poseKeypoints[0][5],
                                            datum.poseKeypoints[0][7]),
            'kneeAngleDiff': calculate_angle(datum.poseKeypoints[0][11],
                                             datum.poseKeypoints[0][13],
                                             datum.poseKeypoints[0][14]),
        }
        return data

    except Exception as e:
        print(e)
        sys.exit(-1)
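
calculate_angle is defined elsewhere in this project. A plausible sketch, assuming it returns the interior angle at the middle keypoint b formed by the two limb vectors (this is an assumption about the helper, not the project's actual code):

import numpy as np

def calculate_angle(a, b, c):
    """Angle (in degrees) at keypoint b formed by segments b->a and b->c.
    Each keypoint is an (x, y, confidence) triple."""
    v1 = np.array([a[0] - b[0], a[1] - b[1]])
    v2 = np.array([c[0] - b[0], c[1] - b[1]])
    cos = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-9)
    return float(np.degrees(np.arccos(np.clip(cos, -1.0, 1.0))))
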
Code Example #6
def body_detect(video):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append('/home/daniel/openpose/build/python/')

            # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
            # sys.path.append('/usr/local/python')

            # this import is breaking celery
            import pyopenpose as op
            
        except ImportError as e:
            print(e)
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        parser.add_argument("--video", default="/home/daniel/uni/CheckMeWOD/checkmewod_project/"+video, help="Process a video.")
        args = parser.parse_known_args()
        
        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = '/home/daniel/openpose/models/'
        params["face"] = False
        params["hand"] = False

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1:
                next_item = args[1][i + 1]
            else:
                next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params:  params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item

        # Construct it from system arguments
        # op.init_argv(args[1])
        # oppython = op.OpenposePython()

        keypointlist = []
        keypointdict = {}

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process video
        datum = op.Datum()
        
        frame_num = 0
        
        cap = cv2.VideoCapture(args[0].video)
        
        while cap.isOpened():
            hasframe, frame = cap.read()

            if hasframe:
                datum.cvInputData = frame
                opWrapper.emplaceAndPop([datum])

                keypointdict['body keypoint'] = np.array(datum.poseKeypoints).tolist()
                keypointlist.append(keypointdict.copy())  # must be the copy!!!

                #cv2.imshow("OpenPose 1.5.0 - Tutorial Python API", datum.cvOutputData)

                # keypointlist keeps growing, so each per-frame file below
                # holds every frame seen so far.
                filename = "/home/daniel/uni/CheckMeWOD/checkmewod_project/checkmewod/video_evaluation_src/output_json/frame_number_" + str(frame_num) + ".json"
                with open(filename, "a") as f:
                    json.dump(keypointlist, f, indent=0)
                frame_num += 1

                cv2.waitKey(1)
            else:
                break
        cap.release()
    except Exception as e:
        print(e)  # surface the error instead of exiting silently
        sys.exit(-1)
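
As noted in the comment above, the growing keypointlist means each per-frame JSON file contains all previous frames too. If the intent is one frame per file, a leaner sketch (the output directory is the example's own path, kept as an assumption):

import json
import numpy as np

def write_frame_json(out_dir, frame_num, pose_keypoints):
    """Write a single frame's keypoints to its own JSON file (mode 'w',
    so each file holds exactly one frame)."""
    record = {'body keypoint': np.array(pose_keypoints).tolist()}
    with open(f"{out_dir}/frame_number_{frame_num}.json", "w") as f:
        json.dump(record, f, indent=0)
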
Code Example #7
def toCsv(file_path):
    # Import Openpose (Windows/Ubuntu/OSX)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    try:
        # Windows Import
        if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append(dir_path + '/../../python/openpose/Release')
            os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + \
                '/../../x64/Release;' + dir_path + '/../../bin;'
            import pyopenpose as op
        else:
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append('/home/aleix/openpose/build/python')
            # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
            # sys.path.append('/usr/local/python')
            from openpose import pyopenpose as op
    except ImportError as e:
        print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
        raise e
    # Flags
    parser = argparse.ArgumentParser()
    parser.add_argument("--video_path", default=file_path,
                        help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
    args = parser.parse_known_args()

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "/home/aleix/openpose/models/"

    # Add others in path?
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1])-1:
            next_item = args[1][i+1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params:
                params[key] = next_item

    try:

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        imageToProcess = cv2.VideoCapture(args[0].video_path)
        arrayNoNorm = np.array([])  # start empty; np.array(0) would inject a stray leading 0
        totalFrames = int(imageToProcess.get(cv2.CAP_PROP_FRAME_COUNT))
        target = cutFps(totalFrames)
        counter = 0
        framesRet = 0

        # 1. Store all of the video's poses in one array.
        while imageToProcess.isOpened():
            if counter == target:
                ret, frame = imageToProcess.read()
                if ret:
                    datum.cvInputData = frame
                    opWrapper.emplaceAndPop(op.VectorDatum([datum]))

                    kp_array = np.array(datum.poseKeypoints)

                    # Drop the outlier keypoints
                    nou_array = np.delete(kp_array[0], [3, 4, 15, 17, 22, 23], 0)

                    arrayNoNorm = np.append(arrayNoNorm, nou_array)

                    # print("Body keypoints: \n" + str(datum.poseKeypoints))
                    # cv2.imshow("OpenPose 1.7.0 - Tutorial Python API",
                    #     datum.cvOutputData)
                    cv2.waitKey(1)
                    counter = 0
                    framesRet += 1
                else:
                    break
            else:
                imageToProcess.grab()  # skip this frame without decoding it
                counter += 1
        
        # The label comes from the file name (a fixed directory depth is assumed).
        label = file_path.split('/')[7]
        label = label.split('_')[0]
        label = label.split('.')[0]
        print(label)
        #label = label.join([c for c in label if c.isalpha()])
        pos = 0
        
        # print("Frames sencers: ",totalFrames)
        # print("Frames retallats: ", framesRet)
        for i in range(framesRet):
        	# 2. Normalitzem dades frame per frame (cada 75 posicions ja que te x, y i accuracy)
            posini = pos
            pos += 57
            arrayFrame = arrayNoNorm[posini:pos]
            # print(len(arrayFrame))
            # if len(arrayFrame) == 1:
            #     print(arrayFrame)
            arrayFrame = normValues(arrayFrame, label)
            
        	# 3. Les guardem en un csv
            with open('/home/aleix/Escriptori/coses_tfg/tfg-correct-exercise/api/files/keypoints.csv', 'a') as file:
                writer = csv.writer(file)
                writer.writerow(arrayFrame)

        # Pad up to 75 rows, since that is the maximum number of frames.
        padding = [label]
        for i in range(38):
            padding.append(-1000.0)
        while framesRet != 75:
            with open('/home/aleix/Escriptori/coses_tfg/tfg-correct-exercise/api/files/keypoints.csv', 'a') as file:
                writer = csv.writer(file)
                writer.writerow(padding)
            framesRet += 1

    except Exception as e:
        print(e)
        sys.exit(-1)
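
cutFps and normValues are elided from this excerpt. From how target is used, cutFps evidently returns how many frames to skip between kept frames; a hedged guess at it, plus the grab() subsampling idea in isolation:

def cutFps(total_frames, max_frames=75):
    """How many frames to skip between kept frames so that at most roughly
    max_frames frames of the video are processed (assumed behaviour)."""
    if total_frames <= max_frames:
        return 0                       # keep every frame
    return total_frames // max_frames  # e.g. 300 frames -> keep every 4th

# cap.grab() advances the stream without decoding, so skipped frames are cheap;
# the full cap.read() (grab + retrieve) cost is only paid for kept frames.
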
Code Example #8
def main():

    dir_path = 'C:\\Users\\SignEasy\\Documents\\openpose-1.5.0'
    sys.path.append("{}\\build\\python\\openpose\\Release".format(dir_path))
    os.environ["PATH"] = os.environ[
        "PATH"] + ";{}\\build\\x64\\Release;{}\\build\\bin;{};".format(
            dir_path, dir_path, dir_path)

    try:
        import pyopenpose as op
    except ImportError as e:
        print(e, file=sys.stderr)
        sys.exit(-1)  # op is used below; exit rather than hit a NameError later

    # OpenPose Flags
    parser = argparse.ArgumentParser()
    parser.add_argument("--hand", default=True)
    parser.add_argument("--display", default=0)
    parser.add_argument("--number_people_max", default=1)

    args = parser.parse_known_args()

    # OpenPose Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "{}\\models".format(dir_path)
    params["hand"] = True
    params["display"] = 0
    params["number_people_max"] = 1

    # Add others command line arguments
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1:
            next_item = args[1][i + 1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = next_item
    try:
        import tensorflow as tf
        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
        datum = op.Datum()

        # Start TensorFlow, load Keras model
        config = tf.ConfigProto()

        config.gpu_options.per_process_gpu_memory_fraction = 0.1  # Only allocates a portion of VRAM to TensorFlow
        session = tf.Session(config=config)

        # TODO CHANGE WITH EVERY NEW MODEL
        tf_model = load_model(
            'Models/dynamic_kfold-7_model_date_04-23-2021_time_18-51-49.h5'
        )  # 'normalized_epochs200_10_data_points10_06_2019_02_00_54.h5

        # Capture Frames
        cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # Use webcam
        num_data_points = 0
        frame_counter = 0
        result = ""
        xy_set = []
        conf_level_sum = 0

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Use Webcam frames, render OpenPose
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])
            op_frame = datum.cvOutputData
            window_name = "Hand Classification Window"

            # All available hand keypoints (OpenPose 1.5 (0-20))
            hand_data = [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                18, 19, 20
            ]

            # TODO Add new gestures to this
            prediction_strings = ["Hello", "Thank You", "A", "W"]

            x_set = []
            y_set = []

            num_predictions = len(prediction_strings)

            # Ensure hand keypoints exist before doing classification
            try:
                if 0 <= frame_counter < 20:
                    rightHandKeypoints = datum.handKeypoints[1]
                    for entries in rightHandKeypoints:
                        for hand_entry in hand_data:
                            conf_level_sum += entries[hand_entry][2]
                            x_set.append(entries[hand_entry][0])
                            y_set.append(entries[hand_entry][1])

                    xy_set = xy_set + x_set
                    xy_set = xy_set + y_set

                    bottom_left = "Frame: " + str(frame_counter)
                    cv2.putText(op_frame,
                                bottom_left, (10, 100),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1,
                                color=(0, 0, 0),
                                thickness=2)

                    frame_counter += 1
                elif frame_counter == 20:
                    xy_set = np.asarray(xy_set, dtype=np.float32)
                    xy_set = xy_set.reshape(1, -1)
                    transformer = Normalizer().fit(xy_set)
                    X_test = transformer.transform(xy_set)

                    # Prediction occurs here, on the normalized features
                    predictions = tf_model.predict(X_test)
                    predictions = predictions.flatten()

                    # Issue here is that conf_level_sum works until last iteration
                    print("conf_level_sum: " + str(conf_level_sum))
                    conf_level = conf_level_sum / num_predictions / 10
                    print("conf_level: " + str(conf_level))

                    if conf_level > .85:
                        predictionToDisplay = prediction_strings[np.argmax(
                            predictions)]
                    else:
                        predictionToDisplay = "N/A"

                    print("prediction: " + str(predictionToDisplay))

                    conf_level_sum = 0
                    frame_counter += 1

                elif 21 <= frame_counter < 50:
                    # Show prediction
                    cv2.putText(op_frame,
                                "Result: " + predictionToDisplay, (10, 100),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1,
                                color=(0, 0, 0),
                                thickness=2)

                    frame_counter += 1

                elif frame_counter == 50:
                    frame_counter = 0
                    xy_set = []
                    # clear prediction

            except Exception as e:
                cv2.putText(op_frame,
                            "Restarting, please put hand in frame.", (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=1,
                            color=(0, 0, 0),
                            thickness=2)
                print(e)
                conf_level_sum = 0
                frame_counter = 0
                xy_set = []
                pass

            cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
            cv2.imshow(window_name, op_frame)

            if cv2.waitKey(1) == ord('q'):
                cap.release()
                cv2.destroyAllWindows()
                print("Terminating Program")
                exit()

    except Exception as e:
        print(e)
        sys.exit(-1)
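
The classification path above flattens 20 frames of right-hand keypoints into one feature vector (all x coordinates, then all y coordinates, per frame). The per-frame feature construction, isolated as a sketch; datum.handKeypoints[1] holds the right-hand keypoints of each detected person, and the model's expected layout is the project's own:

def hand_frame_features(hand_keypoints):
    """hand_keypoints: (num_people, 21, 3) right-hand keypoints for one frame.
    Returns the x list, y list, and summed confidence for the first person."""
    entries = hand_keypoints[0]  # 21 (x, y, confidence) keypoints
    xs = [float(kp[0]) for kp in entries]
    ys = [float(kp[1]) for kp in entries]
    conf = float(sum(kp[2] for kp in entries))
    return xs, ys, conf
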
Code Example #9
def main():
    global speed, steeringAngle, status, last_status, selected_direction, RightWirst_y, RightWirst_x, LeftWirst_y, LeftWirst_x, carSerial, accelerations, steering_angles, alpha, MAX_OP_MULTIPLIER, FLIP_STEER
    # Starting serial bluetooth connection
    if sendCommandsToCar:
        carSerial = ConnectToSerial()
        if carSerial is None:
            print("No port selected, exiting...")
            sys.exit(-2)

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Start webcam with VideoCapture. 0 -> use default webcam
    # WINDOW_NORMAL allows dynamic window resizing at run time
    # resizeWindow sets the output window dimensions. RESOLUTION -> 4:3
    cv2.namedWindow('OPtoROBO', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('OPtoROBO', 1000, 750)
    stream = cv2.VideoCapture(source)
    time.sleep(2)
    if stream is None:
        print("No stream at " + str(source))
        sys.exit(-3)
    if not stream.isOpened():
        print("Stream at " + str(source) + " unvailable. Unable to open.")
        sys.exit(-4)

    # Frame counter
    counter = 0
    # Execution time
    start = time.time()
    fps = 0.0
    fps_last_time = time.time() - 1000
    fps_last_counter = counter - 1
    quit_count = 0

    error_op_counter = 0

    # moving average filter
    ret, img = stream.read()
    avg = np.float32(img)

    while True:
        # Update fps
        if counter % 5 == 1:
            fps = (counter - fps_last_counter) / (time.time() - fps_last_time)
            fps_last_time = time.time()
            fps_last_counter = counter

        # Frame reader. Each frame will be processed by OpenPose
        ret, img = stream.read()
        # Output flip
        cv2.flip(img, 1, img)

        # Bilateral Filtering
        img = cv2.bilateralFilter(img, 9, 40, 75)

        datum = op.Datum()
        datum.cvInputData = img
        opWrapper.emplaceAndPop([datum])

        # Detecting people control
        if (not datumChecks(datum.poseKeypoints) or str(datum.poseKeypoints) == str(2.0) or str(
                datum.poseKeypoints) == str(0.0)
                or str(datum.poseKeypoints) == str(1e-45)):
            print('NO PEOPLE DETECTED, STAND IN FRONT OF YOUR WEBCAM')
            # time.sleep(.5)
            error_op_counter += 1
            if error_op_counter > (int(fps / MAX_OP_MULTIPLIER)):
                RightWirst_x.append(400.0)
                RightWirst_y.append(440.0)
                LeftWirst_x.append(250.0)
                LeftWirst_y.append(440.0)
            else:
                RightWirst_x.append(RightWirst_x[len(RightWirst_x) - 1])
                RightWirst_y.append(RightWirst_y[len(RightWirst_y) - 1])
                LeftWirst_x.append(LeftWirst_x[len(LeftWirst_x) - 1])
                LeftWirst_y.append(LeftWirst_y[len(LeftWirst_y) - 1])
        else:
            error_op_counter = 0
            try:
                RightWirst_x.append(datum.poseKeypoints[0][7][0])
                RightWirst_y.append(datum.poseKeypoints[0][7][1])
                LeftWirst_x.append(datum.poseKeypoints[0][4][0])
                LeftWirst_y.append(datum.poseKeypoints[0][4][1])
            except Exception as e1:
                print(e1)
                c = 0

        steeringAngle = steering_angle(LeftWirst_x[counter], -LeftWirst_y[counter],
                                       RightWirst_x[counter], -RightWirst_y[counter])

        # Direction and Stop
        # if both hands up
        if (LeftWirst_y[counter] < 380 or RightWirst_y[counter] < 380) and selected_direction != 0:
            # Go time
            status = 1
        else:
            # if one or both hands down into command part
            status = 0
            Stop()
            if (380 < LeftWirst_y[counter] < 480 and 0 < LeftWirst_x[counter] < 160
                    and RightWirst_y[counter] > 380):
                selected_direction = 1
                print('<-------BACKWARD', status)
            elif (380 < RightWirst_y[counter] < 480 and 480 < RightWirst_x[counter] < 640
                  and LeftWirst_y[counter] > 380):
                selected_direction = 2
                print('FORWARD-------->', status)
            elif (((LeftWirst_y[counter] > 380.0 and RightWirst_y[counter] > 380.0) or (
                    LeftWirst_y[counter] == 0.0
                    and RightWirst_y[
                        counter] == 0.0) or
                   (LeftWirst_y[counter] == 0.0 and RightWirst_y[counter] > 380.0)
                   or (LeftWirst_y[counter] > 380.0 and RightWirst_y[counter] == 0.0))
                  and (160 < LeftWirst_x[counter] < 640 and 0 < RightWirst_x[counter] < 480)
                  or (LeftWirst_x[counter] == 0.0 and RightWirst_x[counter] == 0.0)):
                speed = 0
                Stop()
                print('------STOP------', status)

        # If we just exited the stop zone, a Forward or Backward call is needed
        if status == 1 and last_status == 0:
            if selected_direction == 1:
                Backward()
            elif selected_direction == 2:
                Forward()
        # if last_status == 1 and status == 0:
        #    selected_direction = 0

        # Gestures detection
        if status == 1:
            speed = int(speed_value(RightWirst_y[counter], LeftWirst_y[counter]))
            if speed < 0:
                speed = 0
            if (min_angle_front < steeringAngle < max_angle_front and RightWirst_y[counter] < 380.0 and LeftWirst_y[
                counter] < 380.0):
                print('----FRONT----. STATUS: ', status_to_str(), '. SPEED:  ', speed, '. ANGLE: ', 0)
                sendSpeed()
            else:
                if (max_angle_front < steeringAngle < 90.0 and RightWirst_y[counter] < 380.0
                        and LeftWirst_y[counter] < 380.0):

                    print('LEFT---------. STATUS: ', status_to_str(), '. SPEED:  ', speed, '. ANGLE: ',
                          round(steeringAngle, 2))
                    sendSpeed()
                    if FLIP_STEER:
                        steeringAngle = -steeringAngle
                    Steer(FLIP_STEER)
                else:
                    if (-90.0 < steeringAngle < min_angle_front and RightWirst_y[counter] < 380.0
                            and LeftWirst_y[counter] < 380.0):

                        print('--------RIGHT. STATUS: ', status_to_str(), '. SPEED:  ', speed, '. ANGLE: ',
                              round(steeringAngle, 2))
                        sendSpeed()
                        if FLIP_STEER:
                            steeringAngle = -steeringAngle
                        Steer(FLIP_STEER)

        # Output with OpenPose skeleton
        img2 = datum.cvOutputData
        # Show line between hands
        steer_color = steer_front
        if steeringAngle < min_angle_front:
            steer_color = steer_left
        if steeringAngle > max_angle_front:
            steer_color = steer_right
        if not RightWirst_x[counter] == 0 and not LeftWirst_x[counter] == 0:
            cv2.line(img2, pt1=(int(RightWirst_x[counter]), int(RightWirst_y[counter])),
                     pt2=(int(LeftWirst_x[counter]), int(LeftWirst_y[counter])), color=steer_color, thickness=5)

        # Stop Line
        cv2.line(img2, (160, 380), (480, 380), (0, 0, 255), thickness=3)
        cv2.putText(img2, 'STOP', (260, 420), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (0, 0, 255), thickness=2)
        cv2.putText(img2, 'B', (65, 420), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (0, 255, 0), thickness=2)
        cv2.putText(img2, 'F', (545, 420), cv2.FONT_HERSHEY_TRIPLEX, 1.5, (255, 0, 0), thickness=2)
        # Show speedometer
        cv2.putText(img2, "SPD: " + str(speed), (right_UI_status_X, 30), cv2.FONT_HERSHEY_TRIPLEX, .5, (0, 0, 0),
                    thickness=2)
        # Show Speed ui
        cv2.rectangle(img2, (610, (370 - speed)), (630, 370), (0, 255, 0), thickness=-2)
        # Show steerAngle
        cv2.putText(img2, "STR: " + str(int(steeringAngle)), (right_UI_status_X, 60), cv2.FONT_HERSHEY_TRIPLEX, .5,
                    (0, 0, 0),
                    thickness=2)
        # Show Fps
        cv2.putText(img2, "FPS: " + str(round(fps, 1)), (right_UI_status_X, 90), cv2.FONT_HERSHEY_TRIPLEX, .5,
                    (0, 0, 0),
                    thickness=2)
        # Show Mode
        if status == 0:
            cv2.putText(img2, 'STOP MODE', (20, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 0, 255), thickness=2)
            if selected_direction == 1:
                cv2.putText(img2, 'BACKWARD', (170, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (255, 0, 0), thickness=2)
            elif selected_direction == 2:
                cv2.putText(img2, 'FORWARD', (170, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (255, 0, 0), thickness=2)
        elif status == 1:
            cv2.putText(img2, 'GO MODE', (20, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 255, 0), thickness=2)
            if selected_direction == 1:
                cv2.putText(img2, 'BACKWARD', (170, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (255, 0, 0), thickness=2)
            elif selected_direction == 2:
                cv2.putText(img2, 'FORWARD', (170, 30), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (255, 0, 0), thickness=2)
        # Quitting progress bar
        if quit_count != 0:
            cv2.putText(img2, ' Quitting...', (10, 60), cv2.FONT_HERSHEY_TRIPLEX, 0.7, (0, 255, 0), thickness=2)
            cv2.rectangle(img2, (22, 70), (122, 90), (0, 255, 0), thickness=-2)
            cv2.rectangle(img2, (22 + 10 * quit_count, 70), (122, 90), (255, 255, 255), thickness=-2)

        # Backward/Forward zones
        cv2.rectangle(img2, (0, 380), (160, 480), (0, 255, 0), thickness=2)
        cv2.rectangle(img2, (480, 380), (640, 480), (255, 0, 0), thickness=2)

        img2 = imshow_img(img2, avg, alpha)

        cv2.imshow('OPtoROBO', img2)

        if speed >= 370:
            c = 0

        counter = counter + 1
        last_status = status

        # Quit gesture
        try:
            if (380 < LeftWirst_y[counter] < 480 and 0 < LeftWirst_x[counter] < 160
                    and 380 < RightWirst_y[counter] < 480 and 480 < RightWirst_x[counter] < 640):
                cv2.rectangle(img, (160, 400), (480, 420), (0, 255, 0), thickness=2)
                quit_count = quit_count + 1
                if quit_count > 10:
                    break
            else:
                quit_count = 0
        except Exception as e1:
            print(e1)
            c = 0

        # q, Q == quit, STOP SCRIPT; NB: waitKey MUST BE 1
        key = cv2.waitKey(1)
        if key == ord('q') or key == ord('Q'):
            break

    end = time.time()
    # Resources release
    stream.release()
    cv2.destroyAllWindows()
    total_time = end - start

    # Time and fps
    print(
        '-----------------------------------------------------------------------------------------------------------')
    print('Total script execution time : ', total_time)
    print('FPS: ', counter / total_time)
    print(
        '-----------------------------------------------------------------------------------------------------------')
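
steering_angle is not included in the excerpt. Given how it is called (left wrist x, negated y, right wrist x, negated y, i.e. flipped into a y-up frame), a plausible implementation is the inclination of the line joining the wrists. This is an assumption, not the project's code:

import math

def steering_angle(lx, ly, rx, ry):
    """Inclination, in degrees, of the line from the right wrist to the left
    wrist; 0 means a level 'steering wheel', positive means left hand higher."""
    return math.degrees(math.atan2(ly - ry, lx - rx))
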
Code Example #10
    def pose_estimation(self):
        # load openpose python api
        if self.arg.openpose is not None:
            # sys.path.append('{}/python'.format(self.arg.openpose))
            # sys.path.append('{}/build/python'.format(self.arg.openpose))
            # print(self.arg.openpose + '/build/python/openpose/Release')
            sys.path.append(self.arg.openpose +
                            '/build/python/openpose/Release')
            # sys.path.append(self.arg.openpose + '\\build\\x64\\Release')
            # sys.path.append(self.arg.openpose + '\\build\\bin')

            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + self.arg.openpose + '/build/x64/Release;' + self.arg.openpose + '/build/bin;'
            print(sys.path)
        try:
            # from openpose import pyopenpose as op
            import pyopenpose as op
        except ImportError:
            print('Cannot find the OpenPose Python API.')
            return

        video_name = self.arg.video.split('/')[-1].split('.')[0]

        # initiate
        opWrapper = op.WrapperPython()
        params = dict(model_folder='./models', model_pose='COCO')
        opWrapper.configure(params)
        opWrapper.start()
        self.model.eval()
        video_capture = cv2.VideoCapture(self.arg.video)
        video_length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        pose_tracker = naive_pose_tracker(data_frame=video_length)

        # pose estimation
        start_time = time.time()
        frame_index = 0
        video = list()
        pose_list = []
        pbar = tqdm.tqdm(total=video_length)
        while True:

            # get image
            ret, orig_image = video_capture.read()
            if orig_image is None:
                break
            source_H, source_W, _ = orig_image.shape
            orig_image = cv2.resize(orig_image,
                                    (self.arg.shortedge, self.arg.shortedge *
                                     source_H // source_W))
            H, W, _ = orig_image.shape
            video.append(orig_image)

            # pose estimation
            datum = op.Datum()
            datum.cvInputData = orig_image
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            multi_pose = datum.poseKeypoints  # (num_person, num_joint, 3)
            if multi_pose is None or len(multi_pose.shape) != 3:
                continue
            my_pose = multi_pose.copy()
            my_pose = self.__get_prior_person(my_pose)
            pose_list.append(my_pose)
            # normalization
            multi_pose[:, :, 0] = multi_pose[:, :, 0] / W
            multi_pose[:, :, 1] = multi_pose[:, :, 1] / H
            multi_pose[:, :, 0:2] = multi_pose[:, :, 0:2] - 0.5
            multi_pose[:, :, 0][multi_pose[:, :, 2] == 0] = 0
            multi_pose[:, :, 1][multi_pose[:, :, 2] == 0] = 0

            # pose tracking
            pose_tracker.update(multi_pose, frame_index)
            frame_index += 1
            pbar.update(1)
            # print('Pose estimation ({}/{}).'.format(frame_index, video_length))

        data_numpy = pose_tracker.get_skeleton_sequence()
        pbar.close()
        # np.save('stgcn_data_numpy_original.npy', data_numpy)
        return video, data_numpy, pose_list
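
The normalization step in example #10 (scale coordinates by the frame size, shift to be centered on zero, and zero out undetected joints) is the usual preprocessing for ST-GCN-style skeleton models. Isolated for clarity, matching the in-place arithmetic above:

import numpy as np

def normalize_pose(multi_pose, W, H):
    """multi_pose: (num_person, num_joint, 3) OpenPose output, modified in place.
    x, y are mapped to roughly [-0.5, 0.5]; zero-confidence joints are zeroed."""
    multi_pose[:, :, 0] = multi_pose[:, :, 0] / W - 0.5
    multi_pose[:, :, 1] = multi_pose[:, :, 1] / H - 0.5
    undetected = multi_pose[:, :, 2] == 0
    multi_pose[:, :, 0][undetected] = 0
    multi_pose[:, :, 1][undetected] = 0
    return multi_pose
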
Code Example #11
File: findPose.py  Project: nlpark/justdance
def findPose(videoPath, resultsPath):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        # parser.add_argument("--image_path", default="../../../examples/media/COCO_val2014_000000000428.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
        parser.add_argument("--video", default=videoPath)
        # parser.add_argument("--number_people_max", default = "2")
        # parser.add_argument("--write_video", default= "testResults.avi")
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"
        params["number_people_max"] = 2
        params["write_images"] = resultsPath
        # params["write_video"] = "testResults.avi"

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1])-1: next_item = args[1][i+1]
            else: next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-','')
                if key not in params:  params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-','')
                if key not in params: params[key] = next_item

        print("args", args)
        print("params",params)

        # Construct it from system arguments
        # op.init_argv(args[1])
        # oppython = op.OpenposePython()

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Video
        datum = op.Datum()
        capture = cv2.VideoCapture(args[0].video)
        fps = capture.get(cv2.CAP_PROP_FPS)
        
        frames = []

        print('fps: ', fps)
        i = 0
        count = 0

        while capture.isOpened():
            ret, frame = capture.read()
            if not ret:
                break
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])

            keypoints = datum.poseKeypoints
            frame = buildFrame25(keypoints, i)
            frames.append(frame)

            # print("Body keypoints: \n" + str(keypoints))

            cv2.imwrite(resultsPath + '/video' + str(i) + '.jpg', datum.cvOutputData)
            # cv2.waitKey(20)
            # cv2.imwrite('video'+str(i)+'.jpg',frame)
            i += 1
            count += int(fps)
            # Jump ahead one second's worth of frames before the next read
            capture.set(cv2.CAP_PROP_POS_FRAMES, count)

            # if count >= 100:
            #     break
    
        capture.release()

        return frames

    except Exception as e:
        print(e)
        sys.exit(-1)
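
buildFrame25 is defined elsewhere in this project. What the loop itself demonstrates is sampling one frame per second by seeking with CAP_PROP_POS_FRAMES instead of decoding every frame. The same idea in isolation, as a sketch (note that seeking can be slow or imprecise with some codecs):

import cv2

def sample_one_frame_per_second(video_path):
    """Yield one decoded frame per second of video by seeking between reads."""
    cap = cv2.VideoCapture(video_path)
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 1
    pos = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        yield frame
        pos += fps
        cap.set(cv2.CAP_PROP_POS_FRAMES, pos)
    cap.release()
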
Code Example #12
def main():
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
                # from openpose import *
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
                # from openpose import *
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../../models/"
        params["hand"] = True
        params["hand_detector"] = 2
        params["body"] = 1
        params["net_resolution"] = '320x192'  #20*11
        # params["face"] = True
        # params["disable_blending"] = True
        # params["fps_max"] = 5
        handRectangles = [[
            op.Rectangle(128, 0, 1024, 1024),
            op.Rectangle(0., 0., 0., 0.)
        ]]
        # device = 'cuda' if torch.cuda.is_available() else 'cpu'
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        datum.handRectangles = handRectangles
        cam = cv2.VideoCapture(0)  # modify here for camera number
        cam.set(3, 1280)
        cam.set(4, 1024)
        pair_poseKeypoints = [[], []]
        input_hands = []
        prev_state = None
        msg_state = ('not_sent', time.perf_counter())
        while cv2.waitKey(1) != 27:  # loop until Esc is pressed
            if msg_state[0] == 'sent':
                if time.perf_counter() - msg_state[1] > 2.5:
                    msg_state = ('not_sent', time.perf_counter())

            ret, frame = cam.read()
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])
            frame = datum.cvOutputData
            '''If Person not in Camera'''
            if datum.poseKeypoints.shape == ():
                # conn.request("POST", "/v2/chat/users/[email protected]/messages", payload, headers)
                #
                # res = conn.getresponse()
                # data = res.read()
                #
                # print(data.decode("utf-8"))
                if msg_state[0] == 'not_sent':
                    # print('WHY NOT WORKING')
                    conn.request(
                        "POST",
                        "/v2/chat/users/[email protected]/messages",
                        payload_absent, headers)
                    #
                    res = conn.getresponse()
                    data = res.read()

                    print(data.decode("utf-8"))
                    msg_state = ('sent', time.perf_counter())
                font = cv2.FONT_HERSHEY_SIMPLEX
                fontScale = 3
                fontColor = (255, 255, 0)
                lineType = 2
                fontThickness = 2
                msg_on_screen = 'ABSENT!'
                textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                           fontThickness)[0]
                bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                          (1024 + textsize[1]) // 2)

                cv2.rectangle(frame, (0, 0), (1280, 1024), (0, 0, 255), 20)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
                cv2.imshow("Openpose 1.4.0 Webcam", frame)
                continue
            '''Evaluate Movement & Confidence'''
            del pair_poseKeypoints[0]
            pair_poseKeypoints.append(datum.poseKeypoints[0])
            body_confidence_avg = avg_pose_confidence(datum.poseKeypoints[0])
            # print(body_confidence_avg)
            moved = metric(pair_poseKeypoints)
            '''Evaluate Hand Gesture'''
            if len(input_hands) == 12:
                del input_hands[0]
            input_hands.append(datum.handKeypoints[0][0])
            # print(len(input_hands))
            prob, gesture = None, None
            hand_confidence_avg = avg_list_confidence(input_hands)
            # if len(input_hands) == 12 and avg >= 0.1:
            if len(input_hands) == 12:
                # print('Confidence : ', hand_confidence_avg)
                prob, gesture = get_hand_gesture('normalizev2.pt', input_hands,
                                                 'cuda')
            # print(prob, gesture)
            '''Output Recognition Results'''
            print_msg = False
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = None
            fontColor = None
            fontScale = 3
            fontThickness = 2
            msg_on_screen = None

            if valid_hand(hand_confidence_avg, gesture) and gesture == 1:
                print('THUMBS DOWN PROB : ', prob)
                if prob > 11:
                    '''Counter'''

                    if prev_state is None:
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_down', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_down':
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsdown, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:
                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (255, 0, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS DOWN'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())

            elif valid_hand(hand_confidence_avg, gesture) and gesture == 2:
                print('THUMBS UP PROB : ', prob)
                '''Counter'''
                if prob > 11:
                    if prev_state is None:
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_up', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_up':
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsup, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:

                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (0, 255, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS UP'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())

            elif valid_hand(hand_confidence_avg, gesture) and gesture == 4:
                print('RAISE HAND PROB : ', prob)
                '''Counter'''
                if prev_state is None:
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 5.5:
                        prev_state = ('raise_hand', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'raise_hand':
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(time.perf_counter() - prev_state[1])
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_raisehand, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())
                    if time.perf_counter() - prev_state[1] > 0.5:
                        print_msg = True
                        bottomLeftCornerOfText = (450, 500)
                        fontColor = (0, 255, 255)
                        fontScale = 3
                        msg_on_screen = 'HAND RAISED'
                        textsize = cv2.getTextSize(msg_on_screen, font,
                                                   fontScale, fontThickness)[0]
                        bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                                  (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())

            elif moved:
                '''Counter'''
                if prev_state is None:
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 1.5:
                        prev_state = ('detect_move', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'detect_move':
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(msg_state)
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_movement, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())

                    print_msg = True
                    bottomLeftCornerOfText = (150, 500)
                    fontColor = (255, 255, 255)
                    fontScale = 3
                    msg_on_screen = 'MOVEMENT DETECTED'
                    textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                               fontThickness)[0]
                    # print(textsize)
                    bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                              (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())

            if print_msg:
                # print(data.decode("utf-8"))
                # bottomLeftCornerOfText = (550, 500)
                # fontScale = 2
                # fontColor = (255, 0, 0)
                lineType = 2
                cv2.rectangle(frame, (0, 0), (1280, 1024), fontColor, 40)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
            cv2.imshow("Openpose 1.4.0 Webcam", frame)  # datum.cvOutputData
        # Always clean up
        cam.release()
        cv2.destroyAllWindows()

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(e)
        sys.exit(-1)
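# Note: the branches above repeat the same send-once/cool-down logic around
# conn.request. A minimal sketch of that pattern factored into one helper; the
# class and its names are illustrative, not part of the original project.
import time

class Cooldown:
    """Gate an action, then block repeats for `period` seconds."""

    def __init__(self, period=2.5):
        self.period = period
        self.last_sent = None

    def ready(self):
        return (self.last_sent is None
                or time.perf_counter() - self.last_sent > self.period)

    def mark_sent(self):
        self.last_sent = time.perf_counter()

# Inside the frame loop, each repeated block would reduce to:
#     if cooldown.ready():
#         conn.request("POST", url, payload, headers)
#         cooldown.mark_sent()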
コード例 #13
0
def run_video(video,
              path='',
              skip_cog=False,
              skip_track=False,
              plt_graph=False,
              cog_color='black',
              start_frame=0,
              debug=False):
    try:
        logging.shutdown()
        reload(logging)
    except Exception as e:
        raise e
    logger = getLogger("APP_LOG")
    stream_handler = StreamHandler()
    if debug:
        logger.setLevel(logging.DEBUG)
        stream_handler.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
        stream_handler.setLevel(logging.INFO)
    handler_format = Formatter('%(name)s, %(levelname)s:\t%(message)s')
    stream_handler.setFormatter(handler_format)
    logger.addHandler(stream_handler)

    # setting directories to output
    path_movie_out = os.path.join(path, 'movies_estimated')
    path_csv_estimated = os.path.join(path, 'data_estimated')
    path_png_estimated = os.path.join(path, 'png_estimated')
    csv_file = os.path.join(path_csv_estimated, video.rsplit('.')[0] + '.csv')
    os.makedirs(path_movie_out, exist_ok=True)
    os.makedirs(path_png_estimated, exist_ok=True)
    os.makedirs(path_csv_estimated, exist_ok=True)

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # open video
    cap = cv2.VideoCapture(os.path.join(path, 'movies', video))
    logger.info("OPEN: %s" % video)
    if cap.isOpened() is False:
        logger.info("ERROR: opening video stream or file")
    caps_fps = cap.get(cv2.CAP_PROP_FPS)
    ma = MotionAnalysis()
    # CSV FILE SETTING
    segments = [
        "Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow",
        "LWrist", "MidHip", "RHip", "RKnee", "RAnkle", "LHip", "LKnee",
        "LAnkle", "background", "REye", "LEye", "REar", "LEar", "LBigToe",
        "LSmallToe", "LHeel", "RBigToe", "RSmallToe", "RHeel", "head_cog",
        "torso_cog", "r_thigh_cog", "l_thigh_cog", "r_leg_cog", "l_leg_cog",
        "r_foot_cog", "l_foot_cog", "r_arm_cog", "l_arm_cog", "r_forearm_cog",
        "l_forearm_cog", "r_hand_cog", "l_hand_cog"
    ]
    seg_columns = ['frame', 'human_id']
    [seg_columns.extend([x + '_x', x + '_y', x + '_score']) for x in segments]
    df_template = pd.DataFrame(columns=seg_columns)
    df_template.to_csv(csv_file, index=False)

    # processing video
    frame_no = 0
    while cap.isOpened():
        ret_val, image = cap.read()
        if not ret_val:
            break
        if frame_no == 0:
            h_pxl, w_pxl = image.shape[0], image.shape[1]

        # estimate pose
        t = time.time()
        datum = op.Datum()
        datum.cvInputData = image
        opWrapper.emplaceAndPop([datum])
        time_estimation = time.time() - t
        # keypoints
        humans = datum.poseKeypoints

        # calculate and save data
        t = time.time()
        ma.track_humans(frame_no, humans)
        # calculate cog
        bodies_cog = ma.bodies_cog
        # bodies_cog[np.isnan(bodies_cog[:, :, :])] = 0  # to plot, fill nan
        df_frame = pd.DataFrame(ma.humans_current.round(1))
        df_frame.to_csv(csv_file, index=False, header=None, mode='a')
        time_cog = time.time() - t

        # logging to check progress and speed
        if frame_no % int(caps_fps) == 0:
            logger.info('calculation of cog in %.4f seconds.' % time_cog)
            logger.info("Now estimating at:" + str(int(frame_no / caps_fps)) +
                        "[sec]")
            logger.info('inference in %.4f seconds.' % time_estimation)
            logger.debug('shape of image: ' + str(image.shape))
            logger.debug(str(humans))
            logger.info('shape of humans: ' + str(humans.shape))
        if frame_no % int(caps_fps) == 0:
            # for resetting number of graph.
            hum_count = len(ma.humans_id)

        # PLOT Pictures for movie
        img = datum.cvOutputData

        # plot cog & foot lines
        if len(humans.shape) != 0:
            cog_size = (calc_torso_length(humans) / 8).astype(int)
        # if not skip_cog:
        #     for i in range(len(bodies_cog)):
        #         # plot center of gravity
        #         cv2.circle(img, (bodies_cog[i, 14, 0], bodies_cog[i, 14, 1]), cog_size[i, i], color=(0, 0, 0), thickness=-1)
        #         # plot foot line
        #         if bodies_cog[i, 6, 0]:
        #             dotline(img, (bodies_cog[i, 6, 0], 0), (bodies_cog[i, 6, 0], h_pxl), color=(10, 10, 10), thickness=2)
        #         if bodies_cog[i, 7, 0]:
        #             dotline(img, (bodies_cog[i, 7, 0], 0), (bodies_cog[i, 7, 0], h_pxl), color=(10, 10, 10), thickness=2)

        for i, hum in enumerate(np.sort(ma.humans_id)):
            hum_track = ma.humans_tracklet[ma.humans_tracklet[:, 1] == hum]
            hum_track = hum_track.astype(int)
            # plot cog & foot lines
            if not skip_cog:
                cv2.circle(img, (bodies_cog[i, 14, 0], bodies_cog[i, 14, 1]),
                           cog_size[i, i],
                           color=(0, 0, 0),
                           thickness=-1)
                # plot foot line
                if bodies_cog[i, 6, 0]:
                    dotline(img, (bodies_cog[i, 6, 0], 0),
                            (bodies_cog[i, 6, 0], h_pxl),
                            color=(10, 10, 10),
                            thickness=2)
                if bodies_cog[i, 7, 0]:
                    dotline(img, (bodies_cog[i, 7, 0], 0),
                            (bodies_cog[i, 7, 0], h_pxl),
                            color=(10, 10, 10),
                            thickness=2)
                # trajectories of COG
                trajectories = np.array([(pos[39 * 3 + 2], pos[39 * 3 + 3])
                                         for pos in hum_track
                                         if pos[4 * 3 + 2] > 1])
                cv2.polylines(img, [trajectories], False, (0, 0, 0), 3,
                              cv2.LINE_4)

            # plot hands trajectories
            if not skip_track:
                trajectories = np.array([(pos[4 * 3 + 2], pos[4 * 3 + 3])
                                         for pos in hum_track
                                         if pos[4 * 3 + 2] > 1])
                cv2.polylines(img, [trajectories], False,
                              (200, 200, int(hum % 3) * 30), 3, cv2.LINE_4)
                # polydotline(img, [trajectories], (200, 200, int(i%3)*30))
                trajectories = np.array([(pos[7 * 3 + 2], pos[7 * 3 + 3])
                                         for pos in hum_track
                                         if pos[7 * 3 + 2] > 1])
                cv2.polylines(img, [trajectories], False,
                              (int(hum % 3) * 30, 200, int(hum % 3) * 30), 3,
                              cv2.LINE_4)
                # polydotline(img, [trajectories], (200, 200, int(i%3)*30))

        # plt graph of cog rate
        if not plt_graph:
            # save figure
            cv2.imwrite(
                os.path.join(
                    path_png_estimated,
                    video.split('.')[-2] + '{:06d}'.format(frame_no) + ".png"),
                img)
        else:
            graph_row = 6  # if hum_count > 6 else 4
            graph_col = graph_row + 2 + int(
                (hum_count - graph_row if hum_count - graph_row > 0 else 0) /
                graph_row)
            fig = plt.figure(figsize=(16, 8))
            grid_size = (graph_row, graph_col)
            ax_img = plt.subplot2grid(grid_size, (0, 0),
                                      rowspan=graph_row,
                                      colspan=graph_row)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            ax_img.imshow(img)
            if len(ma.humans_id):
                hum_idx = ~np.isnan(ma.humans_current[:, 19 * 3 + 2])
                ax_img.set_xticks(ma.humans_current[hum_idx, 19 * 3 + 2])
                ax_img.set_xticklabels(
                    list((ma.humans_id[hum_idx]).astype(str)))
            # hum_c = ma.humans_current
            for i, hum in enumerate(np.sort(ma.humans_id)):
                if i == (graph_row * (graph_col - graph_row)):
                    break  # count of humans is over the capacity
                hum_track = ma.humans_tracklet[ma.humans_tracklet[:, 1] == hum]
                hum_track = hum_track.astype(int)
                ax_graph = plt.subplot2grid(
                    grid_size, (i - graph_row * int(i / graph_row),
                                graph_row + int(i / graph_row)))
                hum_track = hum_track[~np.isnan(hum_track[:, 19 * 3 + 2])]
                hum_track = hum_track[~np.isnan(hum_track[:, 22 * 3 + 2])]
                if hum_track.shape[0] > 0:
                    foot = (hum_track[:, 39 * 3 + 2] - hum_track[:, 19 * 3 + 2]
                            ) / (hum_track[:, 22 * 3 + 2] -
                                 hum_track[:, 19 * 3 + 2])
                    line1 = ax_graph.plot(hum_track[:, 0], foot)
                    ax_graph.set_ylim([-0.2, 1.2])
                    ax_graph.set_xlim([hum_track[0, 0], hum_track[0, 0] + 30])
                    ax_graph.legend([str(hum)])
            plt.savefig(
                os.path.join(
                    path_png_estimated,
                    video.split('.')[-2] + '{:06d}'.format(frame_no) + ".png"))
            plt.close()
            plt.clf()
        # before increment, renew some args
        frame_no += 1
        gc.collect()
        if cv2.waitKey(1) == 27:
            break
    cv2.destroyAllWindows()

    logger.info("finish estimation & start encoding")
    cmd = [
        "ffmpeg", "-r",
        str(caps_fps), "-start_number",
        str(start_frame), "-i",
        os.path.join(path_png_estimated,
                     video.split('.')[-2] + "%06d.png"), "-vcodec", "libx264",
        "-pix_fmt", "yuv420p",
        os.path.join(path_movie_out,
                     video.split('.')[-2] + ".mp4")
    ]
    subprocess.call(cmd)
    logger.debug('finished')
コード例 #14
0
def main():
    # reset the joints.npy file at start-up
    reset_realtime_skeleton()

    # path where the real-time skeleton frames are appended
    p = Path('/Users/kefei/Documents/real_time_skeletons/joints.npy')

    # Import Openpose (Windows/Ubuntu/OSX)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    try:
        # Windows Import
        if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append(dir_path + '/../../python/openpose/Release')
            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
            import pyopenpose as op
        else:
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append('../../python')
            # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
            # sys.path.append('/usr/local/python')
            from openpose import pyopenpose as op
    except ImportError as e:
        print(
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )
        raise e

    # Flags
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--image_path",
        default="../../../examples/media/COCO_val2014_000000000192.jpg",
        help=
        "Process an image. Read all standard formats (jpg, png, bmp, etc.).")
    args = parser.parse_known_args()

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"

    # Add others in path?
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1: next_item = args[1][i + 1]
        else: next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = next_item

    try:
        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        #Capture video from webcam
        vid_capture = cv2.VideoCapture(0)
        vid_cod = cv2.VideoWriter_fourcc(*'XVID')
        joints = []
        #output = cv2.VideoWriter("videos/cam_video.mp4", vid_cod, 20.0, (640,480))

        t = 0
        s = 0

        while (True):
            # Capture each frame of webcam video
            ret, frame = vid_capture.read()

            # Process Image
            datum = op.Datum()
            #imageToProcess = cv2.imread(args[0].image_path)
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])

            cv2.imshow("My cam video", datum.cvOutputData)  # it was: frame

            keypoints = datum.poseKeypoints  #(1, 25, 3)

            #only save coordinates of one person. only save when detected
            #Maybe set an upper limit, like 200 frames in storage?
            print(keypoints.shape)
            if t % 2 == 0:
                if keypoints.shape == (1, 25, 3):
                    with p.open('ab') as f:
                        s += 1
                        joints.append(keypoints)
                        np.save(f, keypoints)
                        if t > 200:
                            startTime = datetime.now()
                            joints.pop(
                                0
                            )  #remove the first observation. then save the entire 200-frame
                            np.save(
                                '/Users/kefei/Documents/real_time_skeletons/joints.npy',
                                np.asarray(joints))
                            print(datetime.now() - startTime)

                elif keypoints.shape[0] > 1:
                    if keypoints[0, :, :].shape == (25, 3):
                        first_person_points = keypoints[0:1, :, :]
                        print(first_person_points.shape)
                        with p.open('ab') as f:
                            s += 1
                            joints.append(first_person_points)
                            np.save(f, first_person_points
                                    )  #save the first person it sees.
                            if t > 200:
                                joints.pop(0)
                                np.save(
                                    '/Users/kefei/Documents/real_time_skeletons/joints.npy',
                                    np.asarray(joints))
            t += 1

            #output.write(frame)
            # Close and break the loop after pressing "x" key
            k = cv2.waitKey(1)

            if t > 200:
                print('exceeds 200 frames')
            if k == ord('x'):
                break
            elif k == ord('q'):
                print('t=', t)
                break
        # close the already opened camera
        print('t=', t)
        print('s=', s)
        #np.save('/Users/kefei/Documents/joints.npy',joints)
        vid_capture.release()

        # close the already opened file
        #output.release()
        # close the window and de-allocate any associated memory usage
        cv2.destroyAllWindows()

        # Display Image
        #print("Body keypoints: \n" + str(datum.poseKeypoints))
        #cv2.imshow("OpenPose 1.5.0 - Tutorial Python API", datum.cvOutputData)
        #cv2.waitKey(0)
    except Exception as e:
        print(e)
        sys.exit(-1)
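# Note: the loop above appends one array per np.save(f, ...) call to a single
# open file, so reading joints.npy back requires repeated np.load calls on the
# same handle. A minimal reader sketch (the helper name is illustrative):
import numpy as np

def load_appended(path):
    """Read every array written by successive np.save(f, arr) calls."""
    arrays = []
    with open(path, 'rb') as f:
        while True:
            try:
                arrays.append(np.load(f))
            except (EOFError, ValueError, OSError):
                break  # no data left in the file
    return arrays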
コード例 #15
0
def openpose_demo(canvas):
    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"
    params["hand"] = True

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    kinect_ir = PyKinectRuntime.PyKinectRuntime(
        PyKinectV2.FrameSourceTypes_Infrared
        | PyKinectV2.FrameSourceTypes_Depth)
    kinect_color = PyKinectRuntime.PyKinectRuntime(
        PyKinectV2.FrameSourceTypes_Color | PyKinectV2.FrameSourceTypes_Body
        | PyKinectV2.FrameSourceTypes_Depth)
    depth_width, depth_height = kinect_ir.depth_frame_desc.Width, kinect_ir.depth_frame_desc.Height

    before_form = []
    current_form = []
    counter = 0
    global rootFlag
    while (True):

        frame_ir = kinect_ir.get_last_infrared_frame()
        frame_ir = np.uint8(frame_ir.clip(1, 4080) / 16.)
        frame_ir = np.reshape(frame_ir,
                              (depth_height, depth_width)).astype(np.uint8)
        ret, thresh1 = cv2.threshold(frame_ir, 130, 255, cv2.THRESH_BINARY)
        thresh1 = correction(thresh1)
        thresh1 = cv2.cvtColor(thresh1, cv2.COLOR_GRAY2RGB)
        M = np.float32([[1, 0, 7], [0, 1, 0]])
        kernel = np.ones((5, 5), np.uint8)
        thresh1 = cv2.dilate(thresh1, kernel, iterations=1)
        thresh1 = cv2.warpAffine(thresh1, M, (512, 424))

        frame_color = kinect_color.get_last_color_frame()
        frame_color = frame_color.reshape((1080, 1920, -1)).astype(np.uint8)
        scale = max(512 / frame_color.shape[1], 424 / frame_color.shape[0])
        frame_color = cv2.resize(frame_color, dsize=None, fx=scale, fy=scale)
        frame_color = frame_color[:, 121:633]
        frame_color = cv2.cvtColor(frame_color, cv2.COLOR_RGBA2RGB)

        frame = cv2.bitwise_and(frame_color, thresh1)

        frame_ir = None
        frame_color = None

        # Process Image
        datum = op.Datum()
        imageToProcess = frame
        datum.cvInputData = imageToProcess

        opWrapper.emplaceAndPop([datum])

        cv2.namedWindow("OpenPose 1.5.0 - Tutorial Python API",
                        cv2.WINDOW_KEEPRATIO
                        | cv2.WINDOW_NORMAL)  # make the window resizable
        cv2.imshow("OpenPose 1.5.0 - Tutorial Python API", datum.cvOutputData)

        #print(datum.handKeypoints[1][0])
        try:
            right_hand, flag = hm.is_hand_recog(datum.handKeypoints[1][0])
        except Exception as e:
            continue
        # depending on which camera is selected, "IndexError: too many indices for array" can be raised here

        if flag == True:
            current_form = hm.check_handform2(right_hand)
            print(hm.list_to_num(current_form))
            #print(current_form,counter)
            if current_form == before_form:  # compare the current shape with the previous frame's
                counter = counter + 1  # if they match, increment the counter
                if (counter == 2):  # once the same shape has been seen 2 frames in a row
                    canvas.delete("all")
                    n = hm.list_to_num(current_form)  # map the hand shape to a number
                    try:
                        canvas.create_text(300, 300, text=n, font=('', 350))
                    except Exception as e:
                        break
            else:
                counter = 0  # otherwise reset the counter
            before_form = current_form
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if rootFlag == False:
            break
    cv2.destroyAllWindows()
コード例 #16
0
    def __init__(self, mode, with_sound):
        self.cam = cv2.VideoCapture(0)  # modify here for camera number
        self.datum_hand = op.Datum()
        self.hand_opWrapper = self.get_opWrapper_hand()
        self.mode = mode
        self.with_sound = with_sound
コード例 #17
0
    def run(self, frame):
        height = []
        width = []
        center = []

        # Process Image
        datum = op.Datum()
        datum.cvInputData = frame
        self.opWrapper.emplaceAndPop([datum])
        img_rd = datum.cvOutputData

        # fall judge
        try:
            # key points have been identified
            x = datum.poseKeypoints[0][:, 0]
            y = datum.poseKeypoints[0][:, 1]

            width.append(np.max(x[np.nonzero(x)]) - np.min(x[np.nonzero(x)]))
            height.append(np.max(y[np.nonzero(y)]) - np.min(y[np.nonzero(y)]))

            center.append(np.mean(x[np.nonzero(x)]))
            center.append(np.mean(y[np.nonzero(y)]))

            if self.frame_start_time == 0:
                self.center0 = center.copy()
                self.width0 = width.copy()
                self.height0 = height.copy()
                self.frame_start_time = time.time()
            else:
                diff = np.array(
                    [center[0] - self.center0[0], center[1] - self.center0[1]])
                dist = math.sqrt(np.sum((diff * 10**(-4))**2))
                now = time.time()
                v = dist / (now - self.frame_start_time)
                a = (v**2 - self.v0**2) / (2 * dist)

                # print(v, abs(a))
                if (abs(a) > 0.2) and \
                        (np.subtract(np.array(width), np.array(height)) > np.subtract(np.array(self.width0), np.array(
                            self.height0)) and np.subtract(np.array(width), np.array(height)) > 0):
                    self.couter += 1
                    # print("alarm by v and a")
                elif (width > height and (x[8] != 0 or x[9] != 0 or x[12] != 0)
                      and v < 1):
                    self.couter += 1
                    # print("alarm by w and h")
                else:
                    if self.error == 0:
                        self.error += 1
                    else:
                        self.couter = 0
                        self.error = 0

                if self.couter > 3:
                    font = ImageFont.truetype("simsun.ttc", 30, index=1)
                    img_rd = Image.fromarray(
                        cv2.cvtColor(datum.cvOutputData, cv2.COLOR_BGR2RGB))
                    draw = ImageDraw.Draw(img_rd)
                    draw.text((10, 10),
                              text="Fall Detected",
                              font=font,
                              fill=(255, 0, 0))
                    img_rd = cv2.cvtColor(np.array(img_rd), cv2.COLOR_RGB2BGR)
                    cv2.imwrite('fall_detection.jpg', frame)
                    t = threading.Thread(target=post,
                                         kwargs={'event': 3,
                                                 'imagePath': 'fall_detection.jpg'})
                    t.daemon = False
                    t.start()
                    # status = post(event=3, imagePath='fall_detection.jpg')
                    # print("fall")

                # update variables
                self.frame_start_time = now
                self.v0 = v
                self.width0 = width.copy()
                self.height0 = height.copy()
            # if width > height:
            #     print("alarm")
            self.firstFrame = None
        except Exception as e:
            gray = frame_process.preprocess_frame(frame)

            if self.firstFrame is None:
                self.firstFrame = gray
                pass

            frameDelta = cv2.absdiff(self.firstFrame, gray)

            cnts = frame_process.get_contours(self.firstFrame, gray)

            defined_min_area = 3000
            frame, self.alert = algorithm_fall.fall_detect(
                cnts, defined_min_area, frame, self.prevX, self.prevY,
                self.xList, self.yList, self.centerV, self.alert)

            img_rd = frame
            # cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", frame)

        frame = cv2.resize(img_rd, (640, 480))
        # cv2.imshow("OpenPose 1.6.0 - Tutorial Python API", img_rd)
        return frame


# http://zhuooyu.cn:8000/api/person/old/10
コード例 #18
0
def video_start():
    global SSF_Wait, SLL_Wait, Pose, n, db, canvas, video_flag, cap, sx, sy, cursor, fill_line, canvas3, V
    '''Configure the model'''
    params = dict()  # create a parameter dict
    params["model_folder"] = model  # model path
    params["model_pose"] = "BODY_25"  # choose the pose model
    params["number_people_max"] = 2  # max number of people to detect
    params["display"] = 2  # 2D mode

    opWrapper = op.WrapperPython()
    opWrapper.configure(params)  # apply the params above
    opWrapper.start()
    '''Configure the model'''
    cap = cv2.VideoCapture(0)  # open the webcam
    Initial = 0
    video_flag = True
    week = str(strftime("%A", localtime()))
    while video_flag:  # run while the camera is enabled
        SSF_Wait = 5
        SLL_Wait = 2  # speed up the video
        cap.open(0)
        ret, Frame = cap.read()
        H = round(0.63 * sy)
        W = round(0.906 * sy)
        Frame = cv2.resize(Frame, (W, H))  # resize the image
        imageToProcess = cv2.cvtColor(Frame, cv2.COLOR_RGB2BGR)  # swap the RGB/BGR channel order
        BGR = imageToProcess

        '''Compute keypoints'''
        datum = op.Datum()
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # print("Body keypoints: \n" + str(datum.poseKeypoints))
        Pose = datum.poseKeypoints  # keypoint array; None when no person is in frame
        # BGR = datum.cvOutputData  # imageToProcess with the detected skeleton drawn on it
        '''Compute keypoints'''

        '''Initial values'''
        if Initial == 0:  # assign initial values
            '''SSF'''
            n = 0  # default user = 0
            j = 0
            j1 = 0
            j2 = 0
            j3 = 0  # four state samples
            SSF = 0  # repetition count
            SSF_Angle = 270  # current angle
            SSF_Max = 270  # maximum arm raise
            SSF_Time = 0  # measurement interval
            SSF_Num = 3  # samples per interval
            SSF_Hold = 0  # hold time
            SSF_Flag = 0  # hold timer closed
            SSF_start = 0  # hold start time
            SSF_Deviation = 0  # angle change per 0.18 s
            SSF_Sum = 0  # sum of angle changes
            hip_angle = 90  # hip angle
            helper = 'None'
            string2 = 'None'
            comps = 0  # compensation angle
            SSF_Row = 1  # start recording at row 1
            '''SLL'''
            i = 0
            i1 = 0
            i2 = 0
            i3 = 0
            SLL = 0
            SLL_Angle = 120  # above 120 counts as starting to lift the leg
            SLL_Max = 90
            SLL_Time = 0  # measurement interval
            SLL_Num = 3  # samples per interval
            SLL_Hold = 0  # hold time
            SLL_Flag = 0  # hold timer closed
            SLL_start = 0  # hold start time
            SLL_Deviation = 0  # angle change per 0.18 s
            SLL_Sum = 0  # sum of angle changes
            SLL_Row = 1  # start recording at row 1
            SLL_string2 = 'None'
            # current angle
            V = 1
            Initial = 1  # initialization done
        '''Initial values'''

        '''Model measurement'''
        if Pose is not None:

            '''Helper'''
            if Pose.shape[0] == 1:
                n = 0  # Only one person
                helper = 'Yes!'
            else:
                if Pose[1][8][2] != 0 and Pose[1][9][2] != 0 and Pose[1][12][2] != 0:
                    if Pose[0][8][2] != 0 and Pose[0][9][2] != 0 and Pose[0][12][2] != 0:
                        if Pose[0][8][1] > Pose[1][8][1] and Pose[0][9][1] > Pose[1][9][1] and Pose[0][12][1] > \
                                Pose[1][12][1]:
                            n = 0  # person 0 is the one sitting
                        else:
                            n = 1
                    else:
                        n = 1
                else:
                    n = 0
                helper = 'No'
            '''Helper'''  # n = user

            '''Compensation'''
            # neck_angle = round(Angle(5, 1, 8), 0)
            comps = np.arctan(abs((Pose[n][1][0] - Pose[n][8][0])) / (Pose[n][8][1] - Pose[n][1][1])) * 180 / np.pi
            comps = round(comps, 0)
            if comps < 10:
                string2 = 'Straight!'
                comps = 0
            else:
                if Pose[n][1][0] > Pose[n][8][0]:
                    string2 = 'Left Comps! ' + str(comps)
                else:
                    string2 = 'Right Comps! ' + str(abs(comps))

            if SSF_Max > Angle(1, 2, 3) and j == 1 and j1 == 1 and j2 == 1 and j3 == 1:
                SSF_Max = round(Angle(1, 2, 3), 0)  # track the smallest angle while the arm is raised
                # real_angle > Angle(1, 2, 3) - (180 - hip_angle - neck_angle)
            '''Compensation'''

            '''SSF'''
            if Pose[n][4][2] != 0 and Pose[n][2][2] != 0 and Pose[n][1][2] != 0 and Pose[n][3][2] != 0:
                '''Num'''
                SSF_Deviation = abs(SSF_Angle - Angle(1, 2, 3))
                SSF_Angle = Angle(1, 2, 3)
                if Pose[n][4][1] < Pose[n][2][1]:  # hands up
                    j3 = 1
                else:  # hands down
                    j3 = 0
                if j == 0 and j1 == 0 and j2 == 1 and j3 == 1:  # 0011 = arm just raised
                    SSF = SSF + 1
                j = j1
                j1 = j2
                j2 = j3
                '''Num'''

                '''Hold time'''
                if j == 1 and j1 == 1 and j2 == 1 and j3 == 1:  # 1111 = hands up
                    SSF_Time = SSF_Time + 1  # in units of 0.18 s
                    if SSF_Time % SSF_Num == 0:  # time to judge
                        if SSF_Sum / SSF_Num < 6:  # holding?
                            if SSF_Flag == 0:
                                SSF_start = time()
                                SSF_Flag = 1
                        else:
                            SSF_Flag = 0
                        SSF_Sum = 0
                    else:  # accumulate the angle deviation
                        SSF_Sum = SSF_Deviation + SSF_Sum
                else:
                    SSF_Flag = 0
                if SSF_Flag == 1 and round(time() - SSF_start, 2) > 1:  # take the final hold value
                    SSF_Hold = round(time() - SSF_start, 2)
                '''Hold time'''

                '''Fill in database'''
                if j == 1 and j1 == 1 and j2 == 0 and j3 == 0:  # 1100 = arm just lowered
                    sql = "INSERT INTO " + week + "(SSF_Times, SSF_Angle, SSF_Hold, SSF_Comps ,SSF_DIY) VALUES (%s, %s, " \
                                                  "%s, %s, %s); "
                    cursor.execute(sql, (SSF_Row, 270 - SSF_Max, SSF_Hold, comps, helper))
                    db.commit()
                    SSF_Row = SSF_Row + 1
                    SSF_Max = 270
                    SSF_Hold = 0
                '''Fill in database'''

            else:
                BGR = cv2.putText(BGR, 'Move left', (5, H - 90), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
            '''SSF'''
            ###################################################################################################
            '''Sitting Leg Lift'''
            if Pose[n][9][2] != 0 and Pose[n][10][2] != 0 and Pose[n][11][2] != 0:

                '''COMPS'''
                SLL_comps = np.arctan(
                    abs((Pose[n][9][1] - Pose[n][10][1]) / (Pose[n][10][0] - Pose[n][9][0]))) * 180 / np.pi
                SLL_comps = round(SLL_comps, 0)
                if SLL_comps < 15:
                    SLL_string2 = 'Horizontal'
                    SLL_comps = 0
                else:
                    SLL_string2 = 'Comps! ' + str(abs(SLL_comps))
                '''COMPS'''
                '''Num'''
                SLL_Deviation = SLL_Angle - Angle(9, 10, 11)
                SLL_Angle = Angle(9, 10, 11)
                if SLL_Max < SLL_Angle:
                    SLL_Max = round(SLL_Angle, 0)
                if SLL_Angle > 120:  # leg up
                    i3 = 1
                else:  # leg down
                    i3 = 0
                if i == 0 and i1 == 0 and i2 == 1 and i3 == 1:  # 0011 = leg just raised
                    SLL = SLL + 1
                i = i1
                i1 = i2
                i2 = i3
                '''Num'''

                '''Hold time'''
                if i == 1 and i1 == 1 and i2 == 1 and i3 == 1:  # 1111 = leg up
                    SLL_Time = SLL_Time + 1  # in units of 0.18 s
                    if SLL_Time % SLL_Num == 0:  # time to judge
                        if SLL_Sum / SLL_Num < 6:  # holding?
                            if SLL_Flag == 0:
                                SLL_start = time()
                                SLL_Flag = 1
                        else:
                            SLL_Flag = 0
                        SLL_Sum = 0
                    else:  # accumulate the angle deviation
                        SLL_Sum = SLL_Deviation + SLL_Sum
                else:
                    SLL_Flag = 0
                if SLL_Flag == 1 and round(time() - SLL_start, 2) > 1:  # take the final hold value
                    SLL_Hold = round(time() - SLL_start, 2)
                '''Hold time'''

                '''Fill in database'''
                if i == 1 and i1 == 1 and i2 == 0 and i3 == 0:  # 1100 = leg just lowered
                    sql = "INSERT INTO " + week + "(SLL_Times, SLL_Angle, SLL_Hold, SLL_Comps ,SLL_DIY) VALUES (%s, %s, " \
                                                  "%s, %s, %s); "
                    cursor = db.cursor()
                    cursor.execute(sql, (SLL_Row, SLL_Max, SLL_Hold, SLL_comps, helper))
                    db.commit()
                    SLL_Row = SLL_Row + 1
                    SLL_Max = 90
                    SLL_Hold = 0
                '''Fill in database'''

            else:
                BGR = cv2.putText(BGR, 'Move left', (5, H - 90), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)

            '''SLL'''
        else:
            BGR = cv2.putText(BGR, 'Move into camera', (5, H - 60), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)

        '''mirror_feedback'''
        string0 = 'SSF: ' + str(SSF) + ' Hold:' + str(SSF_Hold) + 's'
        BGR = cv2.putText(BGR, string0, (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        BGR = cv2.putText(BGR, 'Angle: ' + str(270 - SSF_Max), (5, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        BGR = cv2.putText(BGR, string2, (5, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        BGR = cv2.putText(BGR, helper, (5, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        ############################################################################################
        SLL_string0 = 'SLL: ' + str(SLL) + ' Hold:' + str(SLL_Hold) + 's'
        BGR = cv2.putText(BGR, SLL_string0, (W - 350, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
        BGR = cv2.putText(BGR, 'Angle: ' + str(SLL_Max), (W - 350, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
        BGR = cv2.putText(BGR, SLL_string2, (W - 350, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)
        BGR = cv2.putText(BGR, helper, (W - 350, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 2)

        '''Widget display'''
        # mirror feedback
        RGB = Image.fromarray(BGR)
        RGB = ImageTk.PhotoImage(RGB)
        canvas.create_image(0, 0, anchor='nw', image=RGB)  # refresh the image; 'nw' anchors it at the north-west corner
        canvas.bg = RGB
        text2.set('Camera Open!')
        # times
        print(SSF, V)
        if int(SSF) < int(V):
            text3.set(str(SSF) + ' / ' + str(V))
        else:
            text3.set('Finish!')
        # progress bar
        if j == 1 and j1 == 1 and j2 == 1 and j3 == 1:  # hands up
            k = 0.4 * sx / 90  # total bar length is 0.4 * sx
            n = k * (180 - SSF_Max)
        else:
            n = 0
        canvas3.coords(fill_line, (0, 0, n, 60))
        window.update()
        cv2.waitKey(1)
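# Note: the j/j1/j2/j3 and i/i1/i2/i3 variables above act as a 4-frame shift
# register: the sample pattern 0011 marks a rising edge (limb just raised, so
# count a repetition) and 1100 a falling edge (limb just lowered, so log it).
# A minimal sketch of the same edge detection with a deque; the names here are
# illustrative, not from the original.
from collections import deque

history = deque([0, 0, 0, 0], maxlen=4)  # last four up/down samples

def update(is_up):
    history.append(1 if is_up else 0)
    rising = list(history) == [0, 0, 1, 1]   # just went up
    falling = list(history) == [1, 1, 0, 0]  # just went down
    return rising, falling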
コード例 #19
0
def pullUpDetect(length):
    # Process
    cnt = 0

    results = []
    tendency = []
    result = {}
    r_elbow_angle_list = []
    l_elbow_angle_list = []
    eye_distance_list = []
    tick = []
    pullUpCnt = 0

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # get video from webcam or video
    start = time.time()
    cap = cv2.VideoCapture(0)
    cap_t = cv2.VideoCapture(1)
    # cap2 = cv2.VideoCapture(r'E:\University\科研创新\雏燕计划-体测\体测姿势素材\push-up\push-up-test-1.mp4')
    coorList = []

    start_time = datetime.now()

    while True:
        # Get images from cam
        ret, imageToProcess = cap.read()
        ret, imageToTest = cap_t.read()
        cv2.imshow('video', imageToProcess)
        cv2.imshow('2', imageToTest)

        cur_time = datetime.now()
        delta_time = (cur_time - start_time).seconds
        print('\rCountdown: ' + str(length - delta_time) + ' s', end='')
        if delta_time == length:
            break

        if cnt % 2 == 0:
            datum = op.Datum()
            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop([datum])
            if datum.poseKeypoints.size != 1:
                coor = kpp.getKeyPoints(datum.poseKeypoints[0])
                if coor:
                    r_elbow_angle = getValue.getElbowAngle(coor, 'R')
                    l_elbow_angle = getValue.getElbowAngle(coor, 'L')
                    eye_distance = getValue.getEyeWristDistance(coor)

                    if l_elbow_angle:
                        l_elbow_angle_list.append(l_elbow_angle)

                    if eye_distance:
                        eye_distance_list.append(eye_distance)

                    if r_elbow_angle:
                        r_elbow_angle_list.append(r_elbow_angle)
                        tick.append(r_elbow_angle)
                    if len(tick) == 5:
                        tend = analysis.getTendency(tick, 8)  # One tick
                        tick = []
                        if tend:
                            tendency.append(tend)
                            if 3 <= len(tendency):
                                if tendency[-1] == 'down' or tendency[
                                        -1] == 'stable':
                                    if tendency[-2] == 'upper' and tendency[
                                            -3] == 'upper':  # a period
                                        pullUpCnt += 1
                                        result['Num'] = pullUpCnt
                                        standard = analysis.pullUpPeriodJudge(
                                            r_elbow_angle_list,
                                            l_elbow_angle_list,
                                            eye_distance_list)
                                        result['IsRElbowStandard'], result[
                                            'IsLElbowStandard'], result[
                                                'IsHeightStandard'], total = standard
                                        r_elbow_angle_list, l_elbow_angle_list, eye_distance_list = [], [], []
                                        results.append(result)
                                        print(result)
                                        result = {}
                                        db.ret(delta_time, total, pullUpCnt,
                                               'pullUp', results, True)

                cv2.imshow("OpenPose 1.5.1 - Tutorial Python API",
                           datum.cvOutputData)

            if cv2.waitKey(1) == ord('q'):
                break
        cnt += 1
    db.ret(None, None, None, None, None, False)
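# Note: analysis.getTendency is not shown here. A plausible stand-in, assuming
# it classifies a 5-sample elbow-angle window by its net change against the
# threshold (8 in the call above); this is a guess at its logic, not the
# project's actual implementation.
def get_tendency(samples, threshold=8):
    delta = samples[-1] - samples[0]
    if delta > threshold:
        return 'upper'
    if delta < -threshold:
        return 'down'
    return 'stable'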
コード例 #20
0
def openpose_demo(canvas):
    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"
    params["hand"] = True

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    cap = cv2.VideoCapture(1)  # sometimes 0, sometimes 1 depending on the machine
    #cap.set(3,1280)
    #cap.set(4,960)
    cap.set(3, 1920)
    cap.set(4, 1080)

    before_form = []
    current_form = []
    counter = 0
    global rootFlag
    while (True):
        ret, frame = cap.read()
        #cv2.imshow('Raw Frame', frame)
        #frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        #frame = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)

        #frame = adjust(frame,alpha=1.0,beta=-25.0)

        # Process Image
        datum = op.Datum()
        imageToProcess = frame
        datum.cvInputData = imageToProcess

        opWrapper.emplaceAndPop([datum])

        cv2.namedWindow("OpenPose 1.5.0 - Tutorial Python API",
                        cv2.WINDOW_KEEPRATIO
                        | cv2.WINDOW_NORMAL)  # make the window resizable
        cv2.imshow("OpenPose 1.5.0 - Tutorial Python API", datum.cvOutputData)

        #print(datum.handKeypoints[1][0])
        try:
            right_hand, flag = hm.is_hand_recog(datum.handKeypoints[1][0])
        except Exception as e:
            continue
        # depending on which camera is selected, "IndexError: too many indices for array" can be raised here

        if flag == True:
            current_form = hm.check_handform2(right_hand)
            #print(current_form)
            if current_form == before_form:  # compare the current shape with the previous frame's
                counter = counter + 1  # if they match, increment the counter
                if (counter == 5):  # once the same shape has been seen 5 frames in a row
                    canvas.delete("all")
                    n = hm.list_to_num(current_form)  # map the hand shape to a number

                    try:
                        canvas.create_text(300, 300, text=n, font=('', 350))
                    except Exception as e:
                        break
            else:
                counter = 0  # otherwise reset the counter
                try:
                    canvas.create_text(300, 300, text=" ", font=('', 350))
                except Exception as e:
                    break
            before_form = current_form
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if rootFlag == False:
            break
    cap.release()
    cv2.destroyAllWindows()
コード例 #21
0
def openpose_demo(canvas):
    upperbody_cascade = cv2.CascadeClassifier(upperbody_cascade_path)

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "../../../models/"
    params["hand"] = True

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    cap = cv2.VideoCapture(0)  # sometimes 0, sometimes 1 depending on the machine
    cap.set(3, 1280)
    cap.set(4, 960)

    before_form = []
    current_form = []
    counter = 0
    global rootFlag
    while (True):
        ret, frame = cap.read()

        frame = adjust(frame, alpha=1.0, beta=-25.0)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        bodys = upperbody_cascade.detectMultiScale(gray)
        max_x, max_y, max_h, max_w = max_body_select(bodys)
        frame2 = frame[max_y:max_y + max_h, max_x:max_x + max_w]

        # Process Image
        datum = op.Datum()
        imageToProcess = frame2
        datum.cvInputData = imageToProcess

        opWrapper.emplaceAndPop([datum])

        try:
            if datum.poseKeypoints[0][0][2] < 0.6:
                continue
            # get the head coordinates
            head_x = int(datum.poseKeypoints[0][0][0])
            head_y = int(datum.poseKeypoints[0][0][1])

            top = head_y - 240
            bottom = head_y + 240
            left = head_x - 320
            right = head_x + 320
            frame2 = frame[top:bottom, left:right]

            #cv2.namedWindow("OpenPose 1.5.0 - Tutorial Python API", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL) # make the window resizable
            #cv2.imshow("OpenPose 1.5.0 - Tutorial Python API", datum.cvOutputData)

            del datum

            # Process Image
            datum2 = op.Datum()
            datum2.cvInputData = frame2
            opWrapper.emplaceAndPop([datum2])

            cv2.imshow("zoom image", datum2.cvOutputData)

            try:
                right_hand, flag = hm.is_hand_recog(datum2.handKeypoints[1][0])
            except Exception as e:
                continue
            # depending on which camera is selected, "IndexError: too many indices for array" can be raised here

            if flag == True:
                current_form = hm.check_handform2(right_hand)
                #print(current_form,counter)
                if current_form == before_form:  # compare the current shape with the previous frame's
                    counter = counter + 1  # if they match, increment the counter
                    if (counter == 2):  # once the same shape has been seen 2 frames in a row
                        canvas.delete("all")
                        n = hm.list_to_num(current_form)  # map the hand shape to a number
                        try:
                            canvas.create_text(300,
                                               300,
                                               text=n,
                                               font=('', 350))
                        except Exception as e:
                            break
                else:
                    counter = 0  # otherwise reset the counter
                before_form = current_form
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            if rootFlag == False:
                break

        except Exception as e:
            continue

    cap.release()
    cv2.destroyAllWindows()
コード例 #22
0
def detect_keypoints(image):
    # Process Image
    datum = op.Datum()
    datum.cvInputData = image
    opWrapper.emplaceAndPop([datum])
    return datum
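# Usage sketch for detect_keypoints, assuming opWrapper was configured and
# started as in the earlier examples ('input.jpg' is a placeholder path):
import cv2

image = cv2.imread('input.jpg')
datum = detect_keypoints(image)
print("Body keypoints:\n", datum.poseKeypoints)  # one (25, 3) row per person, or empty if nobody is detected
cv2.imshow("keypoints", datum.cvOutputData)
cv2.waitKey(0)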
コード例 #23
0
ファイル: demo_offline.py プロジェクト: julycrow/my-st-gcn
    def pose_estimation(self):  # use OpenPose to extract the joint keypoints
        # load openpose python api
        # if self.arg.openpose is not None:
        #     sys.path.append('{}/python'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python/openpose'.format(self.arg.openpose))
        #     sys.path.append('{}/build/example/openpose'.format(self.arg.openpose))
        #     sys.path.append('{}/build/python/openpose/Release'.format(self.arg.openpose))
        #     sys.path.append('{}/build/x64/Release'.format(self.arg.openpose))
        #     sys.path.append('{}/build/bin'.format(self.arg.openpose))
        # try:
        #     #from openpose import pyopenpose as op
        #     import pyopenpose as op
        # except:
        #     print('Can not find Openpose Python API.')
        #     return
        #     dir_path = os.path.dirname(os.path.realpath(__file__))

        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            # if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            # sys.path.append('D:/PycharmProject/openpose/build/python/openpose/Release')
            # os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + 'D:/PycharmProject/openpose/build/x64/Release' + dir_path + 'D:/PycharmProject/openpose/build/bin'
            sys.path.append(dir_path +
                            '/../../openpose/build/python/openpose/Release')
            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + dir_path + '/../../openpose/build/x64/Release;' + dir_path + '/../../openpose/build/bin;'
            import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        video_name = self.arg.video.split('/')[-1].split('.')[0]

        # initiate
        opWrapper = op.WrapperPython()
        params = dict(model_folder='./models', model_pose='COCO')
        opWrapper.configure(params)
        opWrapper.start()
        self.model.eval()
        video_capture = cv2.VideoCapture(self.arg.video)
        # video_capture = cv2.VideoCapture(0)  # 0 opens the webcam
        video_length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        pose_tracker = naive_pose_tracker(data_frame=video_length)

        # pose estimation
        start_time = time.time()
        frame_index = 0
        video = list()
        while (True):

            # get image
            ret, orig_image = video_capture.read()
            if orig_image is None:
                break
            source_H, source_W, _ = orig_image.shape
            orig_image = cv2.resize(orig_image,
                                    (256 * source_W // source_H, 256))
            H, W, _ = orig_image.shape
            video.append(orig_image)

            # pose estimation
            datum = op.Datum()
            datum.cvInputData = orig_image
            # opWrapper.emplaceAndPop([datum])
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            multi_pose = datum.poseKeypoints  # (num_person, num_joint, 3)

            # if len(multi_pose.shape) != 3:  # check whether it is a 3-D array
            if multi_pose is None:  # no detections this frame; indexing None would raise
                continue

            # normalization
            multi_pose[:, :, 0] = multi_pose[:, :, 0] / W
            multi_pose[:, :, 1] = multi_pose[:, :, 1] / H
            multi_pose[:, :, 0:2] = multi_pose[:, :, 0:2] - 0.5
            multi_pose[:, :, 0][multi_pose[:, :, 2] == 0] = 0
            multi_pose[:, :, 1][multi_pose[:, :, 2] == 0] = 0

            # pose tracking
            pose_tracker.update(multi_pose, frame_index)
            frame_index += 1

            print('Pose estimation ({}/{}).'.format(frame_index, video_length))

        data_numpy = pose_tracker.get_skeleton_sequence()
        return video, data_numpy
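The normalization step above rescales the keypoints by the frame size, centers them around zero, and zeroes out joints whose confidence is zero. A standalone sketch of the same steps, assuming the (num_person, num_joint, 3) layout noted in the snippet:

import numpy as np

def normalize_pose(multi_pose, W, H):
    """multi_pose: (num_person, num_joint, 3) array of (x, y, confidence)."""
    pose = multi_pose.astype(np.float32)
    pose[:, :, 0] = pose[:, :, 0] / W - 0.5  # x into [-0.5, 0.5]
    pose[:, :, 1] = pose[:, :, 1] / H - 0.5  # y into [-0.5, 0.5]
    pose[:, :, 0][pose[:, :, 2] == 0] = 0    # undetected joints to the origin
    pose[:, :, 1][pose[:, :, 2] == 0] = 0
    return pose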
コード例 #24
0
def detect_skeleton(file_name):
    try:
        # Import Openpose (Windows/Ubuntu/OSX)
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../openpose/build/python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + '../../openpose/build/x64/Release;' + '../../openpose/build/bin'
                import pyopenpose as op
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../openpose/build/python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e

        # Flags
        parser = argparse.ArgumentParser()
        parser.add_argument("--video_path",
                            default=f"../media/{file_name}.mp4",
                            help="Read input video (avi, mp4).")
        parser.add_argument("--no_display",
                            default=False,
                            help="Enable to disable the visual display.")
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../../openpose/models/"
        params["disable_multi_thread"] = "false"
        numberGPUs = 1

        # Add others in path?
        for i in range(0, len(args[1])):
            curr_item = args[1][i]
            if i != len(args[1]) - 1:
                next_item = args[1][i + 1]
            else:
                next_item = "1"
            if "--" in curr_item and "--" in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = "1"
            elif "--" in curr_item and "--" not in next_item:
                key = curr_item.replace('-', '')
                if key not in params: params[key] = next_item
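        # Worked example (assumed invocation): extra command-line args
        #   --net_resolution 320x176 --disable_blending
        # become params['net_resolution'] = '320x176' and
        # params['disable_blending'] = '1': a flag followed by another flag
        # (or by nothing) gets the default value "1".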

        # Construct it from system arguments
        # op.init_argv(args[1])
        # oppython = op.OpenposePython()

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Video
        datum = op.Datum()
        cap = cv2.VideoCapture(args[0].video_path)
        set_frame_size(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                       int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        # create a video with the skeleton (joints) overlaid
        # fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
        # out = cv2.VideoWriter(f'../output/output/video/{file_name}.avi', fourcc, 20.0, get_frame_size())

        frame_data = []
        frame_id = -1

        while cap.isOpened():
            frame_id += 1

            grabbed, frame = cap.read()

            if frame is None or not grabbed:
                print("Finish reading video frames...")
                break

            datum.cvInputData = frame
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
            # print("Body keypoints: \n" + str(datum.poseKeypoints))

            # save the annotated frame to the output video
            #out.write(datum.cvOutputData)

            # cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
            # cv2.waitKey(1)

            # save the keypoints as a list
            frame_data.append(make_json(datum, frame_id))

        else:
            # while-else: reached only if cap.isOpened() was false (no break)
            print('cannot open the file')

        # show list as json
        with open(f'../output/json/output{file_name}.json',
                  'w',
                  encoding="utf-8") as make_file:
            json.dump(frame_data, make_file, ensure_ascii=False, indent="\t")

        cap.release()
        # out.release()
        # cv2.destroyAllWindows()
    except Exception as e:
        print(e)
        sys.exit(-1)

    return frame_data
コード例 #25
0
def main():
    try:
        '''
        Part to get openpose & pyopenpose from built openpose.
        Be sure to build the openpose system first.
        Feel free to change directories of built python packages.
        '''
        # Import Openpose (Windows/Ubuntu/OSX)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        try:
            # Windows Import
            if platform == "win32":
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append(dir_path + '/../../python/openpose/Release')
                os.environ['PATH'] = os.environ[
                    'PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
                import pyopenpose as op
                # from openpose import *
            else:
                # Change these variables to point to the correct folder (Release/x64 etc.)
                sys.path.append('../../python')
                # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
                # sys.path.append('/usr/local/python')
                from openpose import pyopenpose as op
                # from openpose import *
        except ImportError as e:
            print(
                'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
            )
            raise e
        '''
        Customizing part for Openpose Python Wrapper.
        For the project, only enable body & hand.
        '''
        params = dict()
        params["model_folder"] = "../../../models/"
        params["hand"] = True
        params["hand_detector"] = 2
        params["body"] = 1
        params["net_resolution"] = '320x192'  #20*11
        # params["face"] = True
        # params["disable_blending"] = True
        # params["fps_max"] = 5

        handRectangles = [[
            op.Rectangle(128, 0, 1024, 1024),
            op.Rectangle(0., 0., 0., 0.)
        ]]
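        # hand_detector = 2 makes OpenPose use these caller-supplied boxes
        # instead of its own detector: one [left, right] rectangle pair per
        # person, with the right-hand box intentionally left empty here.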
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Process Image
        datum = op.Datum()
        datum.handRectangles = handRectangles
        cam = cv2.VideoCapture(0)  # modify here for camera number
        cam.set(3, 1280)
        cam.set(4, 1024)
        pair_poseKeypoints = [[], []]
        input_hands = []
        prev_state = None
        '''
        Loading Model for Recognizing Hand Gestures
        Please refer to model.py for the various models trained for the task.
        The current implementation is based on cnn_resample_100.pt, which shows the best demo performance.
        If you would like to use other models, be sure to modify the code below accordingly.
        '''
        model = HandGestureNet(n_channels=42, n_classes=5)
        model.load_state_dict(torch.load('cnn_resample_100.pt'))
        # model.load_state_dict(torch.load('CNN_mid12.pt'))
        model.to('cuda')
        model.eval()
        '''
        Message state contains time of state changes.
        Used to prevent burst of messages in short time to the Zoom chat.
        '''
        msg_state = ('not_sent', time.perf_counter())
        '''
        Starting the Project
        Pose & Hand Gesture Estimation
        '''
        while (cv2.waitKey(1) != 27):
            if msg_state[0] == 'sent':
                if time.perf_counter() - msg_state[1] > 2.5:
                    msg_state = ('not_sent', time.perf_counter())

            ret, frame = cam.read()
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])
            frame = datum.cvOutputData
            '''If Person not in Camera'''
            if datum.poseKeypoints.shape == ():
                if msg_state[0] == 'not_sent':
                    # print('WHY NOT WORKING')
                    conn.request(
                        "POST",
                        "/v2/chat/users/[email protected]/messages",
                        payload_absent, headers)
                    #
                    res = conn.getresponse()
                    data = res.read()

                    print(data.decode("utf-8"))
                    msg_state = ('sent', time.perf_counter())
                font = cv2.FONT_HERSHEY_SIMPLEX
                fontScale = 3
                fontColor = (0, 0, 255)
                lineType = 2
                fontThickness = 2
                msg_on_screen = 'ABSENT!'
                textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                           fontThickness)[0]
                bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                          (1024 + textsize[1]) // 2)

                cv2.rectangle(frame, (0, 0), (1280, 1024), (0, 0, 255), 20)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
                cv2.imshow("Openpose 1.4.0 Webcam", frame)
                continue
            '''If there are more than one people on camera'''
            if len(datum.poseKeypoints) > 1:
                if prev_state is not None and prev_state[0] == 'multi_people':
                    # compare elapsed time, not the raw perf_counter() timestamp
                    if time.perf_counter() - prev_state[1] > 2:
                        if msg_state[0] == 'not_sent':
                            # print('WHY NOT WORKING')
                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_multi, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        fontScale = 3
                        fontColor = (0, 0, 255)
                        lineType = 2
                        fontThickness = 2
                        msg_on_screen = 'MULTIPLE PEOPLE!'
                        textsize = cv2.getTextSize(msg_on_screen, font,
                                                   fontScale, fontThickness)[0]
                        bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                                  (1024 + textsize[1]) // 2)

                        cv2.rectangle(frame, (0, 0), (1280, 1024), (0, 0, 255),
                                      20)
                        cv2.putText(frame, msg_on_screen,
                                    bottomLeftCornerOfText, font, fontScale,
                                    fontColor, lineType)
                        cv2.imshow("Openpose 1.4.0 Webcam", frame)
                        msg_state = ('sent', time.perf_counter())
                        continue
                else:
                    prev_state = ('multi_people', time.perf_counter())
            '''Evaluate Movement & Confidence'''
            del pair_poseKeypoints[0]
            pair_poseKeypoints.append(datum.poseKeypoints[0])
            body_confidence_avg = avg_pose_confidence(datum.poseKeypoints[0])
            # print(body_confidence_avg)
            moved = metric(pair_poseKeypoints)
            '''
            Evaluate Hand Gesture
            Please modify the below if you use other models for hand gestures.
            Refer to model.py
            '''
            # if len(input_hands) == 12:  # normalizev2.pt
            #     del input_hands[0]
            # input_hands.append(datum.handKeypoints[0][0])
            # # print(len(input_hands))
            # prob, gesture = None, None
            # hand_confidence_avg = avg_list_confidence(input_hands)
            # # if len(input_hands) == 12 and avg >= 0.1:
            # if len(input_hands) == 12:
            #     # print('Confidence : ', hand_confidence_avg)
            #     prob, gesture = get_hand_gesture(model, input_hands)
            # # print(prob, gesture)

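            # Sliding window of 100 normalized hand frames: each new frame is
            # appended 5 times and the oldest 5 dropped, so the window used as
            # CNN input advances in steps of 5 frames.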
            norm_hand = normalize(datum.handKeypoints[0][0])
            # print(norm_hand.shape)
            if len(input_hands) == 100:
                for i in range(5):
                    del input_hands[0]
            for i in range(5):
                input_hands.append(norm_hand)
            # print(len(input_hands))
            prob, gesture = None, None
            hand_confidence_avg = avg_list_confidence(input_hands)
            # if len(input_hands) == 12 and avg >= 0.1:
            if len(input_hands) == 100:
                # print('Confidence : ', hand_confidence_avg)
                # print(input_hands[0][:][:2])
                inputs = torch.FloatTensor(input_hands).to('cuda')
                inputs = inputs[:, :, :2]
                # print(inputs.size())
                # prob, gesture = get_hand_gesture_cnn(model, input_hands[:][:][:1])
                prob, gesture = get_hand_gesture_cnn(model, inputs)

            print(prob, gesture)
            '''Output Recognition Results'''
            print_msg = False
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = None
            fontColor = None
            fontScale = 3
            fontThickness = 2
            msg_on_screen = None
            '''Thumbs DOWN'''
            if valid_hand(hand_confidence_avg, gesture) and gesture == 1:
                print('THUMBS DOWN PROB : ', prob)
                if prob > 0:  # mitigated by using softmax
                    '''Counter'''

                    if prev_state is None:
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_down', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_down':
                        prev_state = ('thumbs_down', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsdown, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:
                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (255, 0, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS DOWN'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())
                '''Thumbs UP'''
            elif valid_hand(hand_confidence_avg, gesture) and gesture == 2:
                print('THUMBS UP PROB : ', prob)
                '''Counter'''
                if prob > 0:
                    if prev_state is None:
                        prev_state = ('thumbs_up', time.perf_counter())  # match the 'thumbs_up' key used below
                        # print(prev_state)

                    elif prev_state[0] == 'rest':
                        if time.perf_counter() - prev_state[1] > 5.5:
                            prev_state = ('thumbs_up', time.perf_counter())
                            # print(prev_state)

                    elif prev_state[0] != 'thumbs_up':
                        prev_state = ('thumbs_up', time.perf_counter())
                        # print(prev_state)

                    else:
                        # print(time.perf_counter() - prev_state[1])
                        if msg_state[0] == 'not_sent':

                            conn.request(
                                "POST",
                                "/v2/chat/users/[email protected]/messages",
                                payload_thumbsup, headers)
                            #
                            res = conn.getresponse()
                            data = res.read()

                            print(data.decode("utf-8"))
                            msg_state = ('sent', time.perf_counter())
                        if time.perf_counter() - prev_state[1] > 0.5:

                            print_msg = True
                            # bottomLeftCornerOfText = (450, 500)
                            fontColor = (0, 255, 0)
                            fontScale = 3
                            msg_on_screen = 'THUMBS UP'
                            textsize = cv2.getTextSize(msg_on_screen, font,
                                                       fontScale,
                                                       fontThickness)[0]
                            bottomLeftCornerOfText = ((1280 - textsize[0]) //
                                                      2,
                                                      (1024 + textsize[1]) //
                                                      2)
                        # if time.perf_counter() - prev_state[1] > 3.5:
                        #     prev_state = ('rest', time.perf_counter())
                '''RAISED HAND'''
            elif valid_hand(hand_confidence_avg, gesture) and gesture == 4:
                print('RAISE HAND PROB : ', prob)
                '''Counter'''
                if prev_state is None:
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 5.5:
                        prev_state = ('raise_hand', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'raise_hand':
                    prev_state = ('raise_hand', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(time.perf_counter() - prev_state[1])
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_raisehand, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())
                    if time.perf_counter() - prev_state[1] > 0.5:
                        print_msg = True
                        bottomLeftCornerOfText = (450, 500)
                        fontColor = (0, 255, 255)
                        fontScale = 3
                        msg_on_screen = 'HAND RAISED'
                        textsize = cv2.getTextSize(msg_on_screen, font,
                                                   fontScale, fontThickness)[0]
                        bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                                  (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())
                '''
                Movement Detected
                Please refer to metric.py for detection metrics
                '''
            elif moved:
                '''Counter'''
                if prev_state is None:
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                elif prev_state[0] == 'rest':
                    if time.perf_counter() - prev_state[1] > 1.5:
                        prev_state = ('detect_move', time.perf_counter())
                        # print(prev_state)

                elif prev_state[0] != 'detect_move':
                    prev_state = ('detect_move', time.perf_counter())
                    # print(prev_state)

                else:
                    # print(msg_state)
                    if msg_state[0] == 'not_sent':

                        conn.request(
                            "POST",
                            "/v2/chat/users/[email protected]/messages",
                            payload_movement, headers)
                        #
                        res = conn.getresponse()
                        data = res.read()

                        print(data.decode("utf-8"))
                        msg_state = ('sent', time.perf_counter())

                    print_msg = True
                    bottomLeftCornerOfText = (150, 500)
                    fontColor = (0, 0, 255)
                    fontScale = 3
                    msg_on_screen = 'MOVEMENT DETECTED'
                    textsize = cv2.getTextSize(msg_on_screen, font, fontScale,
                                               fontThickness)[0]
                    # print(textsize)
                    bottomLeftCornerOfText = ((1280 - textsize[0]) // 2,
                                              (1024 + textsize[1]) // 2)
                    # if time.perf_counter() - prev_state[1] > 3.5:
                    #     prev_state = ('rest', time.perf_counter())
            '''Print messages according to states above'''
            if print_msg:
                lineType = 2
                cv2.rectangle(frame, (0, 0), (1280, 1024), fontColor, 40)
                cv2.putText(frame, msg_on_screen, bottomLeftCornerOfText, font,
                            fontScale, fontColor, lineType)
            cv2.imshow("Openpose 1.4.0 Webcam", frame)  # datum.cvOutputData
        # Always clean up
        cam.release()
        cv2.destroyAllWindows()

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(e)
        sys.exit(-1)
コード例 #26
0
ファイル: webcam.py プロジェクト: seanzhan0319/SmartTello
def main():
    params = set_params()

    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    webcam = VideoStream(src=0).start()  # imutils.video.VideoStream

    font = cv2.FONT_HERSHEY_SIMPLEX

    while True:
        frame = webcam.read()
        frame = cv2.resize(frame, (400, 300))
        datum = op.Datum()
        datum.cvInputData = frame
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))  # OpenPose >= 1.7 API

        # cam = datum.cvOutputData
        cam = frame

        # print(type(datum.poseKeypoints))
        # print(isinstance(datum.poseKeypoints, np.ndarray))
        # print(datum.poseKeypoints.size)

        # if (type(datum.poseKeypoints) == )

        # nose position
        if (isinstance(datum.poseKeypoints, np.ndarray)
                and datum.poseKeypoints.size == 75):
            # cast to int: the keypoints are float32, but OpenCV drawing
            # functions expect integer pixel coordinates
            noseX = int(datum.poseKeypoints[0][0][0])
            noseY = int(datum.poseKeypoints[0][0][1])
            cv2.circle(cam, (noseX, noseY), 5, (0, 255, 255))

            neckX = int(datum.poseKeypoints[0][1][0])
            neckY = int(datum.poseKeypoints[0][1][1])
            cv2.circle(cam, (neckX, neckY), 5, (0, 255, 255))

            rWristX = int(datum.poseKeypoints[0][4][0])
            rWristY = int(datum.poseKeypoints[0][4][1])
            cv2.circle(cam, (rWristX, rWristY), 5, (0, 255, 255))

            right_hand_up(cam, (noseX, noseY), (neckX, neckY),
                          (rWristX, rWristY))

            lWristX = int(datum.poseKeypoints[0][7][0])
            lWristY = int(datum.poseKeypoints[0][7][1])
            cv2.circle(cam, (lWristX, lWristY), 5, (0, 255, 255))

            left_hand_up(cam, (noseX, noseY), (neckX, neckY),
                         (lWristX, lWristY))

        # noseX = datum.poseKeypoints[0][0][0]
        # noseY = datum.poseKeypoints[0][0][1]
        # cv2.circle(cam, (noseX, noseY), 5, (0, 255, 255))
        # kPoints = datum.poseKeypoints[0, i, :, :]

        # Display the stream
        # cv2.resize(cam, (256, 144))
        cv2.imshow('Est', cam)

        # keyboard interactions
        key = cv2.waitKey(1)
        if key == ord('q'):
            break

    cv2.destroyAllWindows()
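The `size == 75` guard above accepts a frame only when exactly one person is detected with the default BODY_25 model (25 keypoints, each with x, y, and a confidence score). A minimal sketch of the indices the loop relies on, assuming that model:

# BODY_25 indices used above
NOSE, NECK, R_WRIST, L_WRIST = 0, 1, 4, 7

def keypoint_xy(pose_keypoints, person, part):
    """Integer pixel coordinates of one keypoint (x, y, confidence triplets)."""
    x, y, _conf = pose_keypoints[person][part]
    return int(x), int(y)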
コード例 #27
0
def test_SVM_Classifier(test_images_path, isPCAon=True):
    #
    # try to predict some input images with/without yoga poses
    #

    if test_images_path == "":
        return

    #region init OpenPose
    # Import Openpose (Windows/Ubuntu/OSX)
    dir_path = os.path.dirname(os.path.abspath(__file__))
    try:
        # Windows Import
        if platform == "win32":
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append(dir_path + '/dependencies/openpose/libs')
            os.environ['PATH'] = os.environ[
                'PATH'] + ';' + dir_path + '/dependencies/openpose/dlls;'
            import pyopenpose as op
        else:
            # Change these variables to point to the correct folder (Release/x64 etc.)
            sys.path.append('../../python')
            # If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
            # sys.path.append('/usr/local/python')
            from openpose import pyopenpose as op
    except ImportError as e:
        print(
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )
        raise e

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = "./dependencies/openpose/models/"
    params["net_resolution"] = "320x176"

    # Flags
    parser = argparse.ArgumentParser()
    args = parser.parse_known_args()
    # parse other params passed
    for i in range(0, len(args[1])):
        curr_item = args[1][i]
        if i != len(args[1]) - 1:
            next_item = args[1][i + 1]
        else:
            next_item = "1"
        if "--" in curr_item and "--" in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = "1"
        elif "--" in curr_item and "--" not in next_item:
            key = curr_item.replace('-', '')
            if key not in params: params[key] = next_item

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    # endregion

    # test images path (note: this hard-coded folder overrides the argument)
    test_images_path = 'yoga-pose-images-for-test'
    # get all images
    images = get_all_images_recursively(test_images_path)

    # hold combined features
    record_details = []
    # hold image indexes of appropriate combined features
    img_idxs_of_records = []
    # hold test images data
    images_data = []
    for idx in range(0, len(images)):
        print("\nCurrent image: " + images[idx] + "\n")

        # get image full name (with file extension)
        _, image_full_name = os.path.split(images[idx])
        # image name without extension
        image_name = image_full_name[0:image_full_name.rindex('.')]

        # processed by OpenPose
        datum = op.Datum()
        imageToProcess = cv2.imread(images[idx])
        img_rgb = cv2.cvtColor(imageToProcess, cv2.COLOR_BGR2RGB)
        # add image data to list
        images_data.append(img_rgb)

        height, width, channels = imageToProcess.shape
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))  # OpenPose >= 1.7 API

        # print("Body keypoints: \n" + str(datum.poseKeypoints))

        # check whether pose keypoint data exists
        if datum.poseKeypoints.size >= 2:
            # merging bounding boxes if applicable
            merged_poseKeypoints = bounding_box_merging(
                datum.poseKeypoints,
                ratio_of_intersec_thresh=0.36,
                ratio_of_distance_thresh=2)
            # assign merged_poseKeypoints to datum.poseKeypoints array
            datum.poseKeypoints = merged_poseKeypoints

            # draw bounding box on test image
            drawn_img = draw_bounding_box(datum)
            drawn_img = cv2.cvtColor(drawn_img, cv2.COLOR_BGR2RGB)
            images_data[idx] = drawn_img

            # an array to save mixed features (keypoints and HOG) of an image
            keyPoint_HOG_feats_arrs = []
            if datum.poseKeypoints.size > 1 and datum.poseKeypoints.ndim == 3:
                # region extract keypoints features
                keypoints_coordinates = datum.poseKeypoints[:, :, :2]

                # translate and scale keypoints by its center and box size
                flatted_features = normalize_keypoint_by_its_bounding_box(
                    keypoints_coordinates)
                # image_size_in_str = str(width) + 'x' + str(height)
                for row in flatted_features:
                    keyPoint_HOG_feats_arrs.append(row)
                # endregion

                # region extract HOG features
                extracted_hog_feats_arrs = \
                    extract_ROI_and_HOG_feature(datum,
                                                image_name,
                                                isDisplayImageWithROIs=False,
                                                isExtractRoiHogBitmap=False)

                for i in range(0, len(extracted_hog_feats_arrs)):
                    keyPoint_HOG_feats_arrs[i] = np.append(
                        keyPoint_HOG_feats_arrs[i],
                        extracted_hog_feats_arrs[i])
                # endregion

                # add features data to accumulate array
                for row in keyPoint_HOG_feats_arrs:
                    record_details.append(row)
                    img_idxs_of_records.append(idx)

    if len(record_details) > 0:

        # normalize feature vector
        X = pd.DataFrame(record_details)

        # separate keypoint features and HOG features apart
        X1 = X.iloc[:, :50]
        X2 = X.iloc[:, 50:]

        # apply scaler to each type of feature
        trans = StandardScaler()
        X1 = trans.fit_transform(X1)
        X2 = trans.fit_transform(X2)

        # merge features after applying standard scaler
        X = np.concatenate((X1, X2), axis=1)
        # apply scaling to entire dataset
        X = trans.fit_transform(X)
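        # Note: fit_transform above re-fits the scalers on the test data alone;
        # strictly, the scaler fitted on the training set should be saved and
        # re-applied here with transform() so train and test share one scaling.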

        pose_preds = []
        if isPCAon:
            # load PCA model
            pca = load('saved_models/SVM-PCA-transform.joblib')

            # apply mapping (transform) to both training and test set
            X = pca.transform(X)

            # load SVM classifier with PCA
            clf = load('saved_models/SVM-model-with-PCA.joblib')

            # predict poses
            pose_preds = clf.predict(X)
        else:
            # load SVM classifier without PCA
            clf = load('saved_models/SVM-model-without-PCA.joblib')

            # predict poses
            pose_preds = clf.predict(X)

        # get encoded classes
        encoded_classes = load('saved_models/encoded-classes.joblib')

        # build predict poses for each image
        # there might be more than one pose in one image
        image_poses_list = [()] * len(images_data)
        for pose_idx in range(0, len(pose_preds)):
            image_poses_list[img_idxs_of_records[pose_idx]] = image_poses_list[
                img_idxs_of_records[pose_idx]] + (pose_preds[pose_idx], )

        # count instances of poses in each image
        # and build label for each predicted image
        # hold labels for each predicted image
        predicted_img_lbls = [None] * len(images_data)
        for img_idx in range(0, len(image_poses_list)):
            c = Counter(image_poses_list[img_idx])
            lbl = ""
            for pose in c.keys():
                lbl = lbl + str(c[pose]) + " " + str(
                    encoded_classes[pose]) + ", "
            if lbl == "": lbl = "No pose detected"
            else: lbl = lbl.rstrip().rstrip(',')

            # assign label to list
            predicted_img_lbls[img_idx] = lbl

        # show grid of predicted images
        nCols = 4
        # ceiling division: enough rows to hold all the images
        nRows = (len(images_data) + nCols - 1) // nCols

        pyplot.rc('figure', figsize=(20, 20))
        fig = pyplot.figure()
        grid = ImageGrid(
            fig,
            111,  # similar to subplot(111)
            nrows_ncols=(nRows, nCols),  # creates mxn grid of axes
            axes_pad=0.3,  # pad between axes in inch.
            share_all=True)
        fontdict = {'fontsize': 10, 'color': 'red'}
        for ax, im, lbl in zip(grid, images_data, predicted_img_lbls):
            # resize image
            im = cv2.resize(im, (400, 320))
            # Iterating over the grid returns the Axes.
            ax.imshow(im)
            ax.set_title(lbl, fontdict=fontdict, pad=2)

        pyplot.show()
コード例 #28
0
    ##        if datum.poseKeypoints[p,i,2] < .70:
    ##            color = (0,255,255)

    ##        if datum.poseKeypoints[p,i,2] < .2:
    ##            color = (0,0,255)

    ##        cv2.circle(newImage, (datum.poseKeypoints[p,i,0],datum.poseKeypoints[p,i,1]),3,color,2)

    ##DrawPointsWithConfidence(newImage,datum.poseKeypoints)
    ##DrawLineOnImage(newImage,[1,-1,0])
    ##cv2.imshow("Confidence Points", newImage)

    testbodypartID = 4
    imagesToLoad = GetImagesToLoad()
    datums = [op.Datum() for i in range(numberOfCameras)]
    imagesToProcess = loadImages(imagesToLoad)
    for i in range(numberOfCameras):
        datums[i].cvInputData = imagesToProcess[i]
        opWrapper.emplaceAndPop(op.VectorDatum([datums[i]]))  # OpenPose >= 1.7 API

    imgIDA = 2
    imgIDB = 6
    imgIDC = 4
    imgIDD = 8
    #newImage = imagesToProcess[imgIDA].copy()
    #DrawPointsWithConfidence(newImage,datums[imgIDA].poseKeypoints)
    #cv2.imshow("Confidence Points", newImage)

    cameraIntrinsics = GetCameraIntrinsics(cameraIntrinsicsFileLocation,
                                           numberOfCameras)
コード例 #29
0
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )
        raise e

    params = dict()
    params['model_folder'] = config.OPENPOSE_MODEL_PATH
    params['hand'] = config.OPENPOSE_DETECT_HAND
    params['face'] = config.OPENPOSE_DETECT_FACE

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Process Image
    datum = op.Datum()
    image = cv2.imread('G:\\images\\a-characters\\qi-sheng-luo-zbz2-0004.jpg')
    height, width, _ = image.shape
    print(f'raw shape: {height} x {width}')
    min_axis = min(height, width)
    height_ = 768 if height == min_axis else int(height * (768. / min_axis))
    width_ = 768 if width == min_axis else int(width * (768. / min_axis))
    datum.cvInputData = image
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))  # OpenPose >= 1.7 API

    # guard against None (no person detected) before indexing
    keypoints = (datum.poseKeypoints[0] if datum.poseKeypoints is not None
                 and len(datum.poseKeypoints) > 0 else None)
    if keypoints is None:
        sys.exit(0)
    print('key size:', len(keypoints))
    x_list = [int(x[0]) for x in keypoints]
コード例 #30
0
def main(session):
    # Nao
    motion_service = session.service("ALMotion")
    posture_service = session.service("ALRobotPosture")
    motion_service.wakeUp()
    posture_service.goToPosture("Stand", 0.5)
    params = dict()
    params["model_folder"] = "/home/anton/Documents/hci-project/openpose/models"
    # params["model_pose"] = "COCO"
    params["net_resolution"] = "160x80"
    params["body"] = 1
    params["display"] = 1

    # Starting OpenPose
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()

    # Process Video
    datum = op.Datum()
    cam = cv2.VideoCapture(0)  # modify here for camera number
    while (cv2.waitKey(1) != 27):
        # Get camera frame
        # cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        ret, frame = cam.read()
        datum.cvInputData = frame
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))  # OpenPose >= 1.7 API
        frame = datum.cvOutputData
        cv2.imshow("Webcam", frame)  # datum.cvOutputData)
        leftRight, theta, phi = [], [], []
        print(len(str(datum.poseKeypoints)))
        print("\n")

        # crude presence check: the printed keypoint array is long only when
        # at least one person was detected
        if len(str(datum.poseKeypoints)) > 1000:
            if (datum.poseKeypoints[0][5][2] > 0.7
                    and datum.poseKeypoints[0][6][2] > 0.7
                    and datum.poseKeypoints[0][7][2] > 0.7):
                theta.append(
                    math.atan2(
                        datum.poseKeypoints[0][6][1] -
                        datum.poseKeypoints[0][5][1],
                        datum.poseKeypoints[0][6][0] -
                        datum.poseKeypoints[0][5][0]))

                leftRight.append("L")
                print("Left - Theta: " + str(math.degrees(theta[-1])) + "\n")

            if datum.poseKeypoints[0][2][2] > 0.7 and datum.poseKeypoints[0][3][2] > 0.7\
              and datum.poseKeypoints[0][4][2] > 0.7:
                theta.append(-math.atan2(
                    datum.poseKeypoints[0][2][1] - datum.poseKeypoints[0][3]
                    [1], datum.poseKeypoints[0][2][0] -
                    datum.poseKeypoints[0][3][0]))

                leftRight.append("R")
                print("Right - Theta: " + str(math.degrees(theta[-1])) + "\n")

            userArmArticular(motion_service, theta, leftRight)
    # Always clean up
    cam.release()
    cv2.destroyAllWindows()
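The left/right theta values above are atan2 angles of the shoulder-to-elbow vector, using BODY_25 indices (left arm: 5/6, right arm: 2/3). A standalone sketch of the same computation, with assumed names:

import math

L_SHOULDER, L_ELBOW = 5, 6  # BODY_25 indices (right arm: 2, 3)

def arm_angle(pose, person=0, shoulder=L_SHOULDER, elbow=L_ELBOW):
    """Angle (degrees) of the shoulder->elbow vector: atan2(dy, dx)."""
    dy = pose[person][elbow][1] - pose[person][shoulder][1]
    dx = pose[person][elbow][0] - pose[person][shoulder][0]
    return math.degrees(math.atan2(dy, dx))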