def __init__(self, arguments):
        self.arguments = arguments

        # Frame window dimensions (pixels)
        self.winWidth = 640
        self.winHeight = 480

        actionPredictor_params.__init__(self)

        self.fps_time = 0
        #self.step = 15
        self.mode = {
            'Pose Estimation': 'estimation',
            'Tracking': 'tracking',
            'Action Recognition': 'recognition'
        }

        w, h = model_wh(self.arguments.resize)
        if w > 0 and h > 0:
            self.estimator = TfPoseEstimator(get_graph_path(self.arguments.model),
                                             target_size=(w, h))
        else:
            self.estimator = TfPoseEstimator(get_graph_path(self.arguments.model),
                                             target_size=(432, 368))

        self.cam = cv2.VideoCapture(self.arguments.camera)

        # Tracker based on SORT (Simple Online and Realtime Tracking)
        self.sort_max_age = 20  # frames a track is kept alive without a matching detection
        self.sort_min_hit = 3   # consecutive detections required before a track is reported
        self.tracker = Sort(self.sort_max_age, self.sort_min_hit)
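
The constructor above only consumes an `arguments` object with `camera`, `model`, and `resize` attributes. A minimal sketch of how such an object could be built with argparse follows; the flag names, defaults, and the class name are assumptions for illustration, not the project's actual parser.

import argparse

parser = argparse.ArgumentParser(description='pose estimation / tracking / action recognition demo')
parser.add_argument('--camera', type=int, default=0, help='camera index passed to cv2.VideoCapture')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='model name resolved by get_graph_path()')
parser.add_argument('--resize', type=str, default='0x0', help='network input size as WxH; 0x0 falls back to 432x368')
args = parser.parse_args()
# app = PoseApp(args)  # hypothetical class owning the __init__ above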
Example #2
def load_model():
    """Load the pose estimator and the skeleton-CNN action classifier."""
    global poseEstimator
    global sk_cnn_actionPredicter
    poseEstimator = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                                    target_size=(432, 368))
    sk_cnn_actionPredicter = sk_cnn.SkelCNN()
    sk_cnn_actionPredicter.load_weights(
        './action_pre_sk_cnn/models/skel_cnn_model/sk-cnn.hdf5')
def load_model():
    """Load only the pose estimator (no action classifier)."""
    global poseEstimator
    poseEstimator = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                                    target_size=(432, 368))
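
A hedged usage sketch for the loaders above: call load_model() once, then run the global estimator on a frame. The image path, resize step, and the print are illustrative assumptions, not part of the original examples.

import cv2

load_model()
frame = cv2.imread('test.jpg')                 # hypothetical input image
if frame is not None:
    frame = cv2.resize(frame, (432, 368))
    humans = poseEstimator.inference(frame)    # detected skeletons for this frame
    print('detected %d humans' % len(humans))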
# -*- coding: utf-8 -*-
"""
Created on Thu Oct  3 20:41:35 2019

@author: ASUS
"""
import cv2
import numpy as np
import settings
from pose.estimator import TfPoseEstimator
from pose.networks import get_graph_path
from imutils.video import VideoStream

poseEstimator = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(432, 368))

cap = cv2.VideoCapture(0)

#cap = VideoStream(src='rtsp://*****:*****@192.168.51.162/PSIA/streaming/channels/102').start()

while True:
    ret, frame = cap.read()
    if ret:
        show = cv2.resize(frame, (settings.winWidth, settings.winHeight))
        humans = poseEstimator.inference(show)
        cv2.imshow('pose', show)
    # press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
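
The fps_time variable initialised in these examples is normally used to overlay a frames-per-second readout on the output window. Below is a minimal sketch of that pattern, assumed for illustration rather than taken from the original code.

import time

import cv2

def draw_fps(img, fps_time):
    # Overlay the FPS since the previous call and return the new timestamp.
    now = time.time()
    if fps_time > 0:
        cv2.putText(img, 'FPS: %.1f' % (1.0 / max(now - fps_time, 1e-6)),
                    (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return now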
                   
import os

import cv2

# imports needed by this snippet; the pose.* module paths follow the example above
from pose.estimator import TfPoseEstimator
from pose.networks import get_graph_path

fps_time = 0

if __name__ == '__main__':
    # Action class label and the number of frames per saved clip
    action = 'stand'
    clip_length = 90
    root_path = '/home/dl1/datasets/actions/'
    os.makedirs(root_path + action + '/txt/', exist_ok=True)
    os.makedirs(root_path + action + '/imgs/', exist_ok=True)
    # Number clips from 1000 upwards, continuing after any existing samples
    samples = len(os.listdir(root_path + action + '/txt/'))
    sample_count = 1000 + samples

    e = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                        target_size=(432, 368))
    cam = cv2.VideoCapture(0)
    ret_val, image = cam.read()  # warm-up read to make sure the camera is ready
    joints = []
    joints_imgs = []
    while True:
        ret_val, image = cam.read()
        if ret_val:
            humans = e.inference(image)
            image, joint, *_, sk = TfPoseEstimator.get_humans(image,
                                                              humans,
                                                              imgcopy=False)
            if joint:
                if len(joints) < clip_length:
                    joints.append(joint[0])
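
The snippet is truncated here; presumably each full clip of clip_length joint vectors is written under root_path + action + '/txt/' using sample_count for numbering. A hypothetical save step under that assumption, not the original code:

import numpy as np

def save_clip(joints, root_path, action, sample_count):
    # Dump one clip of joint vectors as a single numbered text file (assumed format).
    out_path = os.path.join(root_path, action, 'txt', '%d.txt' % sample_count)
    np.savetxt(out_path, np.asarray(joints), fmt='%.5f')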