# ===== Example #1 =====
# 0
# -*- coding: UTF-8 -*-
import cv2 as cv
import argparse
import numpy as np
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize

# Command-line arguments: an optional path to a video file (webcam otherwise).
parser = argparse.ArgumentParser(description='Action Recognition')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()


# Load the pose-estimation backbone and the frame-wise action classifier.
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/framewise_recognition_under_scene.h5')


# FPS bookkeeping for the main processing loop (not visible in this chunk).
realtime_fps = '0.0000'  # displayed FPS; a string here — presumably for on-frame text, verify in the loop
start_time = time.time()
fps_interval = 1  # seconds between FPS recalculations
fps_count = 0     # frames counted within the current interval
run_timer = 0
frame_count = 0


# Video source (webcam or --video file) and the writer for the annotated output.
cap = choose_run_mode(args)
video_writer = set_video_writer(cap, write_fps=int(7.0))  # NOTE(review): 7 fps output — confirm intended

# Optional txt sink for joint data used during training (kept disabled).
# f = open('origin_data.txt', 'a+')
# ===== Example #2 =====
# 0
# -*- coding: UTF-8 -*-
import cv2 as cv
import argparse
import numpy as np
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize

# Command-line arguments: an optional path to a video file (webcam otherwise).
parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Load the pose-estimation backbone and the frame-wise action classifier.
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/framewise_recognition.h5')

# Parameter initialization: FPS bookkeeping for the main processing loop.
realtime_fps = '0.0000'  # displayed FPS; a string here — presumably for on-frame text, verify in the loop
start_time = time.time()
fps_interval = 1  # seconds between FPS recalculations
fps_count = 0     # frames counted within the current interval
run_timer = 0
frame_count = 0

# Read/write video files (only tested with webcam input).
cap = choose_run_mode(args)
video_writer = set_video_writer(cap, write_fps=int(30.0))

# # txt file that stores joint data, used for the training process (for training)
# f = open('origin_data.txt', 'a+')
import cv2 as cv
import argparse
import numpy as np
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize
# Command-line arguments; defaults to a bundled escalator test clip.
parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('--video', default='Escalator/light_1.5_left_30.mp4',help='Path to video file.')
args = parser.parse_args()

# Load the models and build the computation graph.
# estimator = load_pretrain_model('VGG_origin')  # alternative, heavier backbone
estimator = load_pretrain_model('mobilenet_thin')  # returns a TfPoseVisualizer handle and builds the compute graph
# action_classifier = load_action_premodel('Action/Es_all_demo.h5')  # alternative action model
action_classifier = load_action_premodel('Action/framewise_recognition_bobei.h5') # returns the action classifier; a tracker is defined inside it

# Parameter initialization: FPS bookkeeping for the main processing loop.
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read/write video files (only tested with webcam input).
cap = choose_run_mode(args) # choose webcam or a local file
video_writer = set_video_writer(cap, write_fps=int(12)) # init params for the video saved locally
# NOTE(review): the VideoWriter call below is truncated in this chunk (its
# closing arguments/paren are missing) — confirm against the full file.
video_1 = cv.VideoWriter('test_out/ex1.mp4',
                          cv.VideoWriter_fourcc(*'mp4v'),
                          int(12),
import sys
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize

# Command-line arguments: an optional path to a video file (webcam otherwise).
parser = argparse.ArgumentParser(
    description='Gesture control camera based on OpenPose')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Load the models.
# Lighter backbones that were tried:
# estimator = load_pretrain_model('mobilenet_thin')
# estimator = load_pretrain_model('mobilenet_small')
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/own_stand_wave_08.h5')

# Parameter initialization: FPS bookkeeping for the main processing loop.
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Controlled (gesture-driven) camera.
# NOTE(review): EasyPySpin is not imported in this visible chunk — confirm the
# import exists earlier in the full file.
cap_Receptor = EasyPySpin.VideoCapture(0)

# Master (controlling) camera: webcam or --video file.
cap_main = choose_run_mode(args)
# ===== Example #5 =====
# 0
import argparse
import numpy as np
import time

from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize

# Command-line arguments: an optional path to a video file (webcam otherwise).
parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

# Load the pose-estimation backbone and the frame-wise action classifier.
# estimator = load_pretrain_model('VGG_origin')  # alternative, heavier backbone
estimator = load_pretrain_model('mobilenet_thin')
action_classifier = load_action_premodel('Action/training/amazon_recognition.h5')

# Parameter initialization: FPS bookkeeping for the main processing loop.
realtime_fps = 0.0000  # NOTE(review): numeric here but a string '0.0000' in sibling variants — confirm what the display code expects
start_time = time.time()
fps_interval = 1  # seconds between FPS recalculations
fps_count = 0     # frames counted within the current interval
run_timer = 0
frame_count = 0
# Read and write video files (tested only for webcam input)
cap = choose_run_mode(args)
video_writer = set_video_writer(cap, write_fps=int(15.0))


# # A txt file that stores joint data for the training process (for training)
#f = open('origin_data.txt', 'a+')