Example #1
args = parser.parse_args()

# Load the models
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/taekwondo_recognition.h5')

# Initialize parameters
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read and write the video file
cap = choose_run_mode(args)
video_writer = set_video_writer(cap, write_fps=int(7.0))

# Save skeleton data for training to a txt file
# f = open('data\\txt_data\\taekwondoskill2-3-1.txt', 'a+')

while cv.waitKey(1) < 0:
    has_frame, show = cap.read()
    if has_frame:
        fps_count += 1
        frame_count += 1

        # pose estimation
        humans = estimator.inference(show)
        # get pose info
        pose = TfPoseVisualizer.draw_pose_rgb(show, humans)  # return frame, joints, bboxes, xcenter
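
The excerpt stops at the draw call, so the FPS bookkeeping it initializes (start_time, fps_interval, fps_count, realtime_fps) is never used here. A minimal sketch of how such a rolling FPS readout is typically computed and overlaid on the frame; the text position and style are assumptions, not from the original:

        # Refresh the FPS estimate once per fps_interval seconds
        if (time.time() - start_time) > fps_interval:
            realtime_fps = '%.4f' % (fps_count / (time.time() - start_time))
            fps_count = 0  # restart the per-interval frame counter
            start_time = time.time()
        cv.putText(show, 'FPS: ' + realtime_fps, (5, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)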
Example #2
estimator = load_pretrain_model(
    'mobilenet_thin')  # returns a TfPoseVisualizer handle and builds the compute graph
# action_classifier = load_action_premodel('Action/Es_all_demo.h5')  # returns the action-classification model; a tracker is defined inside it
action_classifier = load_action_premodel(
    'Action/framewise_recognition_bobei.h5')  # returns the action-classification model; a tracker is defined inside it

# Initialize parameters
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read/write video files (only webcam input has been tested)
cap = choose_run_mode(args)  # choose the webcam or a local file
video_writer = set_video_writer(cap, write_fps=int(12))  # initialize the writer that saves video locally
video_1 = cv.VideoWriter('test_out/alpha.mp4', cv.VideoWriter_fourcc(*'mp4v'),
                         int(12), (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
                                   round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))
# # txt file for saving joint data, used during training
# f = open('origin_data.txt', 'a+')  # extract keypoints with OpenPose, label them to train the classifier, then feed OpenPose output to it

while cap.isOpened():  # main loop
    has_frame, show = cap.read()  # read frame by frame
    if has_frame:
        fps_count += 1
        frame_count += 1
        # crop the image, then feed it into the network
        # TODO: shrinking the detection region clearly hurts accuracy; changing the display has to happen after the keypoints are generated
        # pose estimation body_parts PartPair uidx_list
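
The loop body breaks off at this comment. A minimal sketch of how it could continue, reusing the calls shown in the other examples on this page; what goes into the second writer (video_1) and the teardown order are assumptions:

        humans = estimator.inference(show)  # pose estimation, as in Example #1
        pose = TfPoseVisualizer.draw_pose_rgb(show, humans)  # return frame, joints, bboxes, xcenter
        video_writer.write(show)  # annotated frame to the set_video_writer output
        video_1.write(show)  # second copy to test_out/alpha.mp4 (assumed purpose)
    else:
        break

video_writer.release()
video_1.release()
cap.release()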
Example #3
fps_count = 0
run_timer = 0
frame_count = 0
cam_video = 1  # cam_video=0 -> camera, cam_video=1 -> video
file_p = "camera_record/*.mp4"  # folder path of the videos to read
is_save = False
vid_index = 0
# Build the list of videos
video_list = []  # stays empty when reading from a camera
if cam_video == 1:
    video_list = glob.glob(file_p)
    video_len = len(video_list)
    print(video_list)
    if video_len <= 0:
        print('No video in that folder')
        sys.exit(1)
cap = choose_run_mode(cam_video, video_list, vid_index)  # open the first video
#video_writer = set_video_writer(cap, write_fps=int(7.0))
video_writer = set_video_writer(cap,
                                write_fps=int(30.0),
                                output_path='after_cam.mp4')
video_writer_origin = set_video_writer(cap,
                                       write_fps=int(30.0),
                                       output_path='origin_cam.mp4')
vid_index += 1

if is_save:
    f = open('origin_data.txt', 'a+')
cur_pose = {}
befo_pose = {}
cur_box = {}
cur_tag = {}
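
The snippet stops before its read loop. A minimal sketch of how the vid_index bookkeeping set up above could advance through video_list once a file runs out; the loop structure is an assumption and the per-frame work is elided:

while cap.isOpened():
    has_frame, show = cap.read()
    if not has_frame:
        cap.release()
        if cam_video == 1 and vid_index < video_len:
            cap = choose_run_mode(cam_video, video_list, vid_index)  # open the next file
            vid_index += 1
            continue
        break
    # ... per-frame pose estimation, tracking, and writing as in the other examples ...

video_writer.release()
video_writer_origin.release()
if is_save:
    f.close()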
Example #4
args = parser.parse_args()  # on a successful parse, each argument is available as args.parameter

# Load the relevant model
# tensorflow abstraction library
estimator = load_pretrain_model('VGG_origin')  # load the pretrained model (VGG_origin); classifier??

# Initialize parameters
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Read and write video files (only webcam input tested)
cap = choose_run_mode(args)  # choose_run_mode builds the cap object from the parsed args

while cv.waitKey(1) < 0:  # loop until a key is pressed
    data = []
    has_frame, show = cap.read()  # read the video one frame at a time; has_frame is True on success, False on failure
    if has_frame:
        fps_count += 1  # fps counter
        frame_count += 1  # frame counter

        # pose estimation output
        humans = estimator.inference(show)  # detect human figures in the frame
        # get pose info: estimate the person's motion; returns frame, joints, bboxes, xcenter
        pose = TfPoseVisualizer.draw_pose_rgb(show, humans)  # return frame, joints, bboxes, xcenter

        image = img_to_array(show)  # image (450, 300, 3)
        image = cv.resize(image, dsize=(224, 224))  # resize to the 224x224 network input
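
The excerpt ends at the resize. A minimal sketch of how such a 224x224 frame is usually batched for a Keras-style classifier; frame_classifier is hypothetical, since this example loads no classification model:

import numpy as np

def prepare_batch(image):
    # normalize to [0, 1] and add the batch axis: (224, 224, 3) -> (1, 224, 224, 3)
    return np.expand_dims(image / 255.0, axis=0)

# batch = prepare_batch(image)
# preds = frame_classifier.predict(batch)  # hypothetical classifier, not loaded above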
Example #5
action_classifier = load_action_premodel(
    'Action/framewise_recognition.h5')  #action classifier model

# Initialize parameters
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0
cam_video = 1  # cam_video=0 -> camera, cam_video=1 -> video
file_p = "test/*.avi"
is_save = True
# Read/write video files (only webcam input has been tested)
# cap = choose_run_mode(args)
cap = choose_run_mode(cam_video, file_p)
video_writer = set_video_writer(cap, write_fps=int(7.0))

# # txt file for saving joint data, used during training
if is_save:
    f = open('origin_data.txt', 'a+')
while cv.waitKey(1) < 0:
    has_frame, show = cap.read()
    if has_frame:
        fps_count += 1
        frame_count += 1
        # pose estimation
        humans = estimator.inference(show)
        # get pose info
        pose = TfPoseVisualizer.draw_pose_rgb(
            show, humans)  # return frame, joints, bboxes, xcenter
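
Because is_save is True, this example actually opens origin_data.txt. A minimal sketch of how joint data could be appended inside the loop; the record layout is an assumption, since the original's write call is not shown:

        if is_save:
            joints = pose[1]  # draw_pose_rgb returns (frame, joints, bboxes, xcenter)
            f.write(' '.join(str(v) for v in joints) + '\n')  # one frame per line (assumed format)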
Example #6

estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/own_stand_wave_08.h5')

# Initialize parameters
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0

# Get the secondary (controlled) camera
cap_Receptor = EasyPySpin.VideoCapture(0)

# Get the main (master) camera
cap_main = choose_run_mode(args)

# Read and write video files
video_writer = set_video_writer(cap_main, write_fps=int(7.0))
# print("DEBUG:stage 1")

# # txt files saving multiple sets of joint data for training; rename them to wave_*.txt / stand_*.txt
# f = open('test_out/origin_data.txt', 'a+')

while cv.waitKey(1) < 0:
    #print("DEBUG:stage 2")
    has_frame, show = cap_main.read()

    #print("DEBUG:stage 3")
    if has_frame:
        fps_count += 1
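
The excerpt ends just after the counter update. Since EasyPySpin.VideoCapture mirrors the cv.VideoCapture API, the secondary camera can be read in the same iteration; what is done with its frame afterwards is an assumption:

        frame_count += 1
        has_frame_r, show_receptor = cap_Receptor.read()  # EasyPySpin follows the cv.VideoCapture.read() contract
        if has_frame_r:
            humans_r = estimator.inference(show_receptor)  # e.g. run the same estimator on the secondary feed (assumed usage)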