def get(self):
    """Run pose estimation over every frame of the first 'boston' video.

    Loads one video (a (n_frames, height, width, channels) array) from the
    boston dataset, builds a tf-pose estimator sized to the video, and draws
    the detected humans on each frame.
    """
    from tf_pose.estimator import TfPoseEstimator
    from tf_pose.networks import get_graph_path
    import sldatasets as sld

    dataset = sld.get("boston").data
    # First element of the (video, description) tuple: the frame array.
    data = dataset.__next__()[0]
    # NOTE(fix): removed `data = vread()` — skvideo.io.vread requires a file
    # path (calling it bare raises TypeError) and it discarded the frames
    # just loaded from the dataset above.
    n, h, w, _c = data.shape
    # Estimator input size must match the frame size (width, height).
    e = TfPoseEstimator(get_graph_path('cmu'), target_size=(w, h))
    for j in range(n):
        # NOTE(fix): was `data[j:]`, which slices all remaining frames;
        # inference expects a single image, so index frame j instead.
        img = data[j]
        humans = e.inference(img, True, 4.0)
        image = TfPoseEstimator.draw_humans(img, humans, imgcopy=False)
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path
from matplotlib import pyplot as plt
import sldatasets as sld

# Demo: run pose estimation on one 'lsa64' (cut) video and display each
# annotated frame at roughly `fps` frames per second.
dataset = sld.get("lsa64", version="cut", index=4)
fps = 30.0
data = dataset.__next__()[0]
e = TfPoseEstimator(get_graph_path('cmu'), target_size=(432, 368))
# NOTE(fix): the title used `j`, which was never bound (NameError on the
# first frame); bind it as the frame index via enumerate.
for j, tup in enumerate(data):
    img = tup[0]
    humans = e.inference(img, True, 4.0)
    image = TfPoseEstimator.draw_humans(img, humans, imgcopy=False)
    plt.imshow(image)
    plt.title(f'Frame {j}')
    # pause() both renders the figure and paces playback to ~fps.
    plt.pause(1.0 / fps)
import positions
import sldatasets as sld
from pathlib import Path
import os.path as osp

# Export estimated human positions for several datasets into npz files under
# ~/.sldatasets/<dataset-specific directory>.
example_path = osp.join(Path.home(), '.sldatasets')
# must provide the path of 'lsa64_positions.mat', in this example
# ~/.sldatasets/LSA64_pre
# a_path = osp.join(example_path, 'LSA64_pre')
# positions.positions_mat_to_npz(osp.join(example_path, 'LSA64_pre'))
# # can provide the destination path of the npz, else the current dir will be
# # used; in this example ~/.sldatasets/LSA64_raw
a_path = osp.join(example_path, 'LSA64_raw')
positions.get_humans_from_dataset(sld.get('lsa64', version='raw').data, a_path)
a_path = osp.join(example_path, 'LSA64_cut')
positions.get_humans_from_dataset(sld.get('lsa64', version='cut').data, a_path)
a_path = osp.join(example_path, 'ASLLVD_pre')
positions.get_humans_from_dataset(sld.get('asllvd').data, a_path)
import sldatasets as sld

# Work-in-progress check that the lsa64 raw dataset's npz positions file can
# be located and loaded.
ds = sld.get('lsa64', version='raw')
print("test loading lsa64 raw npz positions file")
# npzfile = np.load(ds.get_positions())
# print(npzfile.files)
print("Done")
import sldatasets as sld


def _show_sample(dataset):
    """Print a dataset's summary plus the first frame shape and description
    of its first video.
    """
    dataset.summary()
    video, description = next(dataset)
    # video[0] is the first (frame, annotation) pair; [0] selects the frame.
    print(video[0][0].shape)
    print(description)


# NOTE(fix): the two dataset demos were duplicated verbatim; factored the
# shared steps into _show_sample. Output is unchanged.
_show_sample(sld.get("boston"))
_show_sample(sld.get("lsa64", version="raw"))
import sldatasets as sld

# Walk the filtered asllvd dataset and, for each video, show its description,
# the shape of frame 3, and the per-frame pose annotation.
dataset = sld.get("asllvd", word='Twenty', consultant='liz')
print("dataset summary:")
dataset.summary()
for vid, desc in dataset:
    print('video description:')
    print(desc)
    frame, annot = vid[3]
    print('frame shape: ', frame.shape)
    print('estimated body parts (x,y,score): ')
    # pretty() pretty-prints the dictionary returned by annot.get().
    annot.pretty()
    # Bounding boxes are computed for a 640x480 image.
    print('face box: ', annot.h.get_face_box(640, 480))
    print('upper body box: ', annot.h.get_upper_body_box(640, 480))
print('\n testing lsa64...')
# Same walk over a single lsa64 (cut) video selected by word/consultant/rep.
dataset = sld.get("lsa64", version='cut', word=25, consultant=10, repetition=5)
dataset.summary()
for vid, desc in dataset:
    print(desc)
    frame, annot = vid[3]
    print('frame shape: ', frame.shape)
    print('estimated body parts (x,y,score): ')
    annot.pretty()