def main():
    """Detect faces on three sampled frames, then classify clothing per face."""
    with sample_video() as video:
        db = scannerpy.Database()
        sampled_frames = [0, 100, 200]
        # Face bounding boxes feed directly into the clothing classifier.
        [face_bboxes] = face_detection.detect_faces(
            db, videos=[video], frames=[sampled_frames])
        [clothing] = clothing_detection.detect_clothing(
            db, videos=[video], frames=[sampled_frames], bboxes=[face_bboxes])
        # Show the clothing result for the first sampled frame.
        print(next(clothing.load())[0])
def main():
    """Build a 5-column montage image from every 100th frame of a sample video."""
    with sample_video() as video:
        # One frame every 100 frames across the whole video.
        sampled = list(range(0, video.num_frames(), 100))
        montage = video.montage(sampled, cols=5)
        st.imwrite('sample_montage.jpg', montage)
        print('Wrote frame montage to {}'.format(
            os.path.abspath('sample_montage.jpg')))
def main():
    """Detect shot boundaries in a sample video and save them as a montage image."""
    with sample_video() as video:
        db = scannerpy.Database()
        [shot_frames] = shot_detection.detect_shots(db, videos=[video])
        montage = video.montage(shot_frames)
        imwrite('sample_shots.jpg', montage)
        print('Wrote shot montage to {}'.format(os.path.abspath('sample_shots.jpg')))
def main():
    """Compute face embeddings for the first ten frames of a sample video."""
    with sample_video() as video:
        db = scannerpy.Database()
        frame_indices = list(range(10))
        # Detected boxes are passed straight through to the embedding stage.
        face_bboxes = face_detection.detect_faces(
            db, videos=[video], frames=[frame_indices])
        embeddings = face_embedding.embed_faces(
            db, videos=[video], frames=[frame_indices], bboxes=face_bboxes)
        print('First embedding: {}'.format(next(embeddings[0].load())))
        print('Finished computing embeddings')
def main():
    """Detect faces on the first fifty frames, then classify gender per face."""
    with sample_video() as video:
        db = scannerpy.Database()
        frame_indices = list(range(50))
        # Detected boxes are passed straight through to the gender classifier.
        face_bboxes = face_detection.detect_faces(
            db, videos=[video], frames=[frame_indices])
        genders = gender_detection.detect_genders(
            db, videos=[video], frames=[frame_indices], bboxes=face_bboxes)
        print('First gender: {}'.format(next(genders[0].load())))
        print('Finished computing genders')
def main():
    """Detect human poses on three sampled frames and render them to a video."""
    with sample_video() as video:
        db = scannerpy.Database()
        frame_indices = [0, 100, 200]
        detected = pose_detection.detect_poses(
            db, videos=[video], frames=[frame_indices])
        vis.draw_poses(db, videos=[video], poses=detected,
                       frames=[frame_indices], path='sample_poses.mp4')
        print('Wrote video with poses drawn to {}'.format(
            os.path.abspath('sample_poses.mp4')))
def main():
    """Detect faces on the first fifty frames and draw their boxes onto a video."""
    with sample_video() as video:
        db = scannerpy.Database()
        frame_indices = list(range(50))
        face_bboxes = face_detection.detect_faces(
            db, videos=[video], frames=[frame_indices])
        vis.draw_bboxes(db, videos=[video], frames=[frame_indices],
                        bboxes=face_bboxes, paths=['sample_faces.mp4'])
        print('Wrote video with objects drawn to {}'.format(
            os.path.abspath('sample_faces.mp4')))
def main():
    """Run a generic object detector on a short clip and visualize the boxes."""
    with sample_video() as video:
        db = scannerpy.Database()
        # Every third frame of the first twenty.
        frame_indices = list(range(0, 20, 3))
        print('Running object detector')
        [detected] = object_detection.detect_objects(
            db, videos=[video], frames=[frame_indices])
        print('Running bbox visualizer')
        vis.draw_bboxes(db, videos=[video], frames=[frame_indices],
                        bboxes=[detected], paths=['sample_objects.mp4'])
        print('Wrote video with objects drawn to {}'.format(
            os.path.abspath('sample_objects.mp4')))
from scannertools import pose_detection, vis, sample_video
import scannerpy
import os

# Example script: detect poses on three sampled frames, then draw them
# onto an output video file.
with sample_video() as video:
    db = scannerpy.Database()
    frame_indices = [0, 100, 200]
    detected = pose_detection.detect_poses(
        db, videos=[video], frames=[frame_indices])
    vis.draw_poses(db, videos=[video], poses=detected,
                   frames=[frame_indices], path='sample_poses.mp4')
    print('Wrote video with poses drawn to {}'.format(
        os.path.abspath('sample_poses.mp4')))
from scannertools import sample_video, audio
import scannerpy
import os
import matplotlib
matplotlib.use('Agg')  # headless backend: must be set before importing pyplot
import matplotlib.pyplot as plt

# Example script: compute the average audio volume of the sample video
# and save a line plot of the result.
with sample_video(delete=False) as video:
    db = scannerpy.Database()
    audio_source = audio.AudioSource(video)
    volumes = audio.compute_average_volume(db, audio=[audio_source])
    plt.plot(list(volumes[0].load()))
    plt.savefig('volume.png')
def video():
    """Generator fixture: yield a sample video, releasing it when the consumer is done."""
    # Renamed the yielded value so it no longer shadows the function name.
    with st.sample_video() as sample:
        yield sample