{'name': 'others', 'color': (0, 0, 255)},
 {'name': 'startgame', 'color': (0, 255, 255)},
 {'name': 'endgame', 'color': (255, 255, 255)},
 {'name': 'replay_goal', 'color': (0, 0, 0)},
 {'name': 'resume','color':(64,244,226)},
 {'name':'SOT','color':(66, 86, 244)},
 {'name':'play','color':(244, 155, 65)},
 {'name':'replay','color':(193, 52, 156)}],
 # Per-video annotation file lives under annotation_path.
 clips_folder, sort_files_list=True, N_show_approx=20,
 screen_ratio=16/9, image_resize=1, loop_duration=None,
 annotation_file=os.path.join(annotation_path,'labels.json'))

# First run for this video: split it into 60-frame clips at half resolution.
if not os.path.exists(clips_folder):
    # Split the video into clips
    os.makedirs(os.path.join(clips_folder))
    print('Generating clips from the video...')
    annotator.video_to_clips(os.path.join(videos_folder,each), clips_folder,
                             clip_length=60, overlap=0, resize=0.5)

# Existing annotations found for this video: ask before clobbering them,
# unless a load flag was given.
if annotation_path_for_vid:
    if loadflag is None:
        # NOTE(review): "anotations" typo in this prompt — runtime string,
        # left untouched here.
        resp = input("the anotations will be overwritten. continue? (y/n)")
        if resp.strip().lower() == "n":
            sys.exit()
        else:
            # Remove the older annotations for the video.
            # NOTE(review): nesting reconstructed from a collapsed line; the
            # inner guard is redundant under the enclosing
            # `if annotation_path_for_vid:` — confirm intended structure.
            if annotation_path_for_vid:
                os.remove(os.path.join(annotation_path,"labels.json"))

# Run the annotator
annotator.main()
# Start from a clean clips folder for the overlap test.
clips_folder = 'test_overlap_clips'
if os.path.exists(clips_folder):
    shutil.rmtree(clips_folder)
os.makedirs(clips_folder)

# Test the annotator
from annotator import Annotator

# Three dummy labels, each with its own colour tuple.
test_labels = [
    {'name': 'test_label_1', 'color': (0, 1, 0)},
    {'name': 'test_label_2', 'color': (0, 0, 1)},
    {'name': 'test_label_3', 'color': (0, 1, 1)},
]

# Initialise the annotator
annotator = Annotator(
    test_labels,
    clips_folder,
    loop_duration=2,
    annotation_file='overlap_annotation.json',
    status_file='overlap_status.json',
)

# Create the overlapping clips: 6-unit clips with 50% overlap at half size.
annotator.video_to_clips('dummy_digits.mp4', clips_folder,
                         resize=0.5, overlap=0.5, clip_length=6)

# Run!
annotator.main()
# Save the selected stream into the demo folder.
stream.download(demo_folder, filename='youtube')

# Initialise the annotator with the three demo labels.
demo_labels = [
    {'name': 'result_table', 'color': (0, 1, 0)},
    {'name': 'olympics_logo', 'color': (0, 0, 1)},
    {'name': 'stretching', 'color': (0, 1, 1)},
]
annotator = Annotator(
    demo_labels,
    clips_folder,
    sort_files_list=True,
    N_show_approx=100,
    screen_ratio=16 / 9,
    image_resize=1,
    loop_duration=None,
    annotation_file='demo_labels.json',
)

# Split the video into 90-frame clips at half resolution.
print('Generating clips from the video...')
annotator.video_to_clips(youtube_filename, clips_folder,
                         clip_length=90, overlap=0, resize=0.5)

# Run the annotator
annotator.main()
# Initialise the annotator: eleven numbered section labels, one palette
# colour each (a previously commented-out "clarity" label set was removed).
section_palette = [
    (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 100, 0),
    (0, 100, 255), (0, 100, 50), (0, 150, 100), (50, 100, 255),
    (100, 50, 50), (50, 100, 150), (100, 100, 200),
]
section_labels = [{'name': str(i + 1), 'color': c}
                  for i, c in enumerate(section_palette)]

annotator = Annotator(
    section_labels,
    clips_folder,
    sort_files_list=True,
    N_show_approx=20,
    screen_ratio=16 / 9,
    image_resize=1,
    loop_duration=None,
    annotation_file='ourdata_section.json',
)

# Split the video into 150-frame clips at full resolution.
print('Generating clips from the video...')
annotator.video_to_clips(ourdata_filename, clips_folder,
                         clip_length=150, overlap=0, resize=1)

# Run the annotator
annotator.main()
'name': 'sluty', 'color': (0, 0, 1)
}, {
    'name': 'sexy', 'color': (0, 1, 1)
}, {
    'name': 'normal', 'color': (0, 1, 0)
}]

# Initialise MuViLab
# Source video lives on a fixed Windows path; the clips folder is named
# after the video's stem in the current directory.
name = 'H.mp4'
fn = 'E:\\download\\593310496_saturdays85\\test\\%s' % name
clips_folder = './%s' % name.rsplit('.', 1)[0]

# Split the main video into clips
annotator = Annotator(labels, clips_folder,
                      annotation_file='%s.json' % name, N_show_approx=10)
import os
# NOTE(review): nesting reconstructed from a collapsed line — the clip
# generation is assumed to run only when the folder is newly created;
# confirm against the original file.
if not os.path.exists(clips_folder):
    os.mkdir(clips_folder)
    annotator.video_to_clips(fn, clips_folder,
                             clip_length=1200, overlap=0, resize=0.5)

# Run the GUI
annotator.main()
def run_tool(video_file, labels):
    """Prepare per-video output folders and clips, then launch the annotator GUI.

    Parameters
    ----------
    video_file : str
        Path to the video to annotate (or to split into clips first).
    labels : str
        Path to a text file with one label name per line; it is copied into
        the output folder for reference.
    """
    # Derive the per-video output stem from the file name. os.path.basename
    # also handles OS-specific separators (the old split("/") did not); the
    # original "text before the first dot" convention is kept.
    video_file_name = os.path.basename(video_file).split(".")[0]

    out_folder = './output/'
    output_folder = f'{out_folder}{video_file_name}/'
    clips_folder = output_folder + 'clips/'
    json_file = f'{output_folder}{video_file_name}.json'
    ref_labels = f'{output_folder}{video_file_name}_ref_labels.json'

    # One call replaces the three mkdir/exists pairs of the original.
    os.makedirs(clips_folder, exist_ok=True)

    # Copy the reference labels file next to the generated annotations.
    copyfile(labels, ref_labels)

    print(json_file)

    # Fixed palette, cycled when there are more labels than colors.
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
              (0, 255, 255), (255, 0, 255), (192, 192, 192), (128, 128, 128),
              (128, 0, 0), (128, 128, 0), (0, 128, 0), (128, 0, 128),
              (0, 128, 128), (0, 0, 128)]

    # Read label names; blank lines are skipped (the original kept them as
    # empty-string labels).
    with open(labels) as f:
        label_names = [line.strip() for line in f if line.strip()]
    assert len(label_names) > 0, "Labels file doesn't have any labels"

    # Repeat the palette as many times as needed, then take the remainder —
    # covers both the "fewer labels than colors" and the cycling case.
    repeats, remainder = divmod(len(label_names), len(colors))
    colors_need = repeats * colors + colors[:remainder]

    annotation_labels = [{'name': label, 'color': color}
                         for label, color in zip(label_names, colors_need)]

    # Initialise the annotator
    annotator = Annotator(annotation_labels, clips_folder,
                          sort_files_list=True, N_show_approx=16,
                          screen_ratio=16 / 9, image_resize=1,
                          loop_duration=None, annotation_file=json_file)

    # Only split the video when no clips survive from a previous run.
    # Narrowed from a bare `except:` to the error listdir can actually raise.
    try:
        clipping_required = not os.listdir(clips_folder)
    except OSError:
        print("no files")
        clipping_required = True

    if clipping_required:
        # Split the video into clips
        print('Generating clips from the video...')
        annotator.video_to_clips(video_file, clips_folder,
                                 clip_length=60, overlap=0, resize=0.5)

    # Run the annotator
    annotator.main()
video_file = args.video
video_basename = os.path.basename(video_file)
# BUG FIX: os.path.split() separates directory from file name, so
# video_name was always '' and ext held the whole basename; splitext()
# is the call that separates stem from extension.
video_name, ext = os.path.splitext(video_basename)
out_file = args.out_file

# A directory argument is taken to already contain pre-cut clips;
# otherwise clips go into a sibling 'clips-<basename>' folder.
if os.path.isdir(video_file):
    clip_dir = video_file
else:
    clip_dir = 'clips-' + video_basename

annotator = Annotator(
    segment_labels if args.l == 'shot' else semantic_labels,
    clip_dir,
    annotation_file=out_file,
    N_show_approx=100)

if not os.path.exists(clip_dir):
    # Probe the source FPS so args.cl (presumably a duration in seconds —
    # confirm against the argument parser) converts to a frame count.
    vc = cv2.VideoCapture(video_file)
    fps = vc.get(cv2.CAP_PROP_FPS)
    vc.release()
    clip_frames = round(fps * args.cl)
    print('Clips contain {} frames'.format(clip_frames))
    os.makedirs(clip_dir)
    # Persist the per-clip frame count so later runs can recover it.
    with open(os.path.join(clip_dir, 'frame_count.txt'), 'w') as fp:
        fp.write(str(clip_frames))
    annotator.video_to_clips(video_file, clip_dir,
                             clip_length=clip_frames, overlap=0, resize=0.1)

annotator.main()