if __name__ == '__main__':
    # Run YOLO detection over a video file frame-by-frame, overlaying a
    # rolling FPS counter, until the stream ends or the user presses 'q'.
    # NOTE(review): reconstructed from a whitespace-mangled one-liner;
    # statement order preserved from the original.
    video_path = 'path2your-video'
    yolo = YOLO()
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")

    # FPS bookkeeping: count frames processed per ~1s window.
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()

    # Create the display window once, not on every iteration.
    cv2.namedWindow("result_evaluate", cv2.WINDOW_NORMAL)

    while True:
        return_value, frame = vid.read()
        if not return_value:
            # End of stream / read failure: frame is invalid, stop cleanly
            # instead of crashing inside Image.fromarray.
            break

        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)

        # Update the FPS label roughly once per second.
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time += exec_time
        curr_fps += 1
        if accum_time > 1:
            accum_time -= 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0

        cv2.putText(result, text=fps, org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.imshow("result_evaluate", result)

        # Quit on 'q' (original line was truncated here; 'break' is the
        # only body consistent with the loop structure).
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
# NOTE(review): whitespace-mangled interior of a frame-processing loop from a
# tracking script. It is incomplete at both ends: `i`, `trace_box`, `frame`,
# `args`, `trace_boxes_database` and the enclosing `while`/`for` are defined
# outside this view, and the bare `break` is only valid inside that unseen
# loop. Statements also appear out of order (the quit-key handler body —
# imshow / print_boxes / waitKey / break — precedes the per-frame detection
# and trace-box bookkeeping). Left byte-identical; needs the surrounding
# context to be safely reformatted.
# `hue_value` is computed but apparently unused (a fixed BGR-ish
# `color_value` is used for the rectangle instead) — presumably a leftover
# from the commented-out HSV color-cycling path; verify before removing.
# The trailing `for j, trace_box in enumerate(...)` loop creates one output
# directory per trace box (skipping j == 0) and counts existing files in it;
# it is truncated after `number = len(os.listdir(dir_name))`.
hue_value = (i * 70 % 180, 255, 255) color_value = (255, 255, 0) cv2.rectangle(result, (trace_box.left, trace_box.top), (trace_box.right, trace_box.bottom), color_value, 2) # result = cv2.cvtColor(result, cv2.COLOR_HSV2RGB) cv2.imshow('result', result) # if args.save_video: # out.write(result) trace_boxes_database.print_boxes(args.input, args.output) cv2.waitKey(0) break image = Image.fromarray(frame) # Run detection image, return_boxes = yolo.detect_image(image) result = np.asarray(image) trace_boxes_database.update(return_boxes) # result = cv2.cvtColor(result, cv2.COLOR_RGB2HSV) # 色々描画 for j, trace_box in enumerate(trace_boxes_database.trace_boxes): if j == 0: continue dir_name = f'{os.path.basename(args.input)}_{j}' os.makedirs(dir_name, exist_ok=True) number = len(os.listdir(dir_name))