def recognize_from_video():
    """Classify frames from a video/webcam stream and display the results.

    Reads frames from ``args.video``, runs the ailia Classifier on each
    preprocessed frame, overlays the top predictions, and optionally writes
    the annotated frames to ``args.savepath``.
    """
    # Build the classifier once, outside the frame loop.
    clf = ailia.Classifier(
        MODEL_PATH,
        WEIGHT_PATH,
        env_id=args.env_id,
        format=ailia.NETWORK_IMAGE_FORMAT_RGB,
        range=IMAGE_RANGE,
    )

    cap = webcamera_utils.get_capture(args.video)

    # Only create a writer when an output path was requested.
    video_writer = None
    if args.savepath is not None:
        src_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        src_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        out_h, out_w = webcamera_utils.calc_adjust_fsize(
            src_h, src_w, IMAGE_HEIGHT, IMAGE_WIDTH)
        video_writer = webcamera_utils.get_writer(args.savepath, out_h, out_w)

    while True:
        ret, frame = cap.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        display_img, frame = webcamera_utils.adjust_frame_size(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH)
        net_input = preprocess_image(frame)

        # Inference on the preprocessed frame.
        clf.compute(net_input, MAX_CLASS_COUNT)

        # Draw the top predictions onto the display-sized frame.
        plot_results(display_img, clf, resnet50_labels.imagenet_category)
        cv2.imshow('frame', display_img)
        time.sleep(SLEEP_TIME)

        if video_writer is not None:
            video_writer.write(display_img)

    cap.release()
    cv2.destroyAllWindows()
    if video_writer is not None:
        video_writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Classify frames from a video/webcam stream and display the results.

    Reads frames from ``args.video``, resizes each to the network input
    size, converts it to BGRA as expected by the ailia Classifier, overlays
    the top predictions on the original frame, and optionally writes the
    annotated frames to ``args.savepath``.
    """
    # net initialize
    classifier = ailia.Classifier(
        MODEL_PATH,
        WEIGHT_PATH,
        env_id=args.env_id,
        format=ailia.NETWORK_IMAGE_FORMAT_RGB,
        range=ailia.NETWORK_IMAGE_RANGE_S_FP32,
    )

    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath is not None:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    while True:
        ret, frame = capture.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        _, resized_frame = webcamera_utils.adjust_frame_size(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH
        )
        # BUGFIX: the original cast the frame to float32 before cvtColor and
        # back to uint8 afterwards.  cvtColor fills the new alpha channel with
        # the dtype's maximum, so on float input alpha became 1.0, which the
        # uint8 cast truncated to 1 instead of 255.  Converting the uint8
        # frame directly yields alpha=255 and avoids two full-frame copies.
        # NOTE(review): COLOR_RGB2BGRA on an OpenCV BGR frame swaps R and B;
        # kept unchanged as the classifier was tuned against this input.
        input_data = cv2.cvtColor(resized_frame, cv2.COLOR_RGB2BGRA)

        # inference
        classifier.compute(input_data, MAX_CLASS_COUNT)

        # show results
        plot_results(frame, classifier, efficientnet_labels.imagenet_category)
        cv2.imshow('frame', frame)
        time.sleep(SLEEP_TIME)

        # save results
        if writer is not None:
            writer.write(frame)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Classify frames from a video/webcam stream and display the results.

    Reads frames from ``args.video``, preprocesses each with ImageNet
    normalization, runs the network, overlays the top predictions, and
    optionally writes the annotated frames to ``args.savepath``.
    """
    # Build the network once before entering the frame loop.
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # Only create a writer when an output path was requested; the writer
    # uses the adjusted (display) frame size, matching what is written below.
    video_writer = None
    if args.savepath is not None:
        src_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        src_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        out_h, out_w = webcamera_utils.calc_adjust_fsize(
            src_h, src_w, IMAGE_HEIGHT, IMAGE_WIDTH
        )
        video_writer = webcamera_utils.get_writer(args.savepath, out_h, out_w)

    while True:
        ret, frame = cap.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        disp_img, net_in = webcamera_utils.preprocess_frame(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH, normalize_type='ImageNet'
        )

        # Inference
        preds = net.predict(net_in)

        # Overlay the top predictions on the display-sized frame.
        plot_results(disp_img, preds, partialconv_label.imagenet_category)
        cv2.imshow('frame', disp_img)
        time.sleep(SLEEP_TIME)

        if video_writer is not None:
            video_writer.write(disp_img)

    cap.release()
    cv2.destroyAllWindows()
    if video_writer is not None:
        video_writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Classify frames from a video/webcam stream and display the results.

    Reads frames from ``args.video``, preprocesses each with the module's
    ``_preprocess_image`` helper, runs the network, applies softmax, and
    overlays the top predictions on the displayed frame.
    """
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    capture = webcamera_utils.get_capture(args.video)

    while True:
        ret, frame = capture.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        input_batch = _preprocess_image(frame)

        # inference + normalize logits to probabilities
        output = net.predict(input_batch)
        output = _softmax(output)

        plot_results(frame, output, alexnet_labels.imagenet_category)
        cv2.imshow('frame', frame)

    capture.release()
    # BUGFIX: the preview window opened by cv2.imshow was never closed;
    # every sibling sample calls destroyAllWindows after releasing capture.
    cv2.destroyAllWindows()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Run action recognition over a sliding window of video frames.

    Maintains a rolling clip of ``args.duration`` frames in a
    ``(1, 3, duration, H, W)`` input blob, predicts an HMDB51 action label
    for each window, and previews the oldest frame of the window with the
    result overlaid.
    """
    capture = get_capture(args.video)

    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)
    net.set_input_shape((1, 3, args.duration, IMAGE_HEIGHT, IMAGE_WIDTH))

    # prepare input data: pre-fill the first duration-1 slots of the clip
    original_queue = deque([])
    input_blob = np.empty((1, 3, args.duration, IMAGE_HEIGHT, IMAGE_WIDTH))
    for i in range(args.duration - 1):
        ret, frame = capture.read()
        if not ret:
            # BUGFIX: the original `continue` skipped the slot, leaving
            # np.empty garbage at index i and desynchronizing the frame
            # queue.  A failed read here means the stream is exhausted,
            # so stop priming instead.
            break
        original_queue.append(frame)
        input_blob[0, :, i, :, :] = convert_input_frame(frame)

    next_input_index = args.duration - 1
    # CAP_PROP_FRAME_COUNT is 0 for live cameras, hence the == 0 escape.
    input_frame_size = capture.get(cv2.CAP_PROP_FRAME_COUNT)
    while (next_input_index <= input_frame_size or input_frame_size == 0):
        ret, frame = capture.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # Newest frame goes into the last slot of the clip.
        original_queue.append(frame)
        input_blob[0, :, args.duration - 1, :, :] = convert_input_frame(frame)

        result = net.predict(input_blob)
        print_mars_result(result)

        # Preview the oldest frame of the current window.
        preview_img = original_queue.popleft()
        plot_results(preview_img, result, HMDB51_LABEL)
        cv2.imshow('preview', preview_img)

        # Slide the window: shift every clip slot one position left.
        for i in range(args.duration - 1):
            input_blob[0, :, i, :, :] = input_blob[0, :, i + 1, :, :]
        next_input_index += 1

    capture.release()
    # Close the preview window and log via the module logger for
    # consistency with the other samples in this file (was bare print).
    cv2.destroyAllWindows()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Classify frames from a video/webcam stream and display the results.

    Reads frames from ``args.video``, preprocesses each with ImageNet
    normalization in NHWC layout, runs the network, overlays the top
    predictions, and optionally writes the annotated frames to
    ``args.savepath``.
    """
    # Build the network once before entering the frame loop.
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # Only create a writer when an output path was requested.
    video_writer = None
    if args.savepath is not None:
        src_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        src_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        video_writer = webcamera_utils.get_writer(args.savepath, src_h, src_w)

    while True:
        ret, frame = cap.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        _, net_in = webcamera_utils.preprocess_frame(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH, normalize_type='ImageNet')
        # This model expects channels-last input: NCHW -> NHWC.
        net_in = net_in.transpose(0, 2, 3, 1)

        # Inference
        preds = net.predict(net_in)

        # Overlay the top predictions on the original frame.
        plot_results(frame, preds, efficientnetv2_labels.imagenet_category)
        cv2.imshow('frame', frame)
        time.sleep(SLEEP_TIME)

        if video_writer is not None:
            video_writer.write(frame)

    cap.release()
    cv2.destroyAllWindows()
    if video_writer is not None:
        video_writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video(net):
    """Classify frames from a video/webcam stream and display the results.

    Reads frames from ``args.video``, converts each to RGB, runs
    ``predict`` on the frame, overlays the resulting probabilities, and
    optionally writes the annotated frames to ``args.savepath``.

    Args:
        net: initialized network object passed through to ``predict``.
    """
    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath is not None:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    while True:
        ret, frame = capture.read()
        # Stop on 'q' keypress or when the stream runs out of frames.
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # BUGFIX: capture.read() yields a 3-channel BGR frame, but
        # COLOR_BGRA2RGB requires a 4-channel input and raises a cv2 error.
        # COLOR_BGR2RGB is the correct conversion for camera frames.
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # inference
        prob = predict(net, img)

        # get result
        plot_results(frame, [prob], imagenet_classes)

        cv2.imshow('frame', frame)

        # save results
        if writer is not None:
            writer.write(frame)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')