def recognize_from_video():
    """Classify frames from a webcam/video stream and display the results."""
    # net initialize
    classifier = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath is not None:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, frame = cap.read()
        # stop on 'q' key press or end of stream
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        _, input_data = webcamera_utils.preprocess_frame(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH, normalize_type='ImageNet')
        # NCHW -> NHWC, the layout this model expects
        input_data = input_data.transpose(0, 2, 3, 1)

        # inference
        preds = classifier.predict(input_data)

        # postprocessing: draw top predictions onto the frame
        plot_results(frame, preds, efficientnetv2_labels.imagenet_category)
        cv2.imshow('frame', frame)
        time.sleep(SLEEP_TIME)

        # save results
        if writer is not None:
            writer.write(frame)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Run the model frame-by-frame on a video stream and display the output."""
    # net initialize
    model = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        print(
            '[WARNING] currently, video results cannot be output correctly...')
        writer = webcamera_utils.get_writer(
            args.savepath, IMAGE_HEIGHT, IMAGE_WIDTH)

    while True:
        ret, raw_frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        src_img, input_data = webcamera_utils.preprocess_frame(
            raw_frame, IMAGE_HEIGHT, IMAGE_WIDTH, normalize_type='ImageNet')
        src_img = cv2.resize(src_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
        src_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB)

        preds = model.predict(input_data)
        out_img = postprocess(src_img, preds)
        # imshow maps float pixel values into [0, 1]
        cv2.imshow('frame', out_img / 255.0)

        # # save results
        # if writer is not None:
        #     writer.write(out_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    print('Script finished successfully.')
def recognize_from_video(rpn, box, tracker, feat_ext):
    """Run tracking-by-detection on a video source, display and save results.

    Args:
        rpn, box, tracker, feat_ext: model components consumed by predict().
    """
    video_file = args.video if args.video else args.input[0]
    capture = get_capture(video_file)
    assert capture.isOpened(), 'Cannot capture source'

    # create video writer if savepath is specified as video format
    f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    # FIX: compare against None with identity ("is not None"), not "!= None"
    if args.savepath is not None:
        logger.warning(
            'currently, video results cannot be output correctly...'
        )
        writer = get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # inference (model expects RGB; OpenCV delivers BGR)
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        boxes = predict(rpn, box, tracker, feat_ext, img)
        res_img = frame_vis_generator(frame, boxes)

        # show
        cv2.imshow('frame', res_img)

        # save results
        if writer is not None:
            writer.write(res_img.astype(np.uint8))

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video(detector):
    """Detect COCO objects in each frame and display annotated frames."""
    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # letterbox to model size, BGR->RGB, HWC->CHW, scale to [0, 1], add batch dim
        blob = letterbox_convert(frame, (IMAGE_HEIGHT, IMAGE_WIDTH))
        blob = cv2.cvtColor(blob, cv2.COLOR_BGR2RGB)
        blob = np.transpose(blob, [2, 0, 1])
        blob = blob.astype(np.float32) / 255
        blob = np.expand_dims(blob, 0)

        output = detector.predict([blob])
        detect_object = post_processing(blob, args.threshold, args.iou, output)
        # map boxes from the letterboxed space back onto the original frame
        detect_object = reverse_letterbox(
            detect_object[0], frame, (IMAGE_HEIGHT, IMAGE_WIDTH))
        res_img = plot_results(detect_object, frame, COCO_CATEGORY)
        cv2.imshow('frame', res_img)

        # save results
        if writer is not None:
            writer.write(res_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    print('Script finished successfully.')
def recognize_from_video(video, detector, params):
    """Segment each frame and overlay the colorized class mask."""
    cap = webcamera_utils.get_capture(video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    category = params['category']
    palette = get_palette(len(category))

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        labels = detect_objects(frame, detector, params['img_size'])

        # draw segmentation area: colorize the label map via the palette,
        # then alpha-blend onto the non-background (label != 0) pixels
        mask = labels != 0
        label_img = Image.fromarray(np.asarray(labels, dtype=np.uint8))
        label_img.putpalette(palette)
        fill = np.asarray(label_img.convert("RGB"))
        fill = cv2.cvtColor(fill, cv2.COLOR_RGB2BGR)
        frame[mask] = frame[mask] * 0.6 + fill[mask] * 0.4

        # show
        cv2.imshow('frame', frame)

        # save results
        if writer is not None:
            writer.write(frame)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
def recognize_from_video(net, pretrained):
    """Render per-frame model output as a log-scaled matplotlib heatmap."""
    cap = webcamera_utils.get_capture(args.video)
    fig = plt.figure(figsize=(4, 2))

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        # render the (still empty) figure once to learn the canvas pixel size
        fig.canvas.draw()
        canvas_img = np.array(fig.canvas.renderer.buffer_rgba())
        height, width = canvas_img.shape[:2]
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # inference
        out = predict(net, frame, pretrained)

        # draw the output on the matplotlib canvas, then grab its pixels
        plt.axis("off")
        plt.imshow(np.log10(out), cmap='plasma_r')
        fig.canvas.draw()
        canvas_img = np.array(fig.canvas.renderer.buffer_rgba())
        canvas_img = cv2.cvtColor(canvas_img, cv2.COLOR_RGBA2BGR)
        cv2.imshow('frame', canvas_img)

        # save results
        if writer is not None:
            writer.write(canvas_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Denoising demo on a video stream: left half noisy, right half denoised."""
    net = InvNet()

    cap = webcamera_utils.get_capture(args.video)
    if not cap.isOpened():
        exit()

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        f_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # FIX: pass (height, width) in that order — every other get_writer
        # call in this file uses (f_h, f_w); here they were swapped.
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        _, resized_image = webcamera_utils.adjust_frame_size(frame, 256, 256)
        noised_frame = add_noise(resized_image)
        denoised_frame = net.predict(noised_frame)

        # half and half: left half keeps the noisy input, right half shows
        # the model output, for a side-by-side comparison
        noised_frame[:, 128:256, :] = denoised_frame[:, 128:256, :]
        cv2.imshow('frame', noised_frame)

        # save results
        if writer is not None:
            writer.write(noised_frame)

    cap.release()
    # FIX: removed raw_video/noised_video/denoised_video.release() — those
    # names were never defined in this function and raised NameError here.
    cv2.destroyAllWindows()
    # FIX: release the writer that was actually created above.
    if writer is not None:
        writer.release()
def recognize_from_video():
    """Colorize video frames with the model and display the result."""
    # net initialize
    model = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)
    model.set_input_shape((1, 1, IMAGE_HEIGHT, IMAGE_WIDTH))

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, img = cap.read()
        # press q to end video capture
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_lab_orig, img_lab_rs = preprocess(rgb)
        out = model.predict({'input.1': img_lab_rs})[0]
        colorized = post_process(out, img_lab_orig)
        # float [0, 1] -> uint8 BGR for display/writing
        colorized = np.array(colorized * 255, dtype=np.uint8)
        colorized = cv2.cvtColor(colorized, cv2.COLOR_RGB2BGR)
        cv2.imshow('frame', colorized)

        # save results
        if writer is not None:
            writer.write(colorized)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video(net):
    """Run per-frame inference on a video source and display the output."""
    source = args.video if args.video else args.input[0]
    cap = get_capture(source)
    assert cap.isOpened(), 'Cannot capture source'

    # create video writer if savepath is specified as video format
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        logger.warning(
            'currently, video results cannot be output correctly...'
        )
        writer = get_writer(args.savepath, height, width)

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # inference
        out_img = predict(net, frame)

        # show
        cv2.imshow('frame', out_img)

        # save results
        if writer is not None:
            writer.write(out_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Detect hands per frame and draw red bounding boxes on the live view."""
    # net initialize
    detector = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, to_show = cap.read()
        # press q to end video capture
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        img, scale = hand_detection_pytorch_utils.pre_process(to_show)
        # input resolution follows the preprocessed tensor's H and W
        detector.set_input_shape((1, 3, img.shape[2], img.shape[3]))
        out = detector.predict({'input.1': img})
        dets = hand_detection_pytorch_utils.post_process(
            out, img, scale, THRESHOLD, IOU)
        for det in dets:
            cv2.rectangle(
                to_show, (det[0], det[1]), (det[2], det[3]), [0, 0, 255], 3)
        cv2.imshow('frame', to_show)

        # save results
        if writer is not None:
            writer.write(to_show)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Run tiled inference on a center crop of each frame and display it."""
    # net initialize
    model = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        print(
            '[WARNING] currently, video results cannot be output correctly...'
        )
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # crop a quarter-size window starting at the frame center
        h, w = frame.shape[0], frame.shape[1]
        frame = frame[h // 2:h // 2 + h // 4, w // 2:w // 2 + w // 4, :]
        output_img = tiling(model, frame)
        cv2.imshow('frame', output_img)

        # # save results
        # if writer is not None:
        #     writer.write(output_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    print('Script finished successfully.')
def recognize_from_video(net):
    """Per-frame segmentation with a blended visualization overlay."""
    source = args.video if args.video else args.input[0]
    cap = get_capture(source)
    assert cap.isOpened(), 'Cannot capture source'

    # create video writer if savepath is specified as video format
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    if args.savepath != SAVE_IMAGE_PATH:
        writer = get_writer(args.savepath, height, width)
    else:
        writer = None

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # inference
        pred = predict(net, frame)

        # plot result: blend the prediction onto the source frame
        res_img = visualize(frame, pred, weight=0.6)

        # show
        cv2.imshow('frame', res_img)

        # save results
        if writer is not None:
            writer.write(res_img.astype(np.uint8))

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video(video, det_model, rec_model, ga_model):
    """Detect, identify, and annotate faces on each frame of a video."""
    cap = webcamera_utils.get_capture(video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    # load identities: reference feature vectors used for identification
    ident_names, ident_feats = load_identities(rec_model)

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        faces = predict(frame, det_model, rec_model, ga_model)
        faces = face_identification(faces, ident_feats)

        # plot result
        res_img = draw_detection(frame, faces, ident_names)

        # show
        cv2.imshow('frame', res_img)

        # save results
        if writer is not None:
            writer.write(res_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    print('Script finished successfully.')
def recognize_from_video(net):
    """Detect objects per frame, draw them, and display/save the result.

    Args:
        net: model handle consumed by predict().
    """
    threshold = args.threshold
    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    palette = get_palette(100)

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # FIX: the second return value (resized_img) was assigned but never
        # used; only the adjusted frame is needed downstream.
        frame, _ = webcamera_utils.adjust_frame_size(
            frame, IMAGE_SIZE, IMAGE_SIZE)

        # inference
        detections = predict(net, frame)

        frame = draw_detections(frame, detections, palette, threshold)
        cv2.imshow('frame', frame)

        # save results
        if writer is not None:
            writer.write(frame)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Detect text regions per frame with CRAFT and display the result."""
    # net initialize
    model = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath != SAVE_IMAGE_PATH:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    while True:
        ret, image = cap.read()
        # press q to end video capture
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        x, ratio_w, ratio_h = craft_pytorch_utils.pre_process(image)
        # input resolution follows the preprocessed tensor's H and W
        model.set_input_shape((1, 3, x.shape[2], x.shape[3]))
        y, _ = model.predict({'input.1': x})
        vis = craft_pytorch_utils.post_process(y, image, ratio_w, ratio_h)
        vis = cv2.cvtColor(vis, cv2.COLOR_RGB2BGR)
        cv2.imshow('frame', vis)

        # save results
        if writer is not None:
            writer.write(vis)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video():
    """Detect line segments on each frame and show the annotated view."""
    # net initialize
    model = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    cap = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    writer = None
    if args.savepath is not None:
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, height, width)

    logger.warning('Inference using CPU because model accuracy is low on GPU.')

    while True:
        ret, frame = cap.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        img_input = np.array(frame)

        # inference
        preds_img = gradio_wrapper_for_LSD(img_input, model)
        cv2.imshow('frame', preds_img)

        # save results
        if writer is not None:
            writer.write(preds_img)

    cap.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def recognize_from_video(net):
    """Classify each video frame and overlay the top ImageNet labels.

    Args:
        net: model handle consumed by predict().
    """
    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath is not None:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        # FIX: capture frames are 3-channel BGR; COLOR_BGRA2RGB expects a
        # 4-channel image and raises on this input — use COLOR_BGR2RGB.
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # inference
        prob = predict(net, img)

        # get result: annotate the (BGR) display frame in place
        plot_results(frame, [prob], imagenet_classes)
        cv2.imshow('frame', frame)

        # save results
        if writer is not None:
            writer.write(frame)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
def estimate_from_video():
    """Estimate depth on a video stream with a two-stage (encoder/decoder) net.

    Displays the disparity map via matplotlib; video saving is not yet
    supported for this model (see FIXME below).
    """
    # net initialize
    enc_net = ailia.Net(ENC_MODEL_PATH, ENC_WEIGHT_PATH, env_id=args.env_id)
    dec_net = ailia.Net(DEC_MODEL_PATH, DEC_WEIGHT_PATH, env_id=args.env_id)

    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        logger.warning('currently video results output feature '
                       'is not supported in this model!')
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        save_h, save_w = webcamera_utils.calc_adjust_fsize(
            f_h, f_w, IMAGE_HEIGHT, IMAGE_WIDTH)
        # save_w * 2: we stack source frame and estimated heatmap
        writer = webcamera_utils.get_writer(args.savepath, save_h, save_w * 2)
    else:
        writer = None

    # read one frame up-front to learn the source resolution
    ret, frame = capture.read()
    org_height, org_width, _ = frame.shape

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        _, input_data = webcamera_utils.preprocess_frame(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH)

        # encoder
        enc_input_blobs = enc_net.get_input_blob_list()
        enc_net.set_input_blob_data(input_data, enc_input_blobs[0])
        enc_net.update()
        features = enc_net.get_results()

        # decoder: feed each encoder feature map into its matching input blob
        # (blob order must follow the encoder output order)
        dec_inputs_blobs = dec_net.get_input_blob_list()
        for f_idx in range(len(features)):
            dec_net.set_input_blob_data(
                features[f_idx], dec_inputs_blobs[f_idx])
        dec_net.update()
        preds_ailia = dec_net.get_results()

        # postprocessing: last decoder output is the disparity map
        disp = preds_ailia[-1]
        disp_resized, vmax = result_plot(disp, org_width, org_height)
        plt.imshow(disp_resized, cmap='magma', vmax=vmax)
        plt.pause(.01)
        # stop when the user closes the matplotlib window
        if not plt.get_fignums():
            break

        # save results
        # FIXME: How to save plt --> cv2.VideoWriter()
        # if writer is not None:
        #     # put pixel buffer in numpy array
        #     canvas = FigureCanvas(fig)
        #     canvas.draw()
        #     mat = np.array(canvas.renderer._renderer)
        #     res_img = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)
        #     writer.write(res_img)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    # FIX: this string literal was broken across two source lines
    # (a syntax error); restored as a single literal.
    logger.info('Script finished successfully.')