def main():
    video_capture = WebcamVideoStream(src=0, width=480, height=360).start()
    fps = FPS().start()

    detection_graph = model_load_into_memory()

    thread1 = ServerHandlerPacket("Thread-1-ServerHandlerPacket")
    thread1.daemon = True
    thread1.start()

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Camera detection loop
                frame = video_capture.read()
                cv2.imshow('Input', frame)  # show the raw camera frame
                t = time.time()
                output = detect_objects(frame, sess, detection_graph)
                cv2.imshow('Video', output)
                fps.update()
                print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            video_capture.stop()
            fps.stop()
            print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
            print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

            cv2.destroyAllWindows()
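
Every example below times its loop with the imutils-style FPS counter. For reference, a minimal sketch of the interface these snippets assume (it matches imutils.video.FPS, but treat it as a sketch, not the canonical source):

import datetime

class FPS:
    # Minimal frame-rate counter: start()/stop() bracket the run,
    # update() is called once per processed frame.
    def __init__(self):
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        self._numFrames += 1

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()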
Code example #2
    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # Define the codec and create VideoWriter object
    fourcc = cv.VideoWriter_fourcc(*args.codec)
    out = cv.VideoWriter(args.save, fourcc, args.fps,
                         (args.width, args.height))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_frame = output_q.get()
        out.write(output_frame)
        cv.imshow('Video', output_frame)
        fps.update()
        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    out.release()
    cv.destroyAllWindows()
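
The WebcamVideoStream used throughout is a threaded capture wrapper in the style of imutils.video.WebcamVideoStream; the stock class takes only src, so the width/height keywords here assume a small variant like the sketch below (note that example #4's version instead returns a (ret, frame) tuple from read()):

import threading
import cv2

class WebcamVideoStream:
    # Threaded capture: a background thread keeps grabbing frames so
    # read() always returns the latest one without blocking.
    def __init__(self, src=0, width=None, height=None):
        self.stream = cv2.VideoCapture(src)
        if width and height:  # width/height support is an assumed extension
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        threading.Thread(target=self.update, daemon=True).start()
        return self

    def update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()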
Code example #3
        # Display the resulting image
        cv2.imshow('Video', canvas)

        # Track FPS
        fps.update()

        # Press the following keys to activate features
        key_press = cv2.waitKey(1) & 0xFF
        if key_press == ord('q'):
            # 'q' to quit
            break
        elif key_press == ord('v'):
            # 'v' to turn video mode on or off
            settings['video'] = not settings['video']
        elif key_press == ord('p'):
            # 'p' to turn pose showing on or off
            settings['showpose'] = not settings['showpose']
        elif key_press == ord('s'):
            # 's' to turn sketch rendering on or off
            settings['nosketch'] = not settings['nosketch']

    # Print time performance
    fps.stop()
    logger.info('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    logger.info('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    # Release handle to the webcam
    video_capture.stop()
    cv2.destroyAllWindows()
Code example #4
def main():
    # Load the AdaIN model
    ada_in = AdaINference(args.checkpoint, args.vgg_path, device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size, args.scale, args.alpha, args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0])  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps, out_shape)
    
    fps = FPS().start() # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while True:
        ret, frame = cap.read()

        if ret is True:       
            frame_resize = cv2.resize(frame, None, fx=style_window.scale, fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)
                frame_resize = gaussian_filter(frame_resize, sigma=0.5)

            count += 1
            print("Frame:",count,"Orig shape:",frame.shape,"New shape",frame_resize.shape)

            content_rgb = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0], content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = ada_in.predict(content_rgb, style_rgb, style_window.alpha)
            else:
                interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
                stylized_rgb = ada_in.predict_interpolate(content_rgb, 
                                                          style_window.style_rgbs,
                                                          interp_weights,
                                                          style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])
            
            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)
                
            if args.video_out is not None:
                stylized_bgr = cv2.resize(stylized_bgr, out_shape) # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('AdaIN Style', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10) 
            if key & 0xFF == ord('r'):   # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:     # Load a second style if interpolating
                    style_window.set_style(random=True, style_idx=1, window='style2')    
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print("Switching to keep_colors",keep_colors)
            elif key & 0xFF == ord('q'): # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()
    
    if args.video_out is not None:
        video_writer.release()
    
    cv2.destroyAllWindows()
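
Example #4 reads many attributes off args; a hedged reconstruction of the matching argparse setup (attribute names are taken from the code above, but flag spellings and defaults are guesses, not the original CLI):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True)        # AdaIN checkpoint
parser.add_argument('--vgg-path', required=True)          # VGG weights
parser.add_argument('--device', default='/gpu:0')
parser.add_argument('--style-path', required=True)
parser.add_argument('--style-size', type=int, default=512)
parser.add_argument('--scale', type=float, default=1.0)
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--interpolate', action='store_true')
parser.add_argument('--video-source', type=int, default=0)
parser.add_argument('--width', type=int, default=640)
parser.add_argument('--height', type=int, default=480)
parser.add_argument('--video-out', default=None)
parser.add_argument('--fps', type=int, default=10)
parser.add_argument('--concat', action='store_true')
parser.add_argument('--noise', action='store_true')
parser.add_argument('--random', type=int, default=0)      # re-style every N frames
parser.add_argument('--keep-colors', action='store_true')
args = parser.parse_args()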
Code example #5
        sort_tracker.update(dets, labels, probs_max, faces, persist_queue)
        for tracker in sort_tracker.trackers:
            bbox = convert_x_to_bbox(tracker.kf.x[:4, :]).astype('int')
            (left, top, right, bottom) = bbox.flatten()
            left = max(0, left)
            top = max(0, top)
            right = min(right, frame.shape[1])
            bottom = min(bottom, frame.shape[0])
            nama = tracker.mode_names()
            probs = round(tracker.mean_probs() * 100, 4)
            text = "{}  {}%".format(nama,
                                    probs) if nama != UNKNOWN else UNKNOWN
            color = COLOR_GREEN if nama != UNKNOWN else COLOR_RED
            cv2.rectangle(frame, (left, top), (right, bottom), color, 4)
            cv2.putText(frame, text, (left - 10, top - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, COLOR_WHITE, 4)

        cv2.putText(frame, "{:.1f} FPS".format(fps.fps()), (1100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, COLOR_BLACK, 2)
        current_time = time.ctime()
        cv2.putText(frame, current_time, (100, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, COLOR_BLACK, 2)
        cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # sys.stdout.buffer.write(frame.tobytes())
        fps.update()
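
convert_x_to_bbox in example #5 maps the tracker's Kalman state back to corner coordinates; a sketch matching the standard SORT helper (assumed to be what this code uses):

import numpy as np

def convert_x_to_bbox(x):
    # SORT state layout: [cx, cy, s (box area), r (aspect ratio), ...]
    w = np.sqrt(x[2] * x[3])  # width = sqrt(area * aspect)
    h = x[2] / w              # height = area / width
    return np.array([x[0] - w / 2., x[1] - h / 2.,
                     x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4))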
Code example #6
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints,
                    relu_targets=args.relu_targets,
                    vgg_path=args.vgg_path,
                    device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size,
                               args.crop_size, args.scale, args.alpha,
                               args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0]
                         )  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps,
                                       out_shape)

    fps = FPS().start()  # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while True:
        if args.max_frames > 0 and count > args.max_frames:
            break

        ret, frame = cap.read()

        if ret is True:
            frame_resize = cv2.resize(frame,
                                      None,
                                      fx=style_window.scale,
                                      fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape,
                                                 np.uint8)

            count += 1
            print("Frame:", count, "Orig shape:", frame.shape, "New shape",
                  frame_resize.shape)

            content_rgb = cv2.cvtColor(
                frame_resize,
                cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0],
                                               content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            # For best results style img should be comparable size to content
            # style_rgb = resize_to(style_rgb, min(content_rgb.shape[0], content_rgb.shape[1]))

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = wct_model.predict(content_rgb, style_rgb,
                                                 style_window.alpha)

                if args.passes > 1:
                    for i in range(args.passes - 1):
                        stylized_rgb = wct_model.predict(
                            stylized_rgb, style_rgb, style_window.alpha)
                # stylized_rgb = wct_model.predict_np(content_rgb, style_rgb, style_window.alpha) # Numpy version
            # else: ## TODO Implement interpolation
            #     interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
            #     stylized_rgb = wct_model.predict_interpolate(content_rgb,
            #                                               style_window.style_rgbs,
            #                                               interp_weights,
            #                                               style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(
                    style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])

            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)

            if args.video_out is not None:
                stylized_bgr = cv2.resize(
                    stylized_bgr,
                    out_shape)  # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('WCT Universal Style Transfer', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10)
            if key & 0xFF == ord('r'):  # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:  # Load a second style if interpolating
                    style_window.set_style(random=True,
                                           style_idx=1,
                                           window='style2')
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print('Switching to keep_colors', keep_colors)
            elif key & 0xFF == ord('s'):
                out_f = "{}.png".format(time.time())
                save_img(out_f, stylized_rgb)
                print('Saved image to', out_f)
            elif key & 0xFF == ord('q'):  # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()

    if args.video_out is not None:
        video_writer.release()

    cv2.destroyAllWindows()
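
save_img (bound to the 's' key above) is not shown; a minimal stand-in, assuming the frame is an RGB uint8 array:

import cv2

def save_img(path, img_rgb):
    # Hypothetical helper: OpenCV writes BGR, so convert before saving
    cv2.imwrite(path, cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR))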
Code example #7
    process = Process(target=worker, args=(input_q, output_q))
    process.daemon = True
    # Note: this Process is never started; the Pool below actually
    # spawns args.num_workers copies of worker(input_q, output_q).
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        cv2.imshow('Video', output_q.get())
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    video_capture.stop()
    cv2.destroyAllWindows()
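
The worker passed to the Pool in example #7 is defined elsewhere; a hedged sketch of the frame-pump pattern it implies (detect_objects stands in for the real model call):

from multiprocessing import Queue

input_q = Queue(maxsize=5)   # bounded, so the camera can't race far ahead
output_q = Queue(maxsize=5)

def detect_objects(frame):
    # Placeholder for the real detector (an assumption, not the original)
    return frame

def worker(input_q, output_q):
    # Each pool process loops forever: pull a raw frame, run the
    # detector, and push the result back for the display loop.
    while True:
        output_q.put(detect_objects(input_q.get()))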
Code example #8
			# Draw the bounding box and label
			cv2.rectangle(frame, (startX, startY), (endX, endY),
				(0, 255, 0), 2)
			cv2.putText(frame, l, (startX, startY - 15),
				cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

	# Optionally write the annotated frame to the output video
	if writer is not None:
		writer.write(frame)

	# Display the frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF

	# Press Esc to exit
	if key == 27:
		break

	# Update the FPS counter
	fps.update()


fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

if writer is not None:
	writer.release()

cv2.destroyAllWindows()
vs.release()
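
Example #8 uses vs, writer, and fps created earlier in the script; a sketch of the setup it implies (vs.release() suggests a plain cv2.VideoCapture; the paths and codec here are assumptions):

import cv2
from imutils.video import FPS

vs = cv2.VideoCapture("input.mp4")            # assumed video source
writer = None
output_path = "output.avi"                    # hypothetical output path
if output_path is not None:
    w = int(vs.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")  # codec is a guess
    writer = cv2.VideoWriter(output_path, fourcc, 30.0, (w, h))
fps = FPS().start()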