def worker(input_q, output_q):
    # Load a (frozen) TensorFlow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        if frame is None:  # Sentinel: break so fps.stop()/sess.close() are reachable
            break
        output_q.put(detect_objects(frame, sess, detection_graph))

    fps.stop()
    sess.close()
def worker(input_q, output_q):
    # Load a (frozen) TensorFlow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        # Allocate GPU memory on demand instead of grabbing it all upfront
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(graph=detection_graph, config=config)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        if frame is None:  # Sentinel: break so fps.stop()/sess.close() are reachable
            break
        output_q.put(detect_objects(frame, sess, detection_graph))

    fps.stop()
    sess.close()
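The detect_objects() helper that the workers above call is not shown on this page. A minimal sketch, assuming a standard TF1 Object Detection API frozen graph (the tensor names below are that API's conventions; the box-drawing step is omitted):

import numpy as np

def detect_objects(image_np, sess, detection_graph):
    # The model expects a batch of images: [1, height, width, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num = detection_graph.get_tensor_by_name('num_detections:0')

    # One forward pass; all outputs come back as numpy arrays
    (boxes, scores, classes, num) = sess.run(
        [boxes, scores, classes, num],
        feed_dict={image_tensor: image_np_expanded})

    # Drawing the detections onto the frame is left to a visualization
    # utility; the callers expect the (annotated) frame back.
    return image_np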
Example #4
def main():
    video_capture = WebcamVideoStream(src=0, width=480, height=360).start()
    fps = FPS().start()

    detection_graph = model_load_into_memory()
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:  # Camera detection loop
                frame = video_capture.read()
                cv2.imshow('Input', frame)
                t = time.time()
                output = detect_objects(frame, sess, detection_graph)
                cv2.imshow('Video', output)
                fps.update()
                print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            video_capture.stop()
            fps.stop()
            print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
            print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
            cv2.destroyAllWindows()
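model_load_into_memory() is not defined in this example; a minimal sketch that mirrors the graph-loading code from the worker examples above (PATH_TO_CKPT is assumed to point at the frozen .pb file):

def model_load_into_memory():
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph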
Example #5
def main():
    # Queues and worker pool feeding the detection workers defined above.
    # queue_size is an assumed CLI argument, as in the other examples here.
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # Define the codec and create VideoWriter object
    fourcc = cv.VideoWriter_fourcc(*args.codec)
    out = cv.VideoWriter(args.save, fourcc, args.fps,
                         (args.width, args.height))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_frame = output_q.get()
        out.write(output_frame)
        cv.imshow('Video', output_frame)
        fps.update()
        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
    video_capture.stop()
    out.release()
    cv.destroyAllWindows()
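The FPS helper used throughout these examples behaves like imutils.video.FPS; a minimal equivalent for reference:

import datetime

class FPS:
    def __init__(self):
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        self._numFrames += 1

    def elapsed(self):
        # Total seconds between start() and stop()
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()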
Example #6
def main():
    # Load the AdaIN model
    ada_in = AdaINference(args.checkpoint, args.vgg_path, device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size, args.scale, args.alpha, args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0])  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps, out_shape)
    
    fps = FPS().start() # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while True:
        ret, frame = cap.read()

        if ret:
            frame_resize = cv2.resize(frame, None, fx=style_window.scale, fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)
                frame_resize = gaussian_filter(frame_resize, sigma=0.5)

            count += 1
            print("Frame:",count,"Orig shape:",frame.shape,"New shape",frame_resize.shape)

            content_rgb = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0], content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = ada_in.predict(content_rgb, style_rgb, style_window.alpha)
            else:
                interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
                stylized_rgb = ada_in.predict_interpolate(content_rgb, 
                                                          style_window.style_rgbs,
                                                          interp_weights,
                                                          style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])
            
            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)
                
            if args.video_out is not None:
                stylized_bgr = cv2.resize(stylized_bgr, out_shape) # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('AdaIN Style', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10) 
            if key & 0xFF == ord('r'):   # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:  # Load a second style if interpolating
                    style_window.set_style(random=True, style_idx=1, window='style2')    
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print("Switching to keep_colors",keep_colors)
            elif key & 0xFF == ord('q'): # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()
    
    if args.video_out is not None:
        video_writer.release()
    
    cv2.destroyAllWindows()
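Note that cap.read() in this example returns a (grabbed, frame) pair, unlike imutils' WebcamVideoStream, which returns only the frame. A threaded sketch of such a stream, under that assumption:

from threading import Thread
import cv2

class WebcamVideoStream:
    def __init__(self, src=0, width=None, height=None):
        self.stream = cv2.VideoCapture(src)
        if width is not None and height is not None:
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        # Grab frames on a daemon thread so read() never blocks the main loop
        Thread(target=self.update, daemon=True).start()
        return self

    def update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        # Return the most recent grab status and frame
        return self.grabbed, self.frame

    def stop(self):
        self.stopped = True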
Example #7
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints,
                    relu_targets=args.relu_targets,
                    vgg_path=args.vgg_path,
                    device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size,
                               args.crop_size, args.scale, args.alpha,
                               args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0])  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps,
                                       out_shape)

    fps = FPS().start()  # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while True:
        if args.max_frames > 0 and count > args.max_frames:
            break

        ret, frame = cap.read()

        if ret:
            frame_resize = cv2.resize(frame,
                                      None,
                                      fx=style_window.scale,
                                      fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape,
                                                 np.uint8)

            count += 1
            print("Frame:", count, "Orig shape:", frame.shape, "New shape",
                  frame_resize.shape)

            content_rgb = cv2.cvtColor(
                frame_resize,
                cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0],
                                               content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            # For best results style img should be comparable size to content
            # style_rgb = resize_to(style_rgb, min(content_rgb.shape[0], content_rgb.shape[1]))

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = wct_model.predict(content_rgb, style_rgb,
                                                 style_window.alpha)

                if args.passes > 1:
                    for i in range(args.passes - 1):
                        stylized_rgb = wct_model.predict(
                            stylized_rgb, style_rgb, style_window.alpha)
                # stylized_rgb = wct_model.predict_np(content_rgb, style_rgb, style_window.alpha) # Numpy version
            # else: ## TODO Implement interpolation
            #     interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
            #     stylized_rgb = wct_model.predict_interpolate(content_rgb,
            #                                               style_window.style_rgbs,
            #                                               interp_weights,
            #                                               style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(
                    style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])

            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)

            if args.video_out is not None:
                stylized_bgr = cv2.resize(
                    stylized_bgr,
                    out_shape)  # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('WCT Universal Style Transfer', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10)
            if key & 0xFF == ord('r'):  # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:  # Load a second style if interpolating
                    style_window.set_style(random=True,
                                           style_idx=1,
                                           window='style2')
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print('Switching to keep_colors', keep_colors)
            elif key & 0xFF == ord('s'):
                out_f = "{}.png".format(time.time())
                save_img(out_f, stylized_rgb)
                print('Saved image to', out_f)
            elif key & 0xFF == ord('q'):  # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()

    if args.video_out is not None:
        video_writer.release()

    cv2.destroyAllWindows()
Example #8
def main():
    # Queues and worker pool for the detection workers defined above.
    # queue_size is an assumed CLI argument, as elsewhere on this page.
    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        cv2.imshow('Video', output_q.get())
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    video_capture.stop()
    cv2.destroyAllWindows()
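Since the workers above exit on a None sentinel, the same wiring can also be done with plain Process workers, which makes shutdown explicit. A hedged sketch (the queue sizes and the worker count of 4 are assumptions, not values from the examples):

from multiprocessing import Process, Queue

input_q = Queue(maxsize=5)
output_q = Queue(maxsize=5)

workers = [Process(target=worker, args=(input_q, output_q), daemon=True)
           for _ in range(4)]
for p in workers:
    p.start()

# ... capture/display loop as in the examples above ...

for _ in workers:
    input_q.put(None)  # one sentinel per worker unblocks input_q.get()
for p in workers:
    p.join()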