Example #1
# Load model
model = torch.jit.load(args.model_checkpoint)
model.backbone_scale = args.model_backbone_scale
model.refine_mode = args.model_refine_mode
model.refine_sample_pixels = args.model_refine_sample_pixels
model.refine_threshold = args.model_refine_threshold
model.cuda().eval()

width, height = args.resolution
cam = Camera(width=width, height=height)
dsp = Displayer('RTHRBM Preview',
                cam.width,
                cam.height,
                show_info=(not args.hide_fps))
ctr = Controller()
fake_camera = pyvirtualcam.Camera(width=cam.width, height=cam.height, fps=30)
dsp.webcam = fake_camera


def cv2_frame_to_cuda(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return ToTensor()(Image.fromarray(frame)).unsqueeze_(0).cuda()
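

# An equivalent helper without the PIL round-trip (a sketch; the name is
# illustrative, and torch itself is assumed imported since torch.jit.load
# is used above):
def cv2_frame_to_cuda_fast(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # HWC uint8 -> 1x3xHxW float in [0, 1]; same result as ToTensor()
    tensor = torch.from_numpy(frame).permute(2, 0, 1).float().div(255)
    return tensor.unsqueeze(0).cuda()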


preloaded_image = cv2_frame_to_cuda(cv2.imread(args.target_image))
tb_video = VideoDataset(args.target_video, transforms=ToTensor())


def grab_bgr():
    bgr_frame = cam.read()
    bgr_blur = cv2.GaussianBlur(bgr_frame.astype('float32'), (67, 67),
                                0)  # sigmaX: assumed completion (0 = derive sigma from kernel size)
Example #2
print('LOADING DETECTOR')
detector = dlib.get_frontal_face_detector()
print('DETECTOR LOADED\n')

print('LOADING MODEL')
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
print('MODEL LOADED\n')

# video stream
vs = VideoStream().start()

# our image size
size = 400

# create a virtual cam
cam = pyvirtualcam.Camera(width=size, height=size, fps=20)

# create an empty list for face dots so that script doesn't crash if there's no face
face = [(0, 0) for _ in range(68)]

# empty list of particles
HAIR = []

# main video loop
stream = True
print('STREAMING BEGINS')
while stream:

    pic = vs.read()  # take a new frame from camera
    pic = imutils.resize(pic, width=size)  # resize frame to width=size (aspect ratio preserved)
    gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)  # convert to grayscale
Example #3
 def __enter__(self):
     self.cam = pyvirtualcam.Camera(width=self.width,
                                    height=self.height,
                                    fps=self.fps)
     self.cam.__enter__()
     return self
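
 def __exit__(self, exc_type, exc_value, traceback):
     # counterpart sketch (assumed, not in the excerpt): forward the exit to
     # the wrapped pyvirtualcam camera so the device is released on errors too
     return self.cam.__exit__(exc_type, exc_value, traceback)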
Example #4
def test_invalid_frame_dtype():
    with pyvirtualcam.Camera(width=1280, height=720, fps=20) as cam:
        with pytest.raises(TypeError):
            cam.send(np.zeros((cam.height, cam.width, 3), np.uint16))
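
# A companion sketch (not from the source suite): the same camera accepts a
# correctly shaped uint8 frame, so only the dtype above raises TypeError.
def test_valid_frame_dtype():
    with pyvirtualcam.Camera(width=1280, height=720, fps=20) as cam:
        cam.send(np.zeros((cam.height, cam.width, 3), np.uint8))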
Example #5
posX = 0
posY = 0

cvScaleFactor = 4
cvWidth = int(capWidth/cvScaleFactor)
cvHeight = int(capHeight/cvScaleFactor)
smallestFeature = tuple([int(minFaceSize//cvScaleFactor)])*2

centerFace = [capWidth/(2*cvScaleFactor)-25,  capHeight/(2*cvScaleFactor)-25,  50,  50]
lastFace = centerFace
lastFaceReuse = 0


fpsList = [0.03]

with pyvirtualcam.Camera(width=capWidth, height=capHeight, fps=20, delay=0) as cam:
    print("Virtual cam connected")
    ardLib.startingAnimation(ser)
    time.sleep(0.2)
    posY += -0.2
    progStart = time.time()
    while(time.time() - progStart < 60):
    #while(True):
        startOfFrame = time.time()
        
        ret, capFrame = cap.read()

        editedFrame = capFrame

        # Read the input image
        img = cv2.resize(capFrame, (cvWidth, cvHeight))
Example #6
import pyvirtualcam
import numpy as np

if __name__ == '__main__':
    with pyvirtualcam.Camera(width=1280, height=720, fps=30) as cam:
        while True:
            frame = np.zeros((cam.height, cam.width, 4), np.uint8)  # RGBA
            frame[:, :, :3] = cam.frames_sent % 255  # grayscale animation
            frame[:, :, 3] = 255
            cam.send(frame)
            cam.sleep_until_next_frame()
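
# The loop above relies on the camera's default pixel format; a variant that
# declares the four-channel layout explicitly (a sketch; PixelFormat.RGBA is
# the same constant Example #13 uses):
#
#     with pyvirtualcam.Camera(width=1280, height=720, fps=30,
#                              fmt=pyvirtualcam.PixelFormat.RGBA) as cam:
#         ...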
Example #7
def main():
    width = 640
    height = 360
    fps = 10
    webcam = cv2.VideoCapture(0)
    webcam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
    # background = cv2.imread('./backgrounds/pycon.png')
    frames = [
        Frame(id='thunder',
              path='./backgrounds/thunder.jpg',
              width=width,
              height=height),
        Frame(id='time',
              path='./backgrounds/its_time_to_stop.mp4',
              width=width,
              height=height,
              video=True),
        Frame(id='error',
              path='./backgrounds/exp_error.mp4',
              width=width,
              height=height,
              video=True),
        Frame(id='macizo',
              path='./backgrounds/macizo.jpg',
              width=width,
              height=height),
        Frame(id='hacked',
              path='./backgrounds/hacked.mp4',
              width=width,
              height=height,
              video=True),
        Frame(id='ali',
              path='./backgrounds/nyancat.mp4',
              width=width,
              height=height,
              video=True,
              background_ignore=True)
    ]
    image_manager = ImageManager()
    frames = image_manager.load_files(frames)
    listener = keyboard.GlobalHotKeys({
        '<ctrl>+<alt>+a': hot_key_a,
        '<ctrl>+<alt>+d': hot_key_d,
        '<ctrl>+<alt>+n': hot_key_n,
        '<ctrl>+<alt>+t': hot_key_t,
        '<ctrl>+<alt>+e': hot_key_e,
        '<ctrl>+<alt>+m': hot_key_m,
        '<ctrl>+<alt>+h': hot_key_h,
    })
    listener.start()
    print('data loaded!')
    with pyvirtualcam.Camera(width=width, height=height, fps=fps,
                             delay=1) as cam:
        count = 0
        length = 0
        while True:
            if key == 'a':
                video = Frame().get_frame_by_id(frames, 'ali')
                if video:
                    count = video.video_validator(count)
                    result = video.frame[count]
                    count += 1
                    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGBA)
                    cam.send(result)
                    cam.sleep_until_next_frame()
            elif key == 't':
                _, image_frame = webcam.read()
                background = Frame().get_frame_by_id(frames, 'thunder')
                if background:
                    result = image_manager.insert_background_instead_green(
                        image_frame, background.frame)
                    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGBA)
                    cam.send(result)
            elif key == 'd':
                video = Frame().get_frame_by_id(frames, 'time')
                if video:
                    count = video.video_validator(count)
                    result = video.frame[count]
                    count += 1
                    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGBA)
                    cam.send(result)
                    cam.sleep_until_next_frame()
            elif key == 'e':
                video = Frame().get_frame_by_id(frames, 'error')
                if video:
                    count = video.video_validator(count)
                    result = video.frame[count]
                    count += 1
                    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGBA)
                    cam.send(result)
                    cam.sleep_until_next_frame()
            elif key == 'h':
                video = Frame().get_frame_by_id(frames, 'hacked')
                if video:
                    count = video.video_validator(count)
                    result = video.frame[count]
                    count += 1
                    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGBA)
                    cam.send(result)
                    cam.sleep_until_next_frame()
            elif key == 'm':
                _, image_frame = webcam.read()
                background = Frame().get_frame_by_id(frames, 'macizo')
                if background:
                    result = cv2.cvtColor(background.frame, cv2.COLOR_BGR2RGBA)
                    cam.send(result)
            elif key == 'n':
                _, image_frame = webcam.read()
                result = cv2.cvtColor(image_frame, cv2.COLOR_BGR2RGBA)
                cam.send(result)
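

# The four video branches above repeat the same advance-convert-send steps;
# a hedged refactor built only from names already in this snippet,
# e.g. count = play_video(cam, frames, 'ali', count):
def play_video(cam, frames, frame_id, count):
    # look up the clip by id, wrap/advance the frame index, then send one
    # RGBA frame and pace to the camera fps
    video = Frame().get_frame_by_id(frames, frame_id)
    if not video:
        return count
    count = video.video_validator(count)
    result = cv2.cvtColor(video.frame[count], cv2.COLOR_BGR2RGBA)
    cam.send(result)
    cam.sleep_until_next_frame()
    return count + 1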
Example #8
    def run(self):
        global default_bkg_img_path
        global bkg_img_path
        global bkg_changed_event

        global default_video_id
        global video_id
        global video_lock

        global exit_event

        # capture from web cam
        CONFIG_FILE = 'model_mobilenetv2_with_prior_channel.yaml'
        curr_dir = os.getcwd()
        config_path = os.path.join(curr_dir, 'config', CONFIG_FILE)

        with open(config_path, 'rb') as f:
            cont = f.read()
        config = yaml.safe_load(cont)

        xml_path = 'model.xml'
        bin_path = 'model.bin'
        core = IECore()
        network = core.read_network(model=xml_path, weights=bin_path)
        exec_network = core.load_network(network, 'CPU')

        ### Input shape required by model
        input_blob = next(iter(network.inputs))
        input_shape = network.inputs[input_blob].shape
        height = input_shape[2]
        width = input_shape[3]

        out_shape = (960, 640)  # (width, height), matching cv2.resize's dsize order
        background_img = cv2.imread(default_bkg_img_path)
        background_img = cv2.resize(background_img, out_shape)

        # Create virtual cam
        cap = None
        prev_video_id = None
        try:
            with pyvirtualcam.Camera(width=out_shape[0],
                                     height=out_shape[1],
                                     fps=30) as cam:
                while True:
                    if exit_event.is_set():
                        break

                    if cap is not None:
                        if video_changed_event.is_set():
                            if prev_video_id != video_id:
                                try:
                                    cap.release()
                                except Exception as e:
                                    del cap

                                with video_lock:
                                    cap = cv2.VideoCapture(video_id)
                                    video_changed_event.clear()
                    else:
                        cap = cv2.VideoCapture(video_id)
                    prev_video_id = video_id

                    # note: ndarray shape is (rows, cols) = (height, width)
                    display_img = np.zeros((out_shape[1], out_shape[0], 3),
                                           dtype='uint8')
                    if cap is not None:
                        ret, origin_image = cap.read()
                        if ret:
                            # Check background change
                            if bkg_changed_event.is_set():
                                with bkg_lock:
                                    background_img = cv2.imread(bkg_img_path)
                                    background_img = cv2.resize(
                                        background_img, out_shape)
                                    bkg_changed_event.clear()

                            # Preprocessing
                            in_shape = origin_image.shape
                            image, bbx = resize_padding(
                                origin_image, [
                                    config['input_height'],
                                    config['input_width']
                                ],
                                pad_value=config['padding_color'])
                            p_image = generate_input(config, image, None)
                            p_image = p_image[None, :, :, :]

                            # Prediction
                            exec_network.infer({input_blob: p_image})
                            results = exec_network.requests[0].outputs

                            # Post-processing
                            output_mask = results['output']
                            pred = softmax(output_mask, axis=1)
                            predimg = pred[0].transpose((1, 2, 0))[:, :, 1]
                            alphargb = predimg[bbx[1]:bbx[3], bbx[0]:bbx[2]]
                            alphargb = cv2.resize(alphargb, out_shape)
                            alphargb = cv2.cvtColor(alphargb,
                                                    cv2.COLOR_GRAY2BGR)

                            # Display
                            origin_image = cv2.resize(origin_image, out_shape)
                            display_img = np.uint8(origin_image * alphargb +
                                                   background_img *
                                                   (1 - alphargb))

                    self.change_pixmap_signal.emit(display_img)
                    display_img = cv2.flip(display_img, 1)
                    rgba_img = cv2.cvtColor(display_img, cv2.COLOR_BGR2RGBA)
                    cam.send(rgba_img)
        except Exception as e:
            logging.error(e)
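
# The compositing step above is plain alpha blending; as a standalone sketch
# (assumed shapes: HxWx3 images with an HxWx3 float mask in [0, 1]):
def alpha_blend(foreground, background, alpha):
    # weighted per-pixel mix of the subject and the replacement background
    return np.uint8(foreground * alpha + background * (1 - alpha))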
Example #9
pref_fps_in = 30
vc.set(cv2.CAP_PROP_FRAME_WIDTH, pref_width)
vc.set(cv2.CAP_PROP_FRAME_HEIGHT, pref_height)
vc.set(cv2.CAP_PROP_FPS, pref_fps_in)

# Query final capture device values (may be different from preferred settings).
width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps_in = vc.get(cv2.CAP_PROP_FPS)
print(f'Webcam capture started ({width}x{height} @ {fps_in}fps)')

fps_out = 20

with pyvirtualcam.Camera(width,
                         height,
                         fps_out,
                         fmt=PixelFormat.BGR,
                         print_fps=args.fps) as cam:
    print(
        f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)'
    )

    # Shake two channels horizontally each frame.
    channels = [[0, 1], [0, 2], [1, 2]]

    while True:
        # Read frame from webcam.
        ret, frame = vc.read()
        if not ret:
            raise RuntimeError('Error fetching frame')
Example #10
    config.read('settings.ini')
    filename1 = config["Settings"]["filepath"]
    print(filename1)
    prev_frame = None
    img1 = cv2.imread(filename1)
    preds_source = handler.get(img1, get_all=False)
    img1 = cv2.resize(img1, (640, 480))
    print(img1.shape)
    points1 = []
    for pred in preds_source:
        pred = np.round(pred).astype(int)  # np.int was removed from NumPy
        for i in range(pred.shape[0]):
            p = tuple(pred[i])
            points1.append(p)

    with pyvirtualcam.Camera(width=640, height=480, fps=30, print_fps=True) as cam:
        while True:
            _, img2 = cap.read()
            # img1Warped = np.copy(img2)
            img1Warped = np.zeros_like(img2)
            img_copy = img2.copy()
            # Read array of corresponding points
            # points1 = readPoints(filename1 + '.txt')
            # points2 = readPoints(filename2 + '.txt')

            points2 = []

            preds_target = handler.get(img2, get_all=False)

            color = (200, 160, 75)
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--filter",
                        choices=["cat", "pikachu", "dog", "rainbow_cat"],
                        default="cat")
    parser.add_argument("--camera",
                        type=int,
                        default=0,
                        help="ID of webcam device (default: 0)")
    parser.add_argument("--fps",
                        action="store_true",
                        help="output fps every second")
    parser.add_argument("--magnify",
                        type=float,
                        default=None,
                        help="Set gif magnification factor")
    parser.add_argument("--effect", choices=["shake", "none"], default="none")
    parser.add_argument("--position", default=None, help="'top' or 'bottom'")
    parser.add_argument("--speed",
                        type=int,
                        default=None,
                        help="gif moving speed from right to left. Idle = 0")
    args = parser.parse_args()

    # Set up webcam capture.
    vc = cv2.VideoCapture(args.camera)

    if not vc.isOpened():
        raise RuntimeError('Could not open video source')

    vc.set(cv2.CAP_PROP_FRAME_WIDTH, PREF_WIDTH)
    vc.set(cv2.CAP_PROP_FRAME_HEIGHT, PREF_HEIGHT)
    vc.set(cv2.CAP_PROP_FPS, PREF_FPS_IN)

    # Query final capture device values (may be different from preferred settings).
    width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps_in = vc.get(cv2.CAP_PROP_FPS)
    print(f'Webcam capture started ({width}x{height} @ {fps_in}fps)')

    character = args.filter
    magnify = (gif_map[character]['magnify']
               if args.magnify is None else args.magnify)
    position = (gif_map[character]['position']
                if args.position is None else args.position)
    speed = gif_map[character]['speed'] if args.speed is None else args.speed

    fps_out = 20

    with pyvirtualcam.Camera(width,
                             height,
                             fps_out,
                             fmt=PixelFormat.BGR,
                             print_fps=args.fps) as cam:
        print(
            f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)'
        )

        # Shake two channels horizontally each frame.
        channels = [[0, 1], [0, 2], [1, 2]]
        gif = cv2.VideoCapture(gif_map[character]['path'])
        count = 0
        while True:
            # Read frame from webcam.
            ret_cam, frame = vc.read()
            ret_gif, meme = gif.read()
            count += 1
            if not ret_cam:
                raise RuntimeError('Error fetching frame')
            if not ret_gif:
                gif.set(cv2.CAP_PROP_POS_FRAMES, 0)
                ret_gif, meme = gif.read()
            meme_shape = meme.shape
            meme_width = int(meme_shape[1] * magnify)
            meme_height = int(meme_shape[0] * magnify)
            meme = cv2.resize(meme, (meme_width, meme_height))
            if gif_map[character]['flip']:
                meme = cv2.flip(meme, 1)
            meme = np.array(meme, np.int16)
            move_speed = count * speed

            frame = swap(frame, meme, meme_width, meme_height, position,
                         height, move_speed)

            if args.effect == "shake":
                dx = 15 - cam.frames_sent % 5
                c1, c2 = channels[cam.frames_sent % 3]
                frame[:, :-dx, c1] = frame[:, dx:, c1]
                frame[:, dx:, c2] = frame[:, :-dx, c2]

            cam.send(frame)

            # Wait until it's time for the next frame.
            cam.sleep_until_next_frame()
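
# gif_map is defined outside this excerpt; the keys used above imply entries
# shaped roughly like this (all values are illustrative placeholders):
#
#     gif_map = {
#         'cat': {
#             'path': 'gifs/cat.gif',
#             'magnify': 1.0,
#             'position': 'bottom',
#             'speed': 5,
#             'flip': False,
#         },
#     }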
Example #12
(rStart, rEnd) = setLandmarks("right_eye")
(mStart, mEnd) = setLandmarks("mouth")

vs = launchVideoStream()

lastUpdateTime = getCurrentTimee()
lastAlertTime = getCurrentTimee()
sheet = initGsheet("FYPconditionMonitoring")
EARcalibrated = False

# userName = getUserName()
userName = "******"

# test_EAR()
# test_MAR()
with pyvirtualcam.Camera(width=640, height=480, fps=30,
                         fmt=PixelFormat.RGB) as cam:
    print(f'Using virtual camera: {cam.device}')
    frames = np.zeros((cam.height, cam.width, 3), np.uint8)
    while True:

        # shortcut keys
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):  # "q" to Quit the program
            break
        if key == ord("r"):  # "r" to recalibrate EAR (Eye Aspect Ratio)
            EARcalibrated = False

        frame = readResizeVS()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        rects = grayScale(gray)
Example #13
gif = imageio.get_reader(args.path)
gif_length = gif.get_length()
gif_height, gif_width = gif.get_data(0).shape[:2]
gif_fps = 1000 / gif.get_meta_data()['duration']  # GIF 'duration' is ms per frame

cam_width = 1280
cam_height = 720
cam_fmt = PixelFormat.RGBA

gif_x = (cam_width - gif_width) // 2
gif_y = (cam_height - gif_height) // 2

with pyvirtualcam.Camera(cam_width,
                         cam_height,
                         gif_fps,
                         fmt=cam_fmt,
                         device=args.device,
                         print_fps=args.fps) as cam:
    print(
        f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)'
    )

    print()
    print('Capturing in OBS with transparency:')
    print('1. Add "Video Capture Device" as new source')
    print(f'2. Select "{cam.device}" as device')
    print('3. Set "Resolution/FPS Type" to "Custom"')
    print(
        f'4. Set "Resolution" to match virtual camera: {cam.width}x{cam.height}'
    )
    print('5. Set "Video Format" to "ARGB"')
Example #14
    line = labelsfile.readline()
    while line:
        # retrieve just class name and append to classes
        classes.append(line.split(' ', 1)[1].rstrip())
        line = labelsfile.readline()
    # close label file
    labelsfile.close()

    # load the teachable machine model
    model_path = 'converted_keras/keras_model.h5'
    model = tfk.models.load_model(model_path, compile=False)

    # initialize webcam video object
    cap = cv2.VideoCapture(0)

    # width & height of webcam video in pixels -> adjust to your size
    # adjust values if you see black bars on the sides of capture window
    frameWidth = 1280
    frameHeight = 720

    # set width and height in pixels
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
    # enable auto gain
    cap.set(cv2.CAP_PROP_GAIN, 0)
    cam = pyvirtualcam.Camera(width=frameWidth, height=frameHeight, fps=60)
    MainWindow(root, classes, model, cap, cam)
    #MainWindow(root, classes, model, cap)
    #MainWindow(root, classes, model, cap, virtualCamera)
    root.mainloop()
Example #15
def get_gradation_2d(start, stop, width, height, is_horizontal):
    # signature and horizontal branch are an assumed reconstruction from the
    # call in get_gradation_3d below
    if is_horizontal:
        return np.tile(np.linspace(start, stop, width), (height, 1))
    else:
        return np.tile(np.linspace(start, stop, height), (width, 1)).T


def get_gradation_3d(width, height, start_list, stop_list, is_horizontal_list):
    result = np.zeros((height, width, len(start_list)),
                      dtype=float)  # np.float was removed from NumPy

    for i, (start, stop, is_horizontal) in enumerate(
            zip(start_list, stop_list, is_horizontal_list)):
        result[:, :, i] = get_gradation_2d(start, stop, width, height,
                                           is_horizontal)

    return result


with pyvirtualcam.Camera(width, height, fps, print_fps=True) as cam:
    print(f'virtual cam started ({width}x{height} @ {fps}fps)')
    i = 0
    while True:
        # Things we want to draw:
        # 1. A color gradient.
        gradient = get_gradation_3d(width, height, (0, 0, 192),
                                    ((i * 2) % 255, 255, 64),
                                    (True, False, False))
        # 2. A binary-encoded frame counter.
        bits = '{0:012b}'.format(i)
        bit_size = 10
        red = np.array([255, 0, 0], np.uint8)
        white = np.array([255, 255, 255], np.uint8)

        # Create a new frame.
Example #16
    def make_virtual_webcam(
        self,
        preprocess=lambda frames, raw: frames[0],
        frames_stored=1,
        prepare=lambda frame: cv2.flip(frame, 1),
        log=lambda fps=0, ret=True: print(f"\rFPS: {fps}", end=""),
        fps_sample_length=sample_length,
        webcam_res=(None, None),
        finish=print,
    ):
        """Streams the preprocessed camera output into a virtual webcam device

        Args:
            preprocess (function, optional): Function to pass the frames through before streaming them. Defaults to lambda frames, raw: frames[0].
            frames_stored (int, optional): The number of frames to pass into the preprocess function. Defaults to 1.
            prepare (function, optional): The function to run on a frame before adding it to the buffer. Defaults to flipping it across the y-axis.
            log (function, optional): The log function to pass the fps and ret values into. Defaults to lambda fps=0, ret=True: print(f"\rFPS: {fps}", end='').
            fps_sample_length (int, optional): The sample length for the fps (if sample length is 10 it averages the last 10). Defaults to sample_length from constants.
            webcam_res (tuple, optional): The resolution of the webcam. Defaults to the resolution of the internal capture object.
            finish (function, optional): The function to run once the stream closes. Defaults to print.
        """
        currentFrame = 0
        frames = []

        last_time = time.time()
        times = []

        webcam_res = (
            webcam_res[0] or int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            webcam_res[1] or int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )

        if prepare is None:
            prepare = lambda frame: frame
        if log is None:
            log = lambda fps=0, ret=True: None

        with pyvirtualcam.Camera(
            width=webcam_res[0],
            height=webcam_res[1],
            fps=int(self.cap.get(cv2.CAP_PROP_FPS)),
        ) as cam:
            print("Press Control C to stop")
            while True:
                try:
                    ret, raw = self.cap.read()
                    if not ret:
                        continue

                    frame = prepare(raw)

                    frames.insert(0, frame)
                    if len(frames) <= frames_stored:
                        continue
                    frames.pop()
                    output_frame = preprocess(frames=frames, raw=raw)

                    if self.lock:
                        with self.lock:
                            self.cur_frame = output_frame
                    else:
                        self.cur_frame = output_frame

                    if self.should_log:
                        currentFrame += 1
                        cur_time = time.time()
                        dt = cur_time - last_time
                        times.append(dt)
                        if len(times) > fps_sample_length:
                            times.pop(0)
                        log(fps=len(times) / sum(times), ret=ret)
                        last_time = cur_time

                    cam.send(
                        cv2.resize(
                            cv2.cvtColor(output_frame, cv2.COLOR_BGR2RGBA), webcam_res
                        )
                    )
                    cam.sleep_until_next_frame()

                except KeyboardInterrupt as e:
                    finish()
                    self.cap.release()
                    cv2.destroyAllWindows()
                    break
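
# A preprocess sketch matching the keyword call used above (frames=, raw=):
# average the stored frames for a simple motion-blur effect; assumes
# equal-sized uint8 frames, numpy imported as np, and would be passed in as
# e.g. make_virtual_webcam(preprocess=motion_blur, frames_stored=4).
def motion_blur(frames, raw):
    return np.mean(frames, axis=0).astype(np.uint8)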
Example #17
      handle.controlWrite(request_type, request, value, index, data, timeout=FPIX_TIMEOUT)

    # Helper function to read a single frame via USB
    def readFrame():
      command(fr_req_cmd)
      frame = b""
      while True:
        data = handle.bulkRead(ENDPOINT, FPIX_MAX_TRANSFER, timeout=FPIX_TIMEOUT)
        frame += data
        if (len(data) < FPIX_MAX_TRANSFER) or ((data[-2] == 0xFF) and (data[-1] == 0xD9)):
          break
      return frame
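
    # readFrame() returns raw JPEG bytes: the 0xFF 0xD9 test above is the
    # JPEG end-of-image marker. A hedged decode step before handing the
    # frame to the virtual camera could be:
    #   img = cv2.imdecode(np.frombuffer(frame, np.uint8), cv2.IMREAD_COLOR)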


    # Create a virtual camera (pyvirtualcam raises RuntimeError on failure
    # instead of returning None)
    try:
      virtual_camera = pyvirtualcam.Camera(width=320, height=240, fps=30)
    except RuntimeError:
      print("Unable to create virtual camera")
      sys.exit(1)

    # Reset the camera and reset our framecounter
    command(reset_cmd)
    frameIndex = 0

    # Loop to process frames
    while True:

      # Read frame from physical camera
      frame = readFrame()

      # Dump frame to disk
Example #18
# This script is the sample from the README.

import pyvirtualcam
import numpy as np

with pyvirtualcam.Camera(width=1280, height=720, fps=20,
                         print_fps=True) as cam:
    while True:
        frame = np.zeros((cam.height, cam.width, 4), np.uint8)  # RGBA
        frame[:, :, :3] = cam.frames_sent % 255  # grayscale animation
        frame[:, :, 3] = 255
        cam.send(frame)
        cam.sleep_until_next_frame()
Example #19
async def start_web_server(stop_event: Event, pipe: Connection) -> None:
    """
    Set up and run the web server main loop.

    Server will gracefully shut down when `stop_event` flag is set.

    Args:
        stop_event (Event): A flag that, when true, will gracefully shut down the server
        pipe (Connection): Pipe connection to receive information from server
    """
    # All active RTC peer connections
    pcs: set[RTCPeerConnection] = set()

    async def show_frame(track: RemoteStreamTrack) -> None:
        """
        Get a frame from a `RemoteStreamTrack` and paint it to the pyvirtualcam video buffer.

        Args:
            track (RemoteStreamTrack): Video track from WebRTC connection
        """
        frame = await track.recv()

        # Format is a 2d array containing an RGBA tuples
        # 640x480
        if cam is not None:
            frame = _VIDEO_REFORMATTER.reformat(frame=frame, width=_CAMERA_WIDTH, height=_CAMERA_HEIGHT, format="rgba")
            cam.send(frame.to_ndarray())

        # @NOTE Not sure if we need this but I'm going to leave it in case we
        # ever need a case for it
        # cam.sleep_until_next_frame()

    def show_static_frame() -> None:
        """Paint static image to camera frame buffer."""
        if cam is None:
            raise RuntimeError('Trying to send frame to camera before initialization')

        cam.send(_NO_CAMERA_IMAGE_NDARRAY)

    def log(message: str, level: int = logging.INFO):
        """
        Send a log message through communication pipe.

        Args:
            message (str): Log message content
            level (int, optional): Logging level. Defaults to logging.INFO.
        """
        pipe.send(LogMessage(message, level))

    async def close_all_connections() -> int:
        """
        Close all open WebRTC connections and media streams.

        Returns:
            int: Number of connections that were closed
        """
        global is_cam_idle
        is_cam_idle = True

        for pc in pcs.copy():
            await pc.close()

        num_pcs = len(pcs)
        pcs.clear()

        return num_pcs

    # Rolling timeout that closes all peer connections after the client has not
    # responded for some time
    heartbeat_timeout = RollingTimeout(
        _STALE_CONNECTION_TIMEOUT, close_all_connections)

    @web.middleware
    async def logging_middleware(request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]) -> StreamResponse:
        """
        Send `LogMessage` through communication pipe for every HTTP request.

        Args:
            request (Request): HTTP request from http server
            handler ([type]): Handler to be executed after middleware

        Returns:
            StreamResponse: HTTP response after handler is executed
        """
        log(f"{request.method} {request.path} - {request.remote}", logging.DEBUG)
        return await handler(request)

    async def index(request: Request) -> StreamResponse:
        content = open(os.path.join(ROOT, "index.html"), "r").read()
        return web.Response(content_type="text/html", text=content)

    async def static(request: Request) -> StreamResponse:
        filename = os.path.join(ROOT, request.match_info['filename'])

        if not os.path.exists(filename):
            return web.Response(status=404)

        content = open(filename, 'r').read()
        mime = _MIMETYPES.guess_type(filename)[0]
        return web.Response(text=content, content_type=mime)

    async def close(request: Request) -> StreamResponse:
        num_connections = await close_all_connections()
        return web.Response(text=f"Closed {num_connections} connection(s)")

    async def offer(request: Request) -> StreamResponse:
        params = await request.json()

        if params['sdp'] is None or params['type'] is None:
            return web.Response(status=400, text="Required `sdp` and `type` are missing from request body.")

        if len(pcs) != 0:
            return web.Response(status=409, text='Attempting to make more than one connection simultaneously. Resource busy.')

        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])

        pc = RTCPeerConnection()
        pcs.add(pc)

        log(f"Created for {request.remote}")

        @pc.on("datachannel")
        def on_datachannel(channel: RTCDataChannel):
            @channel.on("message")
            async def on_message(message):
                if isinstance(message, str):
                    if channel.label == 'latency':
                        # If we receive a -1, then it is the first message
                        # and the rolling timeout should be started
                        if message == '-1':
                            heartbeat_timeout.start()

                            global is_cam_idle
                            is_cam_idle = False
                        else:
                            round_trip_time = latency(int(message))
                            log(f"Latency {round_trip_time}ms", logging.DEBUG)
                            heartbeat_timeout.rollback()

                        await asyncio.sleep(_PING_INTERVAL)
                        try:
                            channel.send(str(timestamp()))
                        except InvalidStateError:
                            # There's a chance the server will try to send a
                            # message after the connection is closed,
                            # raising an `InvalidStateError`. We should just
                            # ignore those.
                            pass

        @pc.on("connectionstatechange")
        async def on_connectionstatechange():
            log(f"Connection state is {pc.connectionState}")
            if pc.connectionState == "failed" or pc.connectionState == "closed":
                await pc.close()
                pcs.discard(pc)

        @pc.on("track")
        async def on_track(track: RemoteStreamTrack):
            log(f"Track {track.kind} received")

            if track.kind != "video":
                track.stop()
                return

            @track.on("ended")
            async def on_ended():
                log(f"Track {track.kind} ended")

                global is_cam_idle
                is_cam_idle = True

            while True:
                if track.readyState != "live":
                    break

                try:
                    await show_frame(track)
                except MediaStreamError as error:
                    if track.readyState == 'live':
                        raise error

        # handle offer
        await pc.setRemoteDescription(offer)

        # send answer
        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)

        return web.Response(
            content_type="application/json",
            text=json.dumps(
                {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
            ),
        )

    # Start HTTP server
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)

    mkdir_local_app_data('certs')
    cert_file = resolve_local_app_data('certs', 'selfsigned.cert')
    key_file = resolve_local_app_data('certs', 'selfsigned.pem')

    if not ssl_certs_generated(cert_file, key_file):
        generate_ssl_certs(cert_file, key_file)

    ssl_context.load_cert_chain(cert_file, key_file)

    app = web.Application(middlewares=[logging_middleware])
    app.router.add_get("/", index)
    app.router.add_get(r'/{filename:.+}', static)
    app.router.add_post("/offer", offer)
    app.router.add_get('/close', close)

    runner = web.AppRunner(app, handle_signals=True)
    await runner.setup()

    site = web.TCPSite(runner, host=resolve_host(),
                       port=8080, ssl_context=ssl_context)
    await site.start()

    log(f"Server listening at https://{resolve_host()}:8080")

    # Acquire virtual camera
    camera_init_success = False
    for _ in range(_MAX_CAMERA_RETRY_COUNT):
        global cam
        try:
            cam = pyvirtualcam.Camera(_CAMERA_WIDTH, _CAMERA_HEIGHT, _CAMERA_FPS, _CAMERA_DELAY)
            camera_init_success = True
            break

        except RuntimeError as error:
            if error.args[0] != 'error starting virtual camera output':
                raise error

            log("Failed to acquire camera, retrying.", logging.WARN)
            await asyncio.sleep(_CAMERA_INIT_RETRY_INTERVAL)

    if not camera_init_success:
        raise RuntimeError("Failed to acquire camera.")

    # Main loop
    while stop_event is None or not stop_event.is_set():
        if is_cam_idle:
            show_static_frame()

        await asyncio.sleep(SLEEP_INTERVAL)

    # Clean up and close server
    if cam is not None:
        cam.close()

    await close_all_connections()
    await site.stop()
    await runner.shutdown()
    await runner.cleanup()
    await app.shutdown()
    await app.cleanup()
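
# A hypothetical launcher for the coroutine above (Event and Pipe match the
# multiprocessing types in the signature; everything here is illustrative):
if __name__ == '__main__':
    import asyncio
    from multiprocessing import Event, Pipe

    stop = Event()
    parent_conn, child_conn = Pipe()
    asyncio.run(start_web_server(stop, child_conn))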
Example #20
    print('Please try again')
    cleaning_up()

# w - width of a resized perspective
# h - height of a resized perspective
w, h = perspective_size

# final video size
final_size = w + h + h

# resulting video
fps = 24
result = cv2.VideoWriter('Nigel.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps,
                         (final_size, final_size))

with pyvirtualcam.Camera(width=final_size, height=final_size, fps=60) as cam:
    while cam_front.isOpened():
        ret_front, front = cam_front.read()
        ret_back, back = cam_back.read()
        ret_right, right = cam_right.read()
        ret_left, left = cam_left.read()

        try:
            # modify perspectives
            final_front = resized(front)
            final_back = resized(back)
            final_right = resized(right)
            final_left = resized(left)
        except cv2.error:
            # the camera is still open but no frames are coming
            break
Example #21
import cv2

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(
    "cascades/haarcascade_frontalface_default.xml")
overlayImage = cv2.imread("images/smile_glasses.png", -1)

video = cv2.VideoCapture(0)
length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = video.get(cv2.CAP_PROP_FPS)

facePadding = 60

with pyvirtualcam.Camera(width=width, height=height, fps=fps,
                         print_fps=20) as cam:
    print(
        f'Virtual cam started: {cam.device} ({cam.width}x{cam.height} @ {cam.fps}fps)'
    )
    count = 0
    while True:
        # Restart video on last frame.
        if count == length:
            count = 0
            video.set(cv2.CAP_PROP_POS_FRAMES, 0)

        # Read video frame.
        ret, frame = video.read()
        if not ret:
            raise RuntimeError('Error fetching frame')
Example #22
    def run(self):
        """ contains main while loop to constantly capture webcam, process, and output

        :return: None
        """

        listener = keyboard.Listener(on_press=self.on_press)
        listener.start()  # start listening for key presses on a separate thread

        with pyvirtualcam.Camera(width=self.width,
                                 height=self.height,
                                 fps=self.fps) as virtual_cam:
            # print status
            print('virtual camera started ({}x{} @ {}fps)'.format(
                virtual_cam.width, virtual_cam.height, virtual_cam.fps))
            virtual_cam.delay = 0
            frame_count = 0

            while True:
                frame_count += 1
                if self.key_pressed == 'q':
                    self.logger.endTimer()
                    sys.exit(0)

                # STEP 1: capture video from webcam
                ret, raw_frame = self.cam.read()
                raw_frame = cv2.flip(raw_frame, 1)

                # STEP 2: process frames
                if raw_frame is None:
                    continue

                # map keys to games:
                keymap = {'c': self.coin_game, 'a': self.asteroid_game}
                # check if key pressed corresponds to a game
                if self.key_pressed in keymap:
                    # end the old game
                    if self.game:
                        self.game.end()
                    self.scavenger = False
                    # if the new game is different, then start the new game
                    if keymap[self.key_pressed] != self.game:
                        keymap[self.key_pressed].start()
                        self.game = keymap[self.key_pressed]
                    else:
                        self.game = None

                    # reset the key pressed
                    self.key_pressed = ''
                # retro filter
                if self.key_pressed == 'f':
                    raw_frame = self.videofilter.start(raw_frame)
                    self.key_pressed = ''
                # scavenger hunt game
                if self.key_pressed == 's':
                    self.key_pressed = ''

                    self.scavenger = not self.scavenger
                    if self.game:
                        self.game.end()
                        self.game = None
                    self.need_to_find = ['Person', 'Glasses']

                # detect face position
                if frame_count % 3:
                    x, y, self.face_width, self.face_height = processing.face_detection(
                        raw_frame)
                    self.face_position = x, y

                # draw rectangle around face
                # cv2.rectangle(raw_frame, self.face_position, (self.face_position[0] + self.face_width,
                #                                               self.face_position[1] + self.face_height), (0, 255, 0), 2)

                # Face Sentiment: check if the api call thread is already running. If not, start it up
                if self.future_call and self.future_call.done():

                    self.face_sentiment = self.future_call.result()
                    self.future_call = self.executor.submit(
                        processing.face_sentiment, raw_frame)

                # logs the current face sentiment
                self.logger.log_emotion(self.face_sentiment)

                # Object detection: check if the api call thread is already running. If not, start it up
                # only do this if the scav hunt game is running
                if self.scavenger and self.future_call_1 and self.future_call_1.done(
                ):
                    self.objects = self.future_call_1.result()
                    self.future_call_1 = self.executor.submit(
                        processing.localize_objects, raw_frame)

                    # remove found objects from the need to find list
                    found_objects = list(
                        set(self.objects) & set(self.need_to_find))
                    for object in found_objects:
                        self.need_to_find.remove(object)

                # write sentiment
                # cv2.putText(raw_frame, self.face_sentiment, (50, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2,
                #             color=(0, 0, 255))

                # update pipe status
                self.happy_pipe.update_pipe(self.face_sentiment,
                                            self.coin_score)
                # show pipe on screen
                self.happy_pipe.overlay_pipe(raw_frame)
                # show coin score on screen
                raw_frame = self.coin_score.overlay_coins(raw_frame)

                # display need to find objects
                if self.scavenger:
                    pil_im = Image.fromarray(raw_frame)

                    draw = ImageDraw.Draw(pil_im)

                    font = ImageFont.truetype("assets/Pixeboy-z8XGD.ttf", 50)

                    # draw the text
                    if len(self.need_to_find) > 0:
                        # print(self.need_to_find)
                        draw.text((50, 65), "Show Me:", font=font)
                        for i in range(len(self.need_to_find)):
                            draw.text((50, 115 + 50 * i),
                                      self.need_to_find[i],
                                      font=font)
                    else:
                        draw.text((50, 65), "I am Satisfied", font=font)

                    raw_frame = cv2.cvtColor(
                        cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR),
                        cv2.COLOR_BGR2RGB)

                # convert frame to RGB
                color_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2RGB)

                # add alpha channel
                out_frame_rgba = np.zeros((self.height, self.width, 4),
                                          np.uint8)
                out_frame_rgba[:, :, :3] = color_frame
                out_frame_rgba[:, :, 3] = 255

                if self.game == self.coin_game:
                    self.coin_game.update(
                        self.coin_score,
                        (self.face_position[0] + self.face_width // 2,
                         self.face_position[1] + self.face_height // 2))
                    self.coin_game.draw(out_frame_rgba)

                if self.game == self.asteroid_game:

                    self.asteroid_game.update((self.face_position[0], self.face_width, \
                                               self.face_position[1], self.face_height), raw_frame)
                    out_frame_rgba = self.asteroid_game.draw(out_frame_rgba)

                # STEP 3: send to virtual camera
                virtual_cam.send(out_frame_rgba)
                virtual_cam.sleep_until_next_frame()
Example #23
'''
    Camera Lagger by CRYP73R & ShiavngAG

'''

import cv2
import numpy as np
import time
import random
import pyvirtualcam

vid = cv2.VideoCapture(0)

with pyvirtualcam.Camera(width=640, height=480, fps=30) as cam:

    while True:
        rand = random.choice([0, 1, 2])
        ret, frame = vid.read()
        arr = np.array(frame)
        rgba = cv2.cvtColor(arr, cv2.COLOR_BGR2RGBA)  # OpenCV frames are BGR
        time.sleep(rand)
        cam.send(rgba)
        cam.sleep_until_next_frame()

vid.release()
cv2.destroyAllWindows()