Example #1
from contextlib import contextmanager

from PyV4L2Camera.camera import Camera


@contextmanager
def video_capture(video_device_index):
    '''Open /dev/video<index> and close it again when the block exits.'''
    video_device_name = '/dev/video{}'.format(video_device_index)
    camera = Camera(video_device_name)
    try:
        yield camera
    finally:
        camera.close()
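
A minimal usage sketch for the helper above (the @contextmanager decorator and the device index are assumptions; any index with a camera attached works):

with video_capture(0) as cam:    # opens /dev/video0
    frame = cam.get_frame()      # raw RGB bytes for one frame
# the camera is closed automatically here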
from threading import Thread
from time import sleep

from PIL import Image
from PyV4L2Camera.camera import Camera


class WebcamThread(Thread):
    def __init__(self, device, width, height, color='RGB'):
        '''Initialize the capture device.
        '''
        self._cam = Camera(device, width, height)
        self.width, self.height = self._cam.width, self._cam.height
        self.running = True
        self.img = None
        self.t_wait = 1.0 / 60  # poll at 60 Hz; the webcam itself delivers 30 FPS
        super().__init__()

    def run(self):
        '''Thread loop. Read continuously from cam buffer.
        '''
        while self.running:
            self.img = self._cam.get_frame()
            sleep(self.t_wait)
        self._cam.close()

    def capture(self, path_file):
        '''Capture image into a file
        '''
        image = self.get_img()
        image.save(path_file)

    def get_img(self):
        '''Decode the most recent raw frame into a PIL image.
        Note: self.img is None until run() has read the first frame.
        '''
        return Image.frombytes('RGB', (self.width, self.height), self.img,
                               'raw', 'RGB')

    def close(self):
        '''Stop webcam and thread
        '''
        self.running = False
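
A short usage sketch for WebcamThread (device path and resolution are placeholder assumptions):

cam_thread = WebcamThread('/dev/video0', 640, 480)
cam_thread.start()
sleep(1)                        # give run() time to read the first frame
cam_thread.capture('shot.jpg')  # save the latest frame to disk
cam_thread.close()              # stops the loop; run() then closes the camera
cam_thread.join()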
Example #4
    def __init__(self,
                 _num_leds_h=16,
                 _num_leds_v=24,
                 _ntsc=True,
                 feedback=False):
        self.num_leds_h = _num_leds_h
        self.num_leds_v = _num_leds_v
        self.ntsc = _ntsc
        self.leds = np.zeros(
            (_num_leds_v, _num_leds_h, 3))  # should not be necessary
        self.b64dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
        if self.ntsc:
            self.mode = 'NTSC'
            self.fps = 30
            self.width = 720
            self.height = 480
        else:
            self.mode = 'PAL-B'
            self.fps = 25
            self.width = 720
            self.height = 576
        self.format = 'UYVY'
        self.b = 3  # bytes per pixel: 3 for RGB frames, 2 for raw UYVY
        if feedback:
            fb = 'verbose'
        else:
            fb = 'silent'

        self.scale = 1.
        self.device = '/dev/video0'
        self.w = int(self.width // self.scale)
        self.h = int(self.height // self.scale)

        self.game = NesTetris(_num_leds_h=_num_leds_h, _num_leds_v=_num_leds_v)

        # -p {fps} could additionally be passed to v4l2-ctl to set the frame rate
        os.system(
            'v4l2-ctl -d {device} -s {m} --set-fmt-video '
            'width={w},height={h},pixelformat={pf} --{fb}'.format(
                device=self.device,
                m=self.mode,
                w=self.w,
                h=self.h,
                pf=self.format,
                fb=fb))
        #self.frame = Frame(self.device)
        self.frame = Camera(self.device)
Example #5
import os
import time

from PyV4L2Camera.camera import Camera


def setup_camera():
    cam_id = '/dev/video1'
    # exposure_auto=1 selects manual exposure on UVC cameras
    os.system('v4l2-ctl -d ' + cam_id + ' -c exposure_auto=1')
    time.sleep(0.5)
    camera = Camera(cam_id, 1280, 720)
    time.sleep(0.5)
    return camera
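
A usage sketch for setup_camera (the frame length assumes PyV4L2Camera's RGB output):

camera = setup_camera()
frame = camera.get_frame()  # raw RGB bytes, 1280 * 720 * 3 long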
Example #6
def run():
    # We assume that Raiden is already started
    previous_receiver_info = (0, 0)
    camera = Camera('/dev/video0', 1024, 576)
    camera.set_control_value(ControlIDs.CONTRAST, 10)
    camera.set_control_value(ControlIDs.SATURATION, 10)
    while True:
        address_id, nonce = start_scanning(camera)
        if (address_id, nonce) == previous_receiver_info:
            continue
        address = RECEIVER_LIST[address_id]
        send_payment(address, nonce)
        previous_receiver_info = (address_id, nonce)
Example #7
import cv2
import io
import socket
import struct
import time
import pickle
import zlib
from PyV4L2Camera.camera import Camera
from PIL import Image
import numpy as np

client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.0.100', 1080))
# connection = client_socket.makefile('wb')

camera = Camera('/dev/video0', 320, 240)
img_size = [240, 320, 3]


img_counter = 0

encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

while True:
    frame = camera.get_frame()
    # print(frame)
    # Decode the image
    # im = Image.frombytes('RGB', (camera.width, camera.height), frame, "raw", "BGR")
    # print(im)
    # Convert the image to a numpy array
    # cv_arr = np.asarray(im)
Example #8
import numpy as np
from PIL import Image

from PyV4L2Camera.camera import Camera
from PyV4L2Camera.controls import ControlIDs

camera = Camera('/dev/video0', 1920, 1080)
controls = camera.get_controls()

for control in controls:
    print(control.name)

camera.set_control_value(ControlIDs.BRIGHTNESS, 48)

for _ in range(2):
    frame = camera.get_frame()

    # Decode the image
    im = Image.frombytes('RGB', (camera.width, camera.height), frame, 'raw',
                         'RGB')

    # Convert the image to a numpy array and back to the pillow image
    arr = np.asarray(im)
    im = Image.fromarray(np.uint8(arr))

    print(np.mean(arr[:, :, 0]))
    print(np.mean(arr[:, :, 1]))
    print(np.mean(arr[:, :, 2]))

    # Display the image to show that everything works fine
    im.show()
Example #9
emotion_target_size = emotion_classifier.input_shape[1:3]

# starting lists for calculating modes
emotion_window = []

# starting video streaming

cv2.namedWindow('window_frame')
#video_capture = cv2.VideoCapture(0)
# Select video or webcam feed
cap = None

# from PyV4L2Camera.camera import Camera
# from PyV4L2Camera.controls import ControlIDs

camera = Camera('/dev/video0', 1920, 1080)
controls = camera.get_controls()

#
# Test yourself as a learning agent! Pass environment name as a command-line argument, for example:
#
# python keyboard_agent.py SpaceInvadersNoFrameskip-v4
#

env = gym.make('LunarLander-v2' if len(sys.argv) < 2 else sys.argv[1])

if not hasattr(env.action_space, 'n'):
    raise Exception('Keyboard agent only supports discrete action spaces')
ACTIONS = env.action_space.n
SKIP_CONTROL = 0  # Reuse the previous control decision SKIP_CONTROL times;
# this lets you test how much frame skip is still playable.
Example #10
# This script is to capture the images as the demo will see them, save them to
# disk for copying to training purposes

from PyV4L2Camera.camera import Camera
from PIL import Image

camera = Camera('/dev/video1')
width = camera.width
height = camera.height

counter = 0
folder = 1
outpath = 'captured_images/gear_{}/image_{}.jpg'


def image_bytes_to_image(image_bytes):
    image = Image.frombytes('RGB', (width, height), image_bytes, 'raw', 'RGB')

    # Crop a 224x224 window, shifted slightly off-center
    side = 224
    top = (height - side) // 2 + 20
    left = (width - side) // 2 + 50
    bottom = top + side
    right = left + side

    image = image.crop((left, top, right, bottom))

    return image


while True:
    try:
        # Loop body reconstructed from the variables above (a plausible
        # completion, not the original code): grab, crop, and save a frame.
        frame = camera.get_frame()
        image = image_bytes_to_image(frame)
        image.save(outpath.format(folder, counter))
        counter += 1
    except KeyboardInterrupt:
        break
Example #11
#!/usr/bin/env python

from PyV4L2Camera.camera import Camera
import cv2
import numpy as np

camera = Camera('/dev/video0')


def update():
    frame = camera.get_frame()
    arr = np.frombuffer(frame, dtype=np.uint8)
    # Use the negotiated size rather than hard-coding 1920x1080
    img = arr.reshape((camera.height, camera.width, 3))
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img


def main():
    while True:
        img = update()
        cv2.imshow('preview', img)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    print('DONE')


if __name__ == '__main__':
    main()
Example #12
from PyV4L2Camera.camera import Camera

def get_uvc_device():
    return Camera('/dev/video0', 640, 480)
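
A usage sketch for the helper above (close() mirrors its use in Example #1):

cam = get_uvc_device()
frame = cam.get_frame()  # raw RGB bytes, 640 * 480 * 3 long
cam.close()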
Example #13
    vehicle_offset_cm = offset_in_centimeters(vehicle_offset, 20,
                                              left_fitx[-1], right_fitx[-1])

    offset_angle = calculate_offset_angle(vehicle_offset_cm,
                                          maximum_support_vehicle_offset=5,
                                          maximum_wheel_angle=15)

    return vehicle_offset_cm


#     # rpi, ESC, STEER = setup_gpio()
# camera = Camera('/dev/video1', 360, 200)
os.system('v4l2-ctl -d /dev/video1 -c exposure_auto=1')
time.sleep(0.5)
camera = Camera('/dev/video1', 1280, 720)
# controls = camera.get_controls()

# speed = 1500
# angle = 90
# DEFAULT_CMD = '00/1500/90'
# STOP_CMD = '11/1500/90'

# for control in controls:
#     print(control.name + ":" + str(control.id))
camera.set_control_value(ControlIDs.BRIGHTNESS, 0)
camera.set_control_value(ControlIDs.CONTRAST, 15)
# camera.set_control_value(ControlIDs.BACKLIGHT_COMPENSATION, 50)
# camera.set_control_value(ControlIDs.SATURATION, 0)
Example #14
class StreamNES:
    # -s, --set-standard=<num>
    #   pal or pal-X     (X = B/G/H/N/Nc/I/D/K/M/60)  (V4L2_STD_PAL)
    #   ntsc or ntsc-X   (X = M/J/K)                  (V4L2_STD_NTSC)
    #   secam or secam-X (X = B/G/H/D/K/L/Lc)         (V4L2_STD_SECAM)

    def __init__(self,
                 _num_leds_h=16,
                 _num_leds_v=24,
                 _ntsc=True,
                 feedback=False):
        self.num_leds_h = _num_leds_h
        self.num_leds_v = _num_leds_v
        self.ntsc = _ntsc
        self.leds = np.zeros(
            (_num_leds_v, _num_leds_h, 3))  # should not be necessary
        self.b64dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
        if self.ntsc:
            self.mode = 'NTSC'
            self.fps = 30
            self.width = 720
            self.height = 480
        else:
            self.mode = 'PAL-B'
            self.fps = 25
            self.width = 720
            self.height = 576
        self.format = 'UYVY'
        self.b = 3  # bytes per pixel: 3 for RGB frames, 2 for raw UYVY
        if feedback:
            fb = 'verbose'
        else:
            fb = 'silent'

        self.scale = 1.
        self.device = '/dev/video0'
        self.w = int(self.width // self.scale)
        self.h = int(self.height // self.scale)

        self.game = NesTetris(_num_leds_h=_num_leds_h, _num_leds_v=_num_leds_v)

        # -p {fps} could additionally be passed to v4l2-ctl to set the frame rate
        os.system(
            'v4l2-ctl -d {device} -s {m} --set-fmt-video '
            'width={w},height={h},pixelformat={pf} --{fb}'.format(
                device=self.device,
                m=self.mode,
                w=self.w,
                h=self.h,
                pf=self.format,
                fb=fb))
        #self.frame = Frame(self.device)
        self.frame = Camera(self.device)

    def Frame_UYVY2YCbCr_PIL(self, w, h, frame_data):
        # UYVY packs two pixels into four bytes: U0 Y0 V0 Y1, so Y is every
        # second byte while U and V each appear once per pixel pair.
        data = np.frombuffer(frame_data, dtype='uint8')  # fromstring is deprecated
        y = Image.frombytes('L', (w, h), data[1::2].copy())
        u = Image.frombytes('L', (w, h), data[0::4].copy().repeat(2, 0))
        v = Image.frombytes('L', (w, h), data[2::4].copy().repeat(2, 0))
        return Image.merge('YCbCr', (y, u, v))

    def read_frame_dec(self):
        self.leds = self.read_frame()
        # TODO: convert to a 64-color palette first; until then this base64
        # mapping does not work
        data_b64 = ''.join(self.b64dict[m] for n in self.leds for m in n)
        data_dec = base64.b64decode(data_b64)

        return data_dec

    def read_frame(self):

        #get a frame from the device
        #frame_data = self.frame.get_frame()
        while True:
            frame_data = self.frame.get_frame()
            if len(frame_data) == self.w * self.h * self.b:
                break

        #img = self.Frame_UYVY2YCbCr_PIL(self.w, self.h, frame_data)
        img = Image.frombytes('RGB', (self.w, self.h), frame_data, 'raw',
                              'RGB')

        #cut the frame to game size (depending on game) and transform it for the leds
        #img_game = self.game.extract_game_area(img).filter(ImageFilter.SMOOTH).convert("HSV")
        img_game = self.game.extract_game_area(img, ntsc=self.ntsc)
        img_leds = self.game.transform_frame(img_game)
        #img to array conversion
        self.leds = np.array(img_leds)

        #debug:
        #self.leds = img_leds
        #img_game.convert("RGB").save("nes_cut.png", "PNG")
        #img_leds.convert("RGB").save("leds.png", "PNG")

        return self.leds

    # for debug:
    def read_frame0(self):
        frame_data = self.frame.get_frame()
        return frame_data

    def read_frame1(self):
        # Keep reading until a frame of the expected size arrives,
        # logging any short/garbled frames along the way.
        while True:
            frame_data = self.frame.get_frame()
            if len(frame_data) == self.w * self.h * self.b:
                break
            print("debug - ", "frame not correct", "frame_data_len:",
                  len(frame_data))
        return frame_data

    def read_frame2(self, frame_data):
        #img = self.Frame_UYVY2YCbCr_PIL(self.w, self.h, frame_data)
        img = Image.frombytes('RGB', (self.w, self.h), frame_data, 'raw',
                              'RGB')
        return img

    def read_frame3(self, img):
        #img_game = self.game.extract_game_area(img).filter(ImageFilter.SMOOTH).convert("HSV")
        img_game = self.game.extract_game_area(img, ntsc=self.ntsc)
        return img_game

    def read_frame4(self, img_game):
        img_leds = self.game.transform_frame(img_game)
        self.leds = img_leds
        return self.leds
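
A minimal driving loop for StreamNES (a sketch: NesTetris and the LED output side are assumed to exist elsewhere in the project):

stream = StreamNES(_num_leds_h=16, _num_leds_v=24, _ntsc=True)
while True:
    leds = stream.read_frame()  # (24, 16, 3) array of LED colors
    # push `leds` to the LED matrix here (hardware-specific, not shown)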