from threading import Thread
from time import sleep

from PIL import Image

# Camera is the project's V4L2 capture wrapper, defined elsewhere in the repo.


class WebcamThread(Thread):
    def __init__(self, device, width, height, color='RGB'):
        '''Initialize device
        '''
        self._cam = Camera(device, width, height)
        self.width, self.height = self._cam.width, self._cam.height
        self.running = True
        self.img = None
        self.t_wait = 1.0 / 60  # poll at 60 Hz; the webcam delivers 30 FPS
        super().__init__()

    def run(self):
        '''Thread loop. Read continuously from cam buffer.
        '''
        while self.running:
            self.img = self._cam.get_frame()
            sleep(self.t_wait)
        self._cam.close()

    def capture(self, path_file):
        '''Capture image into a file
        '''
        image = self.get_img()
        image.save(path_file)

    def get_img(self):
        return Image.frombytes('RGB', (self.width, self.height), self.img,
                               'raw', 'RGB')

    def close(self):
        '''Stop webcam and thread
        '''
        self.running = False
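
# --- Usage sketch (not part of the original module) ---
# Assumes the Camera wrapper accepts a V4L2 device path; the device path,
# frame size and output filename below are only illustrative values.
if __name__ == '__main__':
    cam = WebcamThread('/dev/video0', 640, 480)
    cam.start()                  # begin polling frames in the background
    sleep(1.0)                   # give the thread time to grab a first frame
    cam.capture('snapshot.jpg')  # save the most recent frame via PIL
    cam.close()                  # stop the loop; run() then releases the device
    cam.join()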
import pickle
import socket

import cv2
import numpy as np

# Connect to the receiving end of the stream.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.0.100', 1080))
# connection = client_socket.makefile('wb')

camera = Camera('/dev/video0', 320, 240)
img_size = [240, 320, 3]
img_counter = 0
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

while True:
    frame = camera.get_frame()
    # print(frame)
    # Decode the image
    # im = Image.frombytes('RGB', (camera.width, camera.height), frame, "raw", "BGR")
    # print(im)
    # Convert the image to a numpy array
    # cv_arr = np.asarray(im)
    # resized = cv2.resize(cv_arr.copy(), (img_size[1], img_size[0]))
    # result, frame = cv2.imencode('.jpg', frame, encode_param)
    # data = zlib.compress(pickle.dumps(frame, 0))
    # data = pickle.dumps(frame, 0)

    # Pickle the raw frame bytes and measure the payload size
    data = pickle.dumps(frame, 0)
    size = len(data)
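    # --- Assumption: the original script stops at computing `size` without
    # sending anything. A common way to finish the loop is to length-prefix
    # the pickled payload so the receiver knows how many bytes to read, e.g.:
    #
    #   import struct
    #   client_socket.sendall(struct.pack(">L", size) + data)
    #   img_counter += 1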
import base64
import os

import numpy as np
from PIL import Image


class StreamNES:
    # v4l2-ctl video standards (-s / --set-standard <num>):
    #   pal   or pal-X   (X = B/G/H/N/Nc/I/D/K/M/60)  (V4L2_STD_PAL)
    #   ntsc  or ntsc-X  (X = M/J/K)                  (V4L2_STD_NTSC)
    #   secam or secam-X (X = B/G/H/D/K/L/Lc)         (V4L2_STD_SECAM)
    def __init__(self, _num_leds_h=16, _num_leds_v=24, _ntsc=True,
                 feedback=False):
        self.num_leds_h = _num_leds_h
        self.num_leds_v = _num_leds_v
        self.ntsc = _ntsc
        self.leds = np.zeros(
            (_num_leds_v, _num_leds_h, 3))  # should not be necessary
        self.b64dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
        if self.ntsc:
            self.mode = 'NTSC'
            self.fps = 30
            self.width = 720
            self.height = 480
        else:
            self.mode = 'PAL-B'
            self.fps = 25
            self.width = 720
            self.height = 576
        self.format = 'UYVY'
        self.b = 3  # bytes per pixel (3 for RGB, 2 for UYVY)
        # self.color = ''  # 'smpte170'
        if feedback:
            fb = 'verbose'
        else:
            fb = 'silent'
        self.scale = 1.
        self.device = '/dev/video0'
        self.w = int(self.width // self.scale)
        self.h = int(self.height // self.scale)
        self.game = NesTetris(_num_leds_h=_num_leds_h,
                              _num_leds_v=_num_leds_v)
        # Configure the capture device via v4l2-ctl.
        # Optionally also set the frame rate by adding '-p {fps}' (e.g. -p 25).
        os.system(
            'v4l2-ctl -d {device} -s {m} '
            '--set-fmt-video width={w},height={h},pixelformat={pf} --{fb}'
            .format(device=self.device,
                    fps=self.fps,
                    m=self.mode,
                    w=self.w,
                    h=self.h,
                    pf=self.format,
                    fb=fb))
        # self.frame = Frame(self.device)
        self.frame = Camera(self.device)

    def Frame_UYVY2YCbCr_PIL(self, w, h, frame_data):
        # Split a packed UYVY buffer into Y, U and V planes and merge them
        # into a PIL YCbCr image.
        data = np.frombuffer(frame_data, dtype='uint8')
        y = Image.frombytes('L', (w, h), data[1::2].copy())
        u = Image.frombytes('L', (w, h), data[0::4].copy().repeat(2, 0))
        v = Image.frombytes('L', (w, h), data[2::4].copy().repeat(2, 0))
        return Image.merge('YCbCr', (y, u, v))

    def read_frame_dec(self):
        self.leds = self.read_frame()
        # TODO: convert to a 64-color palette first; until then the remainder
        # does not work
        data_b64 = ''.join(self.b64dict[m] for n in self.leds for m in n)
        data_dec = base64.b64decode(data_b64)
        return data_dec

    def read_frame(self):
        # Get a frame from the device; retry until a complete frame arrives
        # frame_data = self.frame.get_frame()
        while True:
            frame_data = self.frame.get_frame()
            if len(frame_data) == self.w * self.h * self.b:
                break
        # img = self.Frame_UYVY2YCbCr_PIL(self.w, self.h, frame_data)
        img = Image.frombytes('RGB', (self.w, self.h), frame_data,
                              'raw', 'RGB')
        # Cut the frame to game size (depending on game) and transform it
        # for the LEDs
        # img_game = self.game.extract_game_area(img).filter(ImageFilter.SMOOTH).convert("HSV")
        img_game = self.game.extract_game_area(img, ntsc=self.ntsc)
        img_leds = self.game.transform_frame(img_game)
        # Image-to-array conversion
        self.leds = np.array(img_leds)
        # Debug:
        # self.leds = img_leds
        # img_game.convert("RGB").save("nes_cut.png", "PNG")
        # img_leds.convert("RGB").save("leds.png", "PNG")
        return self.leds

    # For debugging: the same pipeline split into single steps.
    def read_frame0(self):
        frame_data = self.frame.get_frame()
        return frame_data

    def read_frame1(self):
        # frame_data = self.frame.get_frame()
        while True:
            frame_data = self.frame.get_frame()
            if len(frame_data) == self.w * self.h * self.b:
                break
            else:
                print("debug - ", "frame not correct",
                      "frame_data_len:", len(frame_data))
        return frame_data

    def read_frame2(self, frame_data):
        # img = self.Frame_UYVY2YCbCr_PIL(self.w, self.h, frame_data)
        img = Image.frombytes('RGB', (self.w, self.h), frame_data,
                              'raw', 'RGB')
        return img

    def read_frame3(self, img):
        # img_game = self.game.extract_game_area(img).filter(ImageFilter.SMOOTH).convert("HSV")
        img_game = self.game.extract_game_area(img, ntsc=self.ntsc)
        return img_game

    def read_frame4(self, img_game):
        img_leds = self.game.transform_frame(img_game)
        self.leds = img_leds
        return self.leds
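
# --- Usage sketch (not part of the original module) ---
# Assumes a capture device is present at /dev/video0 and that the NesTetris
# and Camera classes used above are importable; the LED grid size matches
# the constructor defaults.
if __name__ == '__main__':
    stream = StreamNES(_num_leds_h=16, _num_leds_v=24, _ntsc=True,
                       feedback=True)
    leds = stream.read_frame()  # (24, 16, 3) array of LED colors
    print('LED frame shape:', leds.shape)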