def test_screengear(monitor, options, colorspace):
    """
    Tests ScreenGear's playback capabilities with custom defined dimensions
    -> passes if it fails with ScreenShotError
    """
    try:
        # dimensions of the screen region to capture (w.r.t. the given monitor) are passed in `options`
        # open live screencast on the given monitor
        stream = ScreenGear(
            monitor=monitor, logging=True, colorspace=colorspace, **options
        ).start()
        # playback
        i = 0
        while i < 20:
            frame = stream.read()
            if frame is None:
                break
            if i == 10:
                if colorspace == "COLOR_BGR2INVALID":
                    # test wrong colorspace value
                    stream.color_space = 1546755
                else:
                    # test invalid colorspace value
                    stream.color_space = "red"
            i += 1
        # clean resources
        stream.stop()
    except Exception as e:
        if monitor in [-1, 3]:
            logger.exception(e)
        else:
            pytest.fail(str(e))
import time
from queue import Queue
from threading import Thread

from vidgear.gears import ScreenGear


class VideoGet:
    """
    Class that continuously gets frames from a ScreenGear screen-capture
    stream using a dedicated thread.
    """

    def __init__(self):
        self.stopped = False
        self.stream = ScreenGear().start()
        self.frame_queue = Queue(maxsize=200)

    def grab_screen(self):
        # grab a single frame from the screen-capture stream
        frame = self.stream.read()
        return frame

    def start(self):
        # launch the capture loop in a background daemon thread
        Thread(target=self.get, args=(), daemon=True).start()
        return self

    def get(self):
        while not self.stopped:
            st_time = time.time()
            self.frame = self.grab_screen()
            # drop the oldest entry when the bounded queue is full
            if self.frame_queue.full():
                self.frame_queue.get()
            self.frame_queue.put([time.time(), self.frame])
            # print("grab time taken: ", time.time() - st_time)
            time.sleep(0.05)

    def stop(self):
        self.stream.stop()
        self.stopped = True
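A minimal consumer sketch for the class above (assumed usage, not part of the original snippet): items placed on frame_queue are [timestamp, frame] pairs, so a reader can unpack them directly; the loop bound of 100 frames is arbitrary.

# Hypothetical consumer for VideoGet (illustrative sketch, not from the original code)
getter = VideoGet().start()
try:
    for _ in range(100):
        grab_time, frame = getter.frame_queue.get()  # blocks until a frame is queued
        if frame is None:
            break
        # process the frame here (display, encode, etc.)
finally:
    getter.stop()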
def test_screengear():
    """
    Tests ScreenGear's playback capabilities with custom defined dimensions
    -> passes if it fails with ScreenShotError
    """
    try:
        # define dimensions of the screen region to capture w.r.t. the given monitor
        options = {'top': 40, 'left': 0, 'width': 100, 'height': 100}
        # open live screencast on current monitor
        stream = ScreenGear(
            monitor=1, logging=True, colorspace='COLOR_BGR2GRAY', **options
        ).start()
        # playback
        i = 0
        while i < 10:
            frame = stream.read()
            if frame is None:
                break
            i += 1
        # clean resources
        stream.stop()
    except Exception as e:
        if platform.system() in ['Linux', 'Windows']:
            logger.exception(e)
        else:
            pytest.fail(str(e))
def main():
    # Stores if we are waiting for the holdoff timer to return
    global holdoff_good
    # Stores if ctrl + c has been pressed
    global running

    # define dimensions of the screen region to capture w.r.t. the given monitor
    options = {
        'top': args.top,
        'left': args.left,
        'width': args.width,
        'height': args.length
    }

    # open video stream with defined parameters
    stream = ScreenGear(monitor=args.monitor, logging=True, **options).start()

    # Setup SIGINT handler for ctrl + c in a slightly more elegant way than
    # try/except with KeyboardInterrupt
    signal.signal(signal.SIGINT, signal_handler)

    catch_times = []
    previous_catch = time()

    # Loop until ctrl + c is pressed
    while running:
        # read frames from stream
        frame = stream.read()

        # check if frame is None
        if frame is None:
            print('Error grabbing frame data! Exiting.')
            break

        # Knock out the color from the image to make thresholding easier
        frame[:, :, 0] = np.zeros([frame.shape[0], frame.shape[1]])
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Set all pixels dimmer than args.threshold to 0
        _, frame = cv2.threshold(frame, args.threshold, 255, cv2.THRESH_BINARY)

        # If we are still inside the holdoff period, skip the frame summing
        # and checking
        if holdoff_good:
            # Sum up all pixels in the frame; when the bobber is underwater
            # the scene should be all 0s.
            frame_sum = frame.sum()
            if frame_sum == 0:
                Timer(args.delay, holdoff_good_callback).start()
                if not args.debug:
                    Timer(args.recast, cast).start()
                holdoff_good = False
                if args.debug:
                    print('Caught!')
                else:
                    pyautogui.click(button='right')
                cur_catch = time()
                catch_times.append(cur_catch - previous_catch)
                previous_catch = cur_catch
                print(
                    f'Total Catches {len(catch_times)}, '
                    f'Avg Catch Time: {round(average(catch_times), 5)}, '
                    f'STD: {round(std(catch_times), 5)}, '
                    f'Min: {round(min(catch_times), 5)}, '
                    f'Max: {round(max(catch_times), 5)}',
                    end='\r')

        # Show output window if we are in debug mode
        if args.debug or args.show:
            cv2.imshow("Output Frame", frame)

            # check for 'q' key if pressed
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                print('')
                break

    # close output window
    cv2.destroyAllWindows()

    # safely close video stream
    stream.stop()
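main() above reads its capture geometry and timing from a module-level args object that this excerpt does not define. A hypothetical argparse setup consistent with the attributes it references could look like the sketch below; every flag name, default, and help string here is an assumption, not taken from the original script.

import argparse

# Hypothetical CLI definition matching the attributes main() references
# (args.top, args.left, args.width, args.length, args.monitor,
#  args.threshold, args.delay, args.recast, args.debug, args.show).
parser = argparse.ArgumentParser(description="Screen-capture bobber watcher")
parser.add_argument("--monitor", type=int, default=1, help="monitor index to capture")
parser.add_argument("--top", type=int, default=0, help="top offset of the capture region")
parser.add_argument("--left", type=int, default=0, help="left offset of the capture region")
parser.add_argument("--width", type=int, default=300, help="capture region width")
parser.add_argument("--length", type=int, default=300, help="capture region height")
parser.add_argument("--threshold", type=int, default=200, help="binary threshold value")
parser.add_argument("--delay", type=float, default=2.0, help="holdoff delay in seconds")
parser.add_argument("--recast", type=float, default=3.0, help="recast timer in seconds")
parser.add_argument("--debug", action="store_true", help="print instead of clicking")
parser.add_argument("--show", action="store_true", help="show the thresholded frame")
args = parser.parse_args()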
# import required libraries
from vidgear.gears import VideoGear, NetGear
import cv2

# open the video source to be streamed
stream = VideoGear(source="resources/test.mp4").start()

# define tweak flags
options = {
    'flag': 0,
    'copy': False,
    'track': False,
    'compression_param': cv2.IMREAD_COLOR
}

# define NetGear server
server = NetGear(
    address="192.168.XX.XX",
    port="5454",
    protocol='tcp',
    pattern=1,
    logging=True,
    **options
)

while True:
    try:
        # read frames from the stream
        frame = stream.read()

        # check if frame is None
        if frame is None:
            break

        # send the frame to the connected client
        server.send(frame)

    except KeyboardInterrupt:
        break

# safely close the stream and the server
stream.stop()
server.close()
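On the receiving side, a minimal client sketch using NetGear's standard receive mode; the address, port, protocol, and pattern are assumed to match the server above.

# import required libraries
from vidgear.gears import NetGear
import cv2

# define NetGear client in receive mode (must mirror the server's settings)
client = NetGear(
    address="192.168.XX.XX",
    port="5454",
    protocol='tcp',
    pattern=1,
    receive_mode=True,
    logging=True
)

while True:
    # receive frames from the network
    frame = client.recv()

    # check if frame is None (stream ended or server closed)
    if frame is None:
        break

    # display the received frame
    cv2.imshow("Received Frame", frame)

    # quit on 'q' keypress
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# clean up
cv2.destroyAllWindows()
client.close()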
class DTLApp(App):
    state = False
    interval = 1.0
    stream = None
    writer = None
    timer = None

    def load_custom_config(self):
        # load the saved configuration, writing defaults if it is missing or unreadable
        path = get_path('config', parent=True) + '/cfg.json'
        try:
            config = json.load(open(path))
        except Exception:
            config = DEFAULT_CONFIG
            json.dump(config, open(path, 'w'))
        return config

    def build(self):
        self.custom_cfg = self.load_custom_config()
        self.interval = self.custom_cfg['interval']
        self.title = 'Desktop Time Lapse'

        layout = BoxLayout(orientation='vertical')

        # row 1: interval input and "Set" button
        h_layout_1 = BoxLayout(orientation='horizontal')
        h_layout_1.add_widget(Label(text='Interval:', size_hint=(0.25, 1)))
        self.text_input = FloatInput(text=str(self.interval),
                                     size_hint=(0.25, 1),
                                     multiline=False)
        h_layout_1.add_widget(self.text_input)
        set_btn = Button(text='Set', size_hint=(0.5, 1))
        set_btn.bind(on_press=self.set_btn_press)
        h_layout_1.add_widget(set_btn)
        layout.add_widget(h_layout_1)

        # row 2: recording state
        h_layout_2 = BoxLayout(orientation='horizontal')
        h_layout_2.add_widget(Label(text='State:'))
        self.state_label = Label(text='Idle')
        h_layout_2.add_widget(self.state_label)
        layout.add_widget(h_layout_2)

        # row 3: elapsed real time and resulting video time
        h_layout_3 = BoxLayout(orientation='horizontal')
        h_layout_3.add_widget(Label(text='Duration:'))
        self.rt_label = Label(text='0')
        h_layout_3.add_widget(self.rt_label)
        h_layout_3.add_widget(Label(text='VTime:'))
        self.vt_label = Label(text='0')
        h_layout_3.add_widget(self.vt_label)
        layout.add_widget(h_layout_3)

        # row 4: frame counter and record button
        h_layout_4 = BoxLayout(orientation='horizontal')
        h_layout_4.add_widget(Label(text='Frames:', size_hint=(0.25, 1)))
        self.frame_label = Label(text='0', size_hint=(0.25, 1))
        h_layout_4.add_widget(self.frame_label)
        self.record_btn = Button(text='Start Record', size_hint=(0.5, 1))
        h_layout_4.add_widget(self.record_btn)
        layout.add_widget(h_layout_4)

        self.record_btn.bind(on_press=self.record_btn_press)
        self.event = Clock.schedule_interval(self.update_label, 0.1)
        return layout

    def set_btn_press(self, instance):
        # snap the requested interval to a 0.01 s grid
        try:
            interval = float(self.text_input.text)
            if int(interval / 0.01) > 0:
                self.interval = 0.01 * int(interval / 0.01)
            self.text_input.text = str(self.interval)
        except Exception:
            pass

    def record_btn_press(self, instance):
        if not self.state:
            # start recording: open the screen capture and the video writer
            self.state_label.text = 'Record'
            self.record_btn.text = 'Stop Record'
            self.start_time = datetime.datetime.now()
            self.stream = ScreenGear(monitor=1,
                                     **{'THREADED_QUEUE_MODE': False}).start()
            output_params = {
                '-vcodec': 'libx264',
                '-crf': 0,
                '-preset': 'fast'
            }
            self.video_path = get_path('video', parent=True) + '/' + \
                self.start_time.strftime("%Y-%m-%d %H_%M_%S.mp4")
            self.writer = WriteGear(output_filename=self.video_path,
                                    compression_mode=True,
                                    **output_params)
            self.count = 0
            self.count_frames = 0
            self.timer = TimerThread(self.interval, self.record_image)
            self.timer.start()
        else:
            # stop recording: stop the timer and release capture/writer resources
            self.timer.stopped.set()
            self.state_label.text = 'Idle'
            self.record_btn.text = 'Start Record'
            self.stream.stop()
            self.stream = None
            self.writer.close()
            self.writer = None
            self.start_time = None
        self.state = not self.state

    def on_stop(self):
        # release resources when the app window is closed
        if self.writer:
            self.writer.close()
        if self.stream:
            self.stream.stop()

    def record_image(self):
        # called by the timer thread: grab one frame and append it to the video
        if not self.state:
            return
        frame = self.stream.read()
        self.writer.write(frame)
        self.count_frames += 1

    def update_label(self, dt):
        # refresh the frame counter, elapsed duration, and resulting video length
        if not self.state:
            return
        self.frame_label.text = str(self.count_frames)
        d_time = str(datetime.datetime.now() - self.start_time)
        self.rt_label.text = d_time[:d_time.find('.')]
        v_time = str(datetime.timedelta(seconds=int(self.count_frames // 25)))
        self.vt_label.text = v_time
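Assuming the usual Kivy entry point (and that the surrounding module defines the FloatInput, TimerThread, get_path, and DEFAULT_CONFIG helpers referenced above), the app would be launched like this:

if __name__ == '__main__':
    # start the Kivy event loop; on_stop() releases the stream and writer on exit
    DTLApp().run()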