def set_cam():
    graph = FilterGraph()
    devices = graph.get_input_devices()
    device_ids = [x[1] if x[1] is not None else x[0] for x in devices]
    filepath = "setting.txt"
    if os.path.isfile(filepath):
        print("setting.txt exists.")
        with open(filepath, 'r') as f:
            device_id = f.read()
        return device_ids.index(device_id)
    else:
        print("setting.txt does not exist.")
        for i in range(len(devices)):
            main_camera = camera.Camera(i)
            frame = main_camera.read_bgr()
            cv2.imshow('frame', frame)
            value = easygui.ynbox("Is this the video camera?")
            if value is True:
                with open(filepath, "a") as fp:
                    fp.write(str(devices[i][1]))
                main_camera.stop()
                cv2.destroyAllWindows()
                return i
        return None
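# A hedged usage sketch (not part of the original project): how the index returned by set_cam
# might be fed to OpenCV to grab a single snapshot. The names below are illustrative only; with
# the DirectShow backend the index usually matches pygrabber's device order.
if __name__ == "__main__":
    cam_index = set_cam()
    if cam_index is not None:
        cap = cv2.VideoCapture(cam_index, cv2.CAP_DSHOW)
        ok, frame = cap.read()
        if ok:
            cv2.imwrite("snapshot.jpg", frame)
        cap.release()
    else:
        print("No camera selected.")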
def VideoCaptureDeviceToUse(self, value):
    graph = FilterGraph()
    self.__VideoCaptureDeviceToUse = value
    try:
        device = graph.get_input_devices().index(value)
    except ValueError:
        # Fall back to the default camera when the requested camera name is not in the list
        device = graph.get_input_devices().index("Integrated Webcam")
    return device
def __init__(self, device_id):
    self.graph = FilterGraph()
    self.graph.add_video_input_device(device_id)
    self.graph.add_sample_grabber(self.img_cb)
    self.graph.add_null_render()
    self.graph.prepare_preview_graph()
    self.graph.run()
    self.image_grabbed = None
    self.image_done = threading.Event()
def refresh_camera_list(self):
    from pygrabber.dshow_graph import FilterGraph
    graph = FilterGraph()
    cams = graph.get_input_devices()
    camera_choices = [("{}: {}".format(cam_id, cam_name), cam_id)
                      for cam_id, cam_name in enumerate(cams)]
    self.settings.cam_id.change_choice_list(camera_choices)
    del graph
def set_camera(self, *args):
    from utils.selected_instrument import camera_indx
    try:
        from pygrabber.dshow_graph import FilterGraph
        self.ids.dropdown_item.text = FilterGraph().get_input_devices()[camera_indx]
    except:
        pass
def connect(self):
    S = self.settings
    from pygrabber.dshow_graph import FilterGraph
    self.graph = FilterGraph()
    self.graph.add_input_device(S['cam_id'])
    S['cam_name'] = self.graph.get_input_devices()[S['cam_id']]
    formats = [("{}: {} {}x{} {}bit".format(*x), x[0])
               for x in self.graph.get_formats()]
    S.format.change_choice_list(formats)
    self.graph.set_format(S['format'])
    self.img_buffer = []
    self.graph.add_sample_grabber(self.image_callback)
    self.graph.add_null_render()
    self.graph.prepare()
    self.graph.run()
def open_camera_list(self, inst):
    menu = DropDown(auto_width=False, width=200)
    try:
        from pygrabber.dshow_graph import FilterGraph
        device_list = FilterGraph().get_input_devices()
    except:
        device_list = []
    for i, each in enumerate(device_list):
        btn = DropItem(text=each, icon="webcam", font_size=15)
        btn.camera_indx = i
        btn.bind(on_release=lambda btn: self.set_item(menu, btn))
        menu.add_widget(btn)
    menu.spacing = 0
    menu.add_widget(MDSeparator())
    menu.open(inst)
def add_camera(self, *args):
    try:
        import utils.selected_instrument as ut
        from pygrabber.dshow_graph import FilterGraph
        device = FilterGraph().get_input_devices()[ut.camera_indx]
    except:
        ut.camera_indx = 0
        st = "selected_instr = '{}'\ncamera_indx = {}\nmanual_instr = {}".format(
            ut.selected_instr, 0, ut.manual_instr)
        with open('utils/selected_instrument.py', 'w') as file:
            file.write(st)
    source = ut.camera_indx  # "https://192.168.43.1:8080/video"
    self.cap = cv2.VideoCapture(source)
    cam = Camera(self.cap)
    self.ids.feed.add_widget(cam)
# The following code connects to the first capture device, lists all the available formats, then lets you pick one.
# After selecting a format, the live video from the camera is shown.
from pygrabber.dshow_graph import FilterGraph
from tkinter import Tk

if __name__ == "__main__":
    graph = FilterGraph()
    graph.add_video_input_device(0)
    formats = graph.get_input_device().get_formats()
    print(f"Available formats for {graph.get_input_device().Name}")
    for f in formats:
        print(f)
    format_id = input("Enter a format id: ")
    graph.get_input_device().set_format(int(format_id))
    graph.add_default_render()
    graph.prepare_preview_graph()
    graph.run()
    root = Tk()
    root.withdraw()  # hide Tkinter main window
    root.mainloop()
# The following code uses the sample grabber filter to capture single images from the camera.
# To capture an image, the method grab_frame is called. The image is retrieved from the callback function
# passed as a parameter to the add_sample_grabber method. In this case, the captured image is shown using
# the imshow function of OpenCV.
from pygrabber.dshow_graph import FilterGraph
import cv2

if __name__ == "__main__":
    graph = FilterGraph()
    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    graph.add_video_input_device(0)
    graph.add_sample_grabber(lambda image: cv2.imshow("Image", image))
    graph.add_null_render()
    graph.prepare_preview_graph()
    graph.run()
    print("Press 'C' or 'c' to grab photo, any other key to exit")
    while cv2.waitKey(0) in [ord('c'), ord('C')]:
        graph.grab_frame()
    graph.stop()
    cv2.destroyAllWindows()
    print("Done")
from datetime import datetime
from os import path
from threading import Thread, Event

import cv2
from pygrabber.dshow_graph import FilterGraph

# OUTPUT_FOLDER, CAMERA_INDEX, PAUSE_BETWEEN_CAPTURE, start_time and capture_photos_loop
# are defined elsewhere in the original script.

def show_image(image):
    global start_time
    capture_time = datetime.now() - start_time
    image_file_name = path.join(
        OUTPUT_FOLDER,
        str(capture_time.seconds * 1000 + int(capture_time.microseconds / 1000)) + ".jpg")
    cv2.imwrite(image_file_name, image)
    print(f"Image {image_file_name} written")


if __name__ == "__main__":
    event = Event()
    capture_thread = Thread(target=capture_photos_loop, args=(event, ))
    graph = FilterGraph()
    devices = graph.get_input_devices()
    print(f"Connecting to device {devices[CAMERA_INDEX]}")
    graph.add_video_input_device(CAMERA_INDEX)
    graph.add_sample_grabber(lambda image: show_image(image))
    graph.add_null_render()
    graph.prepare_preview_graph()
    graph.run()
    capture_thread.start()
    input(f"Capturing images every {PAUSE_BETWEEN_CAPTURE}s, press ENTER to terminate.")
    event.set()
    capture_thread.join()
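# capture_photos_loop itself is not shown above; a minimal hypothetical sketch of such a loop,
# assuming PAUSE_BETWEEN_CAPTURE is a delay in seconds and the Event is used to request termination:
def capture_photos_loop(stop_event):
    # Event.wait returns False on timeout, so keep grabbing frames until the event is set
    while not stop_event.wait(PAUSE_BETWEEN_CAPTURE):
        graph.grab_frame()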
import threading

import numpy as np
from pygrabber.dshow_graph import FilterGraph


class Camera:
    def __init__(self, device_id):
        self.graph = FilterGraph()
        self.graph.add_video_input_device(device_id)
        self.graph.add_sample_grabber(self.img_cb)
        self.graph.add_null_render()
        self.graph.get_input_device().set_format()
        self.graph.prepare_preview_graph()
        self.graph.run()
        self.image_grabbed = None
        self.image_done = threading.Event()

    def img_cb(self, image):
        # The sample grabber delivers BGR; flip the last axis to store RGB
        self.image_grabbed = np.flip(image, 2)
        self.image_done.set()

    def capture(self):
        self.graph.grab_frame()

    def wait_image(self):
        self.image_done.wait(1000)
        return self.image_grabbed
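# A short usage sketch for the class above (device 0 assumed, matplotlib used only for display):
# capture() triggers the sample grabber and wait_image() blocks until the callback has stored the frame.
import matplotlib.pyplot as plt

cam = Camera(0)
cam.capture()
frame = cam.wait_image()
if frame is not None:
    plt.imshow(frame)
    plt.show()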
# This code lists the cameras connected to your PC:
from pygrabber.dshow_graph import FilterGraph

graph = FilterGraph()
print(graph.get_input_devices())
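# Building on the listing above, a small sketch (not part of the original snippet) that maps each
# device name to its index so a camera can be selected by name; with OpenCV's DirectShow backend
# the same index can usually be used to open the device.
import cv2

name_to_index = {name: index for index, name in enumerate(graph.get_input_devices())}
if name_to_index:
    first_name = next(iter(name_to_index))
    cap = cv2.VideoCapture(name_to_index[first_name], cv2.CAP_DSHOW)
    print(f"Opened '{first_name}':", cap.isOpened())
    cap.release()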
from __future__ import print_function
from pygrabber.dshow_graph import FilterGraph
from tkinter import Tk, Message
# tested under Python 2 and 3

def on_closing(event=0):
    graph.stop()
    try:
        root.destroy()
    except:
        pass

graph = FilterGraph()
graph.add_input_device(0)
graph.add_default_render()
graph.prepare()
graph.run()
root = Tk()
w = Message(root, text="Close this window to stop the program")
w.pack()
root.protocol("WM_DELETE_WINDOW", on_closing)
# root.withdraw()  # hide Tkinter main window
root.mainloop()
# This code shows a screen with the live image from the first camera on your PC.
# We add two filters to the graph: one is a source filter corresponding to the first camera connected to your PC,
# the second is the default render, which shows the images from the camera in a window on the screen.
# Then we call prepare, which connects the two filters together, and run, which executes the graph.
# Finally, we need a way to pause the program while watching the camera video.
# I use the Tkinter mainloop function, which fetches and handles Windows events, so the application doesn't seem frozen.
from pygrabber.dshow_graph import FilterGraph
from tkinter import Tk

graph = FilterGraph()
graph.add_video_input_device(0)
graph.add_default_render()
graph.prepare_preview_graph()
graph.run()
root = Tk()
root.withdraw()  # hide Tkinter main window
root.mainloop()
class PyGrabberCameraHW(HardwareComponent):

    name = 'pygrabber_camera'

    def setup(self):
        self.settings.New('cam_id', dtype=int, initial=0, choices=(0,))
        self.settings.New('cam_name', dtype=str, ro=True)
        self.settings.New('format', dtype=int, choices=(0,))
        self.add_operation('Refresh_Cameras', self.refresh_camera_list)

    def connect(self):
        S = self.settings
        from pygrabber.dshow_graph import FilterGraph
        self.graph = FilterGraph()
        self.graph.add_input_device(S['cam_id'])
        S['cam_name'] = self.graph.get_input_devices()[S['cam_id']]
        formats = [("{}: {} {}x{} {}bit".format(*x), x[0])
                   for x in self.graph.get_formats()]
        S.format.change_choice_list(formats)
        self.graph.set_format(S['format'])
        self.img_buffer = []
        self.graph.add_sample_grabber(self.image_callback)
        self.graph.add_null_render()
        self.graph.prepare()
        self.graph.run()

    def threaded_update(self):
        self.graph.grab_frame()
        time.sleep(0.1)

    def disconnect(self):
        self.settings.disconnect_all_from_hardware()
        if hasattr(self, 'graph'):
            del self.graph

    def refresh_camera_list(self):
        from pygrabber.dshow_graph import FilterGraph
        graph = FilterGraph()
        cams = graph.get_input_devices()
        camera_choices = [("{}: {}".format(cam_id, cam_name), cam_id)
                          for cam_id, cam_name in enumerate(cams)]
        self.settings.cam_id.change_choice_list(camera_choices)
        del graph

    def image_callback(self, img):
        self.img_buffer.append(img)
        if len(self.img_buffer) > 20:
            self.img_buffer = self.img_buffer[-20:]
import threading

import matplotlib.pyplot as plt
import numpy as np
from pygrabber.dshow_graph import FilterGraph

image_done = threading.Event()
image_grabbed = None

def img_cb(image):
    global image_done
    global image_grabbed
    image_grabbed = np.flip(image, 2)
    image_done.set()

graph = FilterGraph()
screen_recorder_id = next(device[0] for device in enumerate(graph.get_input_devices())
                          if device[1] == "screen-capture-recorder")
graph.add_input_device(screen_recorder_id)
graph.add_sample_grabber(img_cb)
graph.add_null_render()
graph.prepare()
graph.run()
input("Press ENTER to capture a screenshot")
graph.grab_frame()
image_done.wait(1000)
graph.stop()
plt.imshow(image_grabbed)
plt.show()
import threading

import numpy as np
from pygrabber.dshow_graph import FilterGraph


class Camera:
    def __init__(self, device_id):
        self.graph = FilterGraph()
        self.graph.add_video_input_device(device_id)
        self.graph.add_sample_grabber(self.img_cb)
        self.graph.add_null_render()
        self.graph.prepare_preview_graph()
        self.graph.run()
        self.image_grabbed = None
        self.image_done = threading.Event()

    def img_cb(self, image):
        # The sample grabber delivers BGR; flip the last axis to store RGB
        self.image_grabbed = np.flip(image, 2)
        self.image_done.set()

    def read(self):
        # Trigger a grab and wait for the callback to deliver the frame (RGB)
        self.graph.grab_frame()
        self.image_done.wait(1000)
        return self.image_grabbed

    def read_bgr(self):
        # Flip the RGB frame back to BGR (OpenCV's channel order)
        return np.flip(self.read(), 2)

    def stop(self):
        self.graph.stop()
        self.image_done.clear()
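# A minimal usage sketch for this Camera class (device 0 assumed), showing a few frames with OpenCV:
import cv2

cam = Camera(0)
for _ in range(10):
    frame = cam.read_bgr()
    cv2.imshow("preview", frame)
    cv2.waitKey(30)
cam.stop()
cv2.destroyAllWindows()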
def videoCaptureDevices(self):
    '''Returns a list of video capture devices.'''
    graph = FilterGraph()
    vidDevices = graph.get_input_devices()
    return vidDevices
# The grabbed image is flipped since the image returned by the sample grabber filter has the BGR format,
# but matplotlib requires the RGB format.
import threading

import matplotlib.pyplot as plt
import numpy as np
from pygrabber.dshow_graph import FilterGraph

image_done = threading.Event()
image_grabbed = None

def img_cb(image):
    global image_done
    global image_grabbed
    image_grabbed = np.flip(image, 2)
    image_done.set()

graph = FilterGraph()
graph.add_video_input_device(0)
graph.add_sample_grabber(img_cb)
graph.add_null_render()
graph.prepare_preview_graph()
graph.run()
input("Press ENTER to grab photo")
graph.grab_frame()
image_done.wait(1000)
graph.stop()
plt.imshow(image_grabbed)
plt.show()
video_changed_event = threading.Event()
exit_event = threading.Event()
OBS_VIRTUAL_CAM_DLL_PATH = os.path.join(
    os.getcwd(), 'OBS-VirtualCam\\bin\\32bit\\obs-virtualsource.dll')
created_virtual_cam_flag = threading.Event()
logging.basicConfig(
    filename='app.log',
    filemode='w',
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Load camera devices
cam_to_id = {}
try:
    graph = FilterGraph()
    cam_to_id = {name: id for id, name in enumerate(graph.get_input_devices())}
except Exception as e:
    logging.error(e)

def is_admin():
    try:
        return ctypes.windll.shell32.IsUserAnAdmin()
    except:
        return False

def run_as(command):
    if is_admin():
        try:
from __future__ import print_function
from pygrabber.dshow_graph import FilterGraph
import cv2
# tested and works with Python 3, does not work in Python 2

def show_image(image):
    cv2.imshow("Image", image)

graph = FilterGraph()
cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
graph.add_input_device(0)
graph.add_sample_grabber(show_image)
graph.add_null_render()
graph.prepare()
graph.run()
print("Press 'q' or 'ESC' to exit")
k = cv2.waitKey(1)
while k not in [27, ord('q')]:
    graph.grab_frame()
    k = cv2.waitKey(1)
graph.stop()
cv2.destroyAllWindows()
print("Done")