def __init__(self, **kwargs):
    """Read arguments (and change settings) and initialize modules.

    Keyword arguments are split into data inputs and parameter inputs,
    stored on ``self.args``, and any missing entries are filled with
    defaults before mode flags are derived from them.
    """
    # Default data inputs
    self.image = None
    self.plant_db = DB()

    # Default parameter inputs
    self.params = Parameters()

    # Load keyword-argument inputs, then keep the raw dict around
    self._data_inputs(kwargs)
    self._parameter_inputs(kwargs)
    self.args = kwargs

    # Fill in any arguments the caller did not supply
    self._set_defaults()

    # Derive settings implied by the supplied inputs
    if self.args['calibration_img'] is not None:
        # A calibration image implies coordinate output is wanted
        self.args['coordinates'] = True
    if self.args['GUI']:
        # GUI mode handles its own output; disable file/console output
        self.args['save'] = False
        self.args['text_output'] = False
    if self.args['app']:
        # App mode: quiet console, read config from environment variables
        self.args['verbose'] = False
        self.args['from_env_var'] = True
        self.plant_db.app = True

    # Remaining attribute initialization
    self.p2c = None
    self.capture = Capture().capture
    self.final_marked_image = None
    self.plant_db.tmp_dir = None
def main():
    """Capture frames and run face/eye detection and rendering in a loop.

    A full face search runs every 10th frame; on the other frames the
    detector is restricted to the previously found face rectangles,
    which is much cheaper.
    """
    # Instantiate collaborators
    detector = FaceDetector(FACE_CLASSIFIER_PATH, EYE_CLASSIFIER_PATH)
    model = FaceModel()
    display = Display()
    capture = Capture()

    # Start the speech daemon so later speech calls have a server to use
    subprocess.call(['speech-dispatcher'])

    frame_index = 0
    while True:
        # Grab the current set of frames from the camera
        frames = capture.read()

        # Full face detection every 10th frame; otherwise search only near
        # the previously found face rectangles.
        # FIX: the original compared `i % 10 is 0`, which relies on CPython
        # small-int interning -- `==` is the correct integer comparison.
        if frame_index % 10 == 0:
            rects = detector.detect(frames)
        else:
            rects = detector.detect(frames, model.getPreviousFaceRects())
        frame_index += 1

        # Feed the detections into the model, then render the scene
        model.add(rects)
        display.renderScene(frames['display'], model, rects)
        display.renderEyes(frames['color'], model)
def main():
    """Record webcam and screen into one combined video, then optionally email it.

    Frames are grabbed from the primary camera and the desktop, merged by
    the ``Capture`` helper, and written to a dated file on the desktop.
    On a configured day of the month the recording directory is emailed.
    """
    current_time = datetime.now()
    month_and_year = datetime.now().strftime("%B %Y")
    new_dir = make_directory(DESKTOP_PATH, month_and_year)

    # Screen dimensions drive the combined output size
    scr_w, scr_h = get_monitor_res()

    # Dated output path inside the month's directory
    combined_save_path = (new_dir + "\\combined_"
                          + current_time.strftime(DATE_FILE_FORMAT) + FILE_EXT)
    cap_class = Capture(-1, combined_save_path, [scr_w, scr_h])
    capture = cap_class.capture
    writer = cap_class.get_writer()

    while True:
        # Desktop screenshot converted to an OpenCV frame
        grabbed = ImageGrab.grab()
        screen_frame = cv2.cvtColor(np.array(grabbed), cv2.COLOR_BGR2RGB)

        # Webcam frame; stop when the camera yields nothing
        ret, cam_frame = capture.read()
        if not ret:
            break
        cv2.imshow("Frame", cam_frame)

        # Merge camera and screen into one frame and persist it
        cam_resized = cap_class.resize(cam_frame)
        cam_merged = cap_class.merge_frame(cam_resized)
        screen_merged = cap_class.merge_frame(screen_frame)
        writer.write(cap_class.get_output(cam_merged, screen_merged))

        # Quit on 'q'
        if (cv2.waitKey(1) & 0xFF) == ord("q"):
            break

    # When everything is done, release the capture resources
    capture.release()
    writer.release()
    cv2.destroyAllWindows()

    # Send the monthly email on the configured day
    if datetime.now().day == SET_DAY:
        host, port, address, password = get_settings(SETTINGS_FILE)
        sender = EmailSender(host, port, address, password)
        message = "Monthly Email of Video Capture"
        sender.send_message(message=message, dt_file_path=new_dir, attach=True)
def _get(self, url, sub_dir):
    """Capture network/screen output while browsing *url* and return its links.

    Starts the capture, loads the page, extracts its links, then always
    tears down both the capture and the browser -- even on failure.
    Failures are recorded to an exception file rather than propagated.

    :param url: page to visit
    :param sub_dir: directory for capture output and exception reports
    :return: list of extracted links (empty if anything went wrong)
    """
    capture = Capture(sub_dir, self.conf)
    browser = Browser(url, sub_dir, self.conf)
    links = []
    try:
        capture.run()
        sleep(3)  # give the capture time to start before loading the page
        html = browser.get()
        links = self._get_links(html)
        sleep(30)  # keep capturing while the page settles
    # FIX: the two original except clauses had identical bodies -- merged
    # into a single tuple except for consistency.
    except (WebDriverException, KeyboardInterrupt):
        # Record the traceback instead of aborting the whole run
        self._create_exception_file(traceback.format_exc(), sub_dir)
    finally:
        capture.kill()
        browser.close()
    return links
def windowDidLoad(self):
    """Wire up data sources and delegates once the window has loaded."""
    NSWindowController.windowDidLoad(self)

    # Select the local Frida device (raises IndexError if none exists)
    devices = frida.get_device_manager().enumerate_devices()
    local_device = [d for d in devices if d.type == 'local'][0]

    self.processList = ProcessList(local_device)
    self.capture = Capture(local_device)

    # Process selector combo reads from the process list
    self.processCombo.setUsesDataSource_(True)
    self.processCombo.setDataSource_(self.processList)

    # Capture feeds the call table; we observe both capture and calls
    self.capture.setDelegate_(self)
    self.callTableView.setDataSource_(self.capture.calls)
    self.capture.calls.setDelegate_(self)

    self.loadDefaults()
    self.updateAttachForm_(self)
def post_display():
    """Record the framebuffer to a movie when frame dumping is on, then swap.

    Uses the module-level ``capture`` as a lazily created recorder: it is
    instantiated at the current window size on first use and finalized
    into a movie as soon as dumping is switched off.
    """
    global frame_number
    global capture

    if dump_frames:
        frame_interval = 4  # record every 4th frame
        if frame_number % frame_interval == 0:
            width = glutGet(GLUT_WINDOW_WIDTH)
            height = glutGet(GLUT_WINDOW_HEIGHT)
            # Lazily create the recorder at the current window size
            if capture is None:
                capture = Capture(width, height)
            glPixelStorei(GL_PACK_ALIGNMENT, 1)
            # NOTE(review): GL_BYTE (signed) is unusual for pixel readback --
            # GL_UNSIGNED_BYTE is the common choice; confirm Capture expects
            # signed components before changing.
            capture.record(glReadPixels(0, 0, width, height, GL_RGBA, GL_BYTE))
    else:
        # Dumping disabled: finalize and drop any in-progress recording
        if capture is not None:
            capture.create_movie()
            capture = None

    glutSwapBuffers()
def __start_capture(self):
    """Create a new Capture instance and begin capturing."""
    print("Start capture")
    new_capture = Capture()
    new_capture.startCapture()
    self.capture = new_capture
def __init__(self, splitter, parent=None):
    """Build the side menu.

    :param splitter: splitter widget the menu's buttons manipulate
    :param parent: optional parent widget, forwarded to the base class
    """
    super(SideMenu, self).__init__(parent)
    # Collaborators and held references
    self.capture = Capture()
    self._constants = Constants()
    self._splitter = splitter
    # Populate the menu with its buttons
    self._init_menu_buttons()
def capture(self):
    """Capture an image from the camera and load it as the 'original' image."""
    # Take the photo, record the device coordinates, then load the file
    captured_filename = Capture().capture()
    self.plant_db.getcoordinates()
    self.images['original'] = self.load(captured_filename)
# Farmware script: configure a pin, move the device, capture an image,
# and define helpers for HSV masking and channel normalization.
import requests
import cv2
from Capture import Capture
import numpy as np
from PlantDetection import PlantDetection
from farmware_tools import device
import CeleryPy
import time

#x=DB()
#y=x.get_image(95)

# Set pin 1 to I/O mode 4 -- TODO confirm meaning against farmware_tools docs
device.set_pin_io_mode(1, 4)
weeder = (20, 553, -402)  # presumably the weeder tool's (x, y, z); verify
# Move to absolute position (x=500, y=440, z=0), zero offset, speed 150
CeleryPy.move_absolute((500, 440, 0), (0, 0, 0), 150)
#send_message(message=str(os.environ), message_type='success', channel='toast')

# Capture a photo and load it as a color (3-channel) image
file = Capture().capture()
img2 = cv2.imread(file, 1)


def create_mask(image, lowergreen, uppergreen):
    """Create a mask from minimum and maximum HSV values."""
    imghsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)
    mask = cv2.inRange(imghsv, lowergreen, uppergreen)
    return mask


def normalize(array):
    # NOTE(review): this definition appears truncated in this view --
    # it computes the blue channel's min/max but has no return yet.
    b, g, r = cv2.split(array)
    minimum = np.min(b)
    maximum = np.max(b)
# Pan/tilt camera script: show a live camera preview in a child process
# while the main process (truncated below) presumably drives the servos.
import time
import sys
import cv2
from multiprocessing import Process
from Capture import Capture
from Servo import Servo

# Current pan/tilt angles in degrees (start centered)
x_angle = 90
y_angle = 90
# Servos on GPIO pins 23 and 24 -- presumably x and y axes; verify wiring
x_servo = Servo(23)
y_servo = Servo(24)
cam = Capture()


def img():
    """Continuously grab camera frames and display them in a window."""
    while True:
        cam.get_img()
        cv2.imshow('test', cam.img)
        cv2.waitKey(50)  # also lets the HighGUI window process events


# Run the preview loop in a separate process so the main loop stays free
img_process = Process(target=img)
img_process.start()

# NOTE(review): the main loop body is truncated in this view
while True:
# Gesture-recognition game client: reads camera frames, classifies
# gestures with a Keras model, and exchanges state with a local server.
import queue
import threading
import cv2
from Capture import Capture
from Client import Client
from Gesture import Gesture

# Queue that receives data pushed by the network client
que = queue.Queue()
gesture_rec = Gesture("modelv13.h5")  # pretrained gesture model file
camera = Capture()
client = Client('127.0.0.1', 12345)

data = {'status':''}
# Start feeding incoming server messages into the queue --
# presumably asynchronous; confirm against Client.get_data
client.get_data(que)

# Per-frame/UI state flags and counters
frame_count = 0
display_msg = False
display_time = False
display_info = False
time = 0  # NOTE(review): shadows the stdlib `time` module name
bullets = 0
rounds = 0
player_move = ''
opp_move = ''
msg = ""
stat_data = dict()

# Use the most recent message, if any has already arrived
if not que.empty():
    data = que.get()

# NOTE(review): the main loop body is truncated in this view
while True:
    try:
from Capture import Capture
from Edge import Edge
from Transform import Transform
from Threshold import Threshold
from PDF import pdf
from pis import four_point_transform
import numpy as np
import rect
import cv2
import imutils

# Document-scanning pipeline: capture image(s), detect edges,
# perspective-transform, threshold, and emit a PDF.
# FIX: renamed the local from `list`, which shadowed the builtin, and
# parenthesized print (identical output for one argument on Py2 and Py3).
captured = Capture()
print(captured)
Edge(captured)
Transform(captured)
Threshold(captured)
pdf(captured)
def takePicture(self, userName):
    """Run the capture flow for *userName*."""
    # Delegate the whole picture-taking workflow to Capture
    cam = Capture()
    cam.main(userName)