class __Game:
    """Top-level pygame application: owns the main window and the event loop."""

    def __init__(self, width, height):
        """Create the game window, its manager, and install the app icon."""
        self.window = Window('NeurAlbertaTech', width, height)
        self.window_manager = WindowManager(self.window)
        # Window icon for the taskbar / title bar.
        icon = pygame.image.load("neuralbertatech_logo.png")
        pygame.display.set_icon(icon)

    def run(self):
        """Pump pygame events until Escape or window close, ticking at 50 FPS."""
        clock = pygame.time.Clock()
        pygame.display.set_mode((0, 0))  # , pygame.FULLSCREEN
        while True:
            events = pygame.event.get()
            for event in events:
                escape_pressed = (event.type == pygame.KEYDOWN
                                  and event.key == pygame.K_ESCAPE)
                if escape_pressed or event.type == pygame.QUIT:
                    self.window.close()
                    return
            self.window_manager.run(events)
            clock.tick(50)
def __init__(self, width, height):
    """Build the main window and its manager, then set the application icon."""
    self.window = Window('NeurAlbertaTech', width, height)
    self.window_manager = WindowManager(self.window)
    # Load and install the window icon in one step.
    pygame.display.set_icon(pygame.image.load("neuralbertatech_logo.png"))
def __init__(self):
    """Build the connection-chooser window from the Glade layout.

    Loads all saved connections into the list box; if none exist yet,
    immediately opens the "add connection" flow so the user can create one.
    """
    builder = Gtk.Builder()
    builder.add_objects_from_file('app.glade', ('winConnections', ))
    self.window = builder.get_object('winConnections')
    self.connection_list = builder.get_object('listConnections')
    # Direct-connection form fields.
    self.ent_host = builder.get_object('entHost')
    self.ent_port = builder.get_object('entPort')
    self.ent_user = builder.get_object('entUser')
    self.ent_password = builder.get_object('entPassword')
    self.ent_database = builder.get_object('entDatabase')
    # Optional SSH-tunnel fields (host, user, private-key file chooser).
    self.ent_remote_host = builder.get_object('entRemoteHost')
    self.ent_remote_user = builder.get_object('entRemoteUser')
    self.file_remote_key = builder.get_object('fileRemoteKey')
    self.connections = Connections()
    for connection in self.connections.get_connections():
        self.connection_list.add(ListBoxRowWithData(connection))
    if self.connections.count() == 0:
        # No saved connections: jump straight into the add-connection flow.
        self.on_add_connection(None)
    builder.connect_signals({
        'onAddConnection': self.on_add_connection,
        'onListSelected': self.on_list_selected,
        'onRemoveConnection': self.on_remove_connection,
        'onSaveConnection': self.on_save_connection,
        'onConnect': self.on_connect
    })
    WindowManager.add_window(self.window)
    self.window.show_all()
def on_connect(self, obj):
    """Open a database connection, optionally through an SSH tunnel.

    On success, hands the connection (and tunnel, if any) to a QueryWindow
    and closes this window. On failure, shows an Alert — and stops any
    tunnel that was already started so its forwarding thread and local
    port are not leaked (bug fix: the original left the tunnel running).

    obj: the Gtk widget that emitted the signal (unused).
    """
    tunnel = None  # hoisted so the except blocks can see it
    try:
        local_port = self.ent_port.get_text()
        if self.ent_remote_host.get_text():
            # Tunnel the remote DB port to an ephemeral local port.
            tunnel = SSHTunnelForwarder(
                self.ent_remote_host.get_text(),
                ssh_username=self.ent_remote_user.get_text(),
                ssh_pkey=self.file_remote_key.get_filename(),
                remote_bind_address=('127.0.0.1', int(self.ent_port.get_text())))
            tunnel.start()
            local_port = tunnel.local_bind_port
        conn = psycopg2.connect(database=self.ent_database.get_text(),
                                user=self.ent_user.get_text(),
                                host=self.ent_host.get_text(),
                                port=local_port,
                                password=self.ent_password.get_text())
        # QueryWindow takes ownership of conn and tunnel from here on.
        QueryWindow(conn, tunnel)
        WindowManager.remove(self.window)
        self.window.destroy()
    except SSHException as e:
        # print(e) is valid in both Python 2 and 3 for a single argument;
        # the original `print e` was Python-2-only syntax.
        print(e)
        Alert('Unable to create SSH tunnel', self.window)
    except Exception as e:
        print(e)
        # Bug fix: if the tunnel came up but the DB connect failed,
        # tear the tunnel down so it doesn't leak.
        if tunnel is not None:
            tunnel.stop()
        Alert('Unable to connect to server.', self.window)
def __init__(self):
    """Construct the driver: singleton state first, then the Tk UI stack."""
    # Touch the GameState singleton now, so threads started later never
    # race its first construction.
    GameState()
    self.root = tk.Tk()
    self.windowManager = WindowManager()
    self.display = Display(self.root, self.windowManager)
    self.inputHandler = InputHandler(self.display.widget)
def __init__(self, capture=None, face_img_path=None, should_mirror=False):
    """Prepare windowing, capture, and face-tracking state.

    capture: optional pre-built capture source; defaults to camera 0.
    face_img_path: optional image file whose contents replace detected faces.
    should_mirror: flip the preview horizontally when True.
    """
    self._window_manager = WindowManager(self.on_keypress)
    if capture is None:
        capture = cv2.VideoCapture(0)
    self._capture_manager = CaptureManager(capture)
    self._window_name = 'FaceOff'
    self._should_mirror = should_mirror
    self._face_tracker = FaceTracker()
    self._show_face_rect = False
    self._swap_face = True
    self._template_face = (cv2.imread(face_img_path)
                           if face_img_path is not None else None)
class GameDriver:
    """Owns the Tk UI stack and runs the fixed-timestep game loop while
    assets load on a background thread."""

    def __init__(self):
        GameState( ) # initialize singletons before threading to avoid race conditions
        AssetLoader()
        MusicPlayer()
        self.root = tk.Tk()
        self.windowManager = WindowManager()
        self.display = Display(self.root, self.windowManager)
        self.inputHandler = InputHandler(self.display.widget)

    def initAssets(self):
        """Load assets/saves/settings (runs on a daemon thread), then unlock
        the game mode that mainloop() locked before starting us."""
        AssetLoader().loadAssets()
        AssetLoader().loadSaves()
        AssetLoader().loadSettings()
        # send events for loaded settings
        if AssetLoader().getSettings() is not None:
            for setting in AssetLoader().getSettings():
                GameState().onSettingChange(setting[0], setting[1])
        else:
            # NOTE(review): title music only starts when NO settings were
            # loaded — presumably a setting event triggers music otherwise;
            # confirm this branch is intentional.
            MusicPlayer().playNext(AssetLoader().getMusicPath('title'))
        self.windowManager.load()
        GameState().unlockGameMode()

    def mainloop(self):
        """Run the update/draw loop forever; exits the process on window
        close or unexpected error, returns normally on SystemExit."""
        # start off separate thread to load assets
        GameState().lockGameMode(GameMode.isLoading)
        t = threading.Thread(target=self.initAssets)
        t.daemon = True
        t.start()
        # run update-draw loop forever
        dt = 0.0
        while True:
            try:
                # Sleep only the part of the timestep not spent working.
                time.sleep(max(Globals.Timestep - dt, 0.0))
                # If the frame ran long, report the real elapsed time.
                timeElapsed = max(Globals.Timestep, dt)
                time1 = time.time()
                self.display.draw()
                keypresses = self.inputHandler.getKeyPresses()
                self.windowManager.update(timeElapsed, keypresses)
                self.root.update()
                time2 = time.time()
                dt = time2 - time1
            except tk.TclError:
                # window was closed
                sys.exit()
            except SystemExit:
                break # thrown on main menu exit
            except:
                # some other exception occurred
                if Globals.IsDev:
                    traceback.print_exc()
                sys.exit()
def __init__(self):
    """Set up windowing, stats counters, and the capture source."""
    self._thread_mode = True
    self._amount_frames = 0
    self._success_finding_contours = 0
    self._windowManager = WindowManager('Minimum Viable Product',
                                        self.on_keypress)
    # Source 0 = default local camera. A DroidCam URL such as
    # 'http://192.168.55.78:4747/video' also works here.
    source = 0
    self._captureManager = CaptureManager(
        cv2.VideoCapture(source), self._windowManager, False)
def __init__(self):
    """Grab the default Wnck screen and start the window manager."""
    screen = Wnck.Screen.get_default()
    # Wnck needs an explicit update before screen data is first read.
    screen.force_update()
    self._screen = screen
    self._window_manager: WindowManager = WindowManager()
    self._window_manager.start()
    # No switcher exists until one is explicitly created later.
    self._windows_switcher: Optional[WindowsSwitcher] = None
class GameDriver:
    """Owns the Tk root and drives a fixed-timestep update/draw loop."""

    def __init__(self):
        """Assemble the root window, grid manager, display, and input."""
        self.root = tk.Tk()
        self.windowManager = WindowManager(Globals.NumCols, Globals.NumRows)
        self.display = Display(self.root, self.windowManager)
        self.inputHandler = InputHandler(self.display.getWidget())

    def mainloop(self):
        """Update and draw forever; exit the process when the window closes."""
        while True:
            try:
                # TODO only sleep Timestep - computation time
                time.sleep(Globals.Timestep)
                keys = self.inputHandler.getKeyPresses()
                self.windowManager.update(Globals.Timestep, keys)
                self.display.draw()
                self.root.update()
            except tk.TclError:
                # The Tk window was closed by the user.
                sys.exit()
            except:
                # Anything else is fatal; dump a trace in dev builds first.
                if Globals.IsDev:
                    traceback.print_exc()
                sys.exit()
def __init__(self):
    """Wire up the UI: tray icon, window manager, RNN model, buttons, timer."""
    super().__init__()
    self.setupUi(self)
    # Tray icon — double-click restores the main window.
    self.trayIcon = QSystemTrayIcon(self)
    self.trayIcon.setIcon(QIcon('ui/icon.png'))
    self.trayIcon.activated.connect(self.restore_window)
    # Window tracking and the prediction model.
    self.WM = WindowManager()
    self.pre_window = self.WM.get_fore_window()
    self.rnn = RNN()
    # UI state flags.
    self.runState = False
    self.startState = True
    self.helpState = True
    # Button handlers.
    self.startButton.clicked.connect(self.btn_clk_start)
    self.trainButton.clicked.connect(self.btn_clk_train)
    self.helpButton.clicked.connect(self.btn_clk_help)
    # Poll the foreground window five times per second.
    self.timer = QTimer(self)
    self.timer.start(200)
    self.timer.timeout.connect(self.run)
def __init__(self, conn, tunnel):
    """Build the query window bound to an open database connection.

    conn: live DB-API connection used for the cursor and both data views.
    tunnel: SSH tunnel backing the connection (kept so it can be shut
        down later), or None when connected directly.
    """
    self.cursor = conn.cursor()
    # Paging / selection state for the data browser.
    self.last_detail_table = ''
    self.last_query_table = ''
    self.current_tab = 0
    self.current_page = 0
    self.current_table = ''
    self.store = None
    self.tunnel = tunnel
    builder = Gtk.Builder()
    builder.add_objects_from_file('app.glade', ('winQuery', ))
    builder.connect_signals({
        'on_table_row_selected': self.on_table_row_selected,
        'on_tab_selected': self.on_tab_selected,
        'on_previous_page': self.on_previous_page,
        'on_next_page': self.on_next_page,
        'on_filter_activate': self.on_filter_activate,
        'on_run_query': self.on_run_query,
        'on_data_key_press': self.on_data_key_press
    })
    self.query_tabs = builder.get_object('queryTabs')
    self.ent_data_filter = builder.get_object('entDataFilter')
    self.txt_query = builder.get_object('txtQuery')
    self.list_tables = builder.get_object('listTables')
    # Populate the table list before wiring up the data/query views.
    self.fetch_tables()
    self.data_view = DataView(builder.get_object('dataTree'), conn)
    self.query_view = QueryView(builder.get_object('queryTree'),
                                builder.get_object('lblQueryInfo'), conn)
    self.window = builder.get_object('winQuery')
    WindowManager.add_window(self.window)
    self.window.show_all()
def __init__(self, version, parent=None):
    """Build the Gorgon main window: managers, UI, menus, themes, icon.

    version: version string shown in the window title.
    parent: optional parent widget, forwarded to QMainWindow.
    """
    QtGui.QMainWindow.__init__(self, parent)
    self.version = version
    self.modules = []
    self.viewers = {}
    self.menus = MenuManager(self)
    self.actions = ActionManager(self)
    # NOTE: UI, actions, and menus are created in this order — preserve it.
    self.createUI()
    self.createActions()
    self.createMenus()
    self.themes = ThemeManager(self)
    self.plugins = {}
    self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
    self.dockWidgets = []
    self.windowManager = WindowManager(self)
    self.statusBar().showMessage(
        self.tr("Gorgon: Protein Visualization Suite"))
    self.setWindowTitle(self.tr("Gorgon - v" + self.version))
    # Resolve the icon path relative to the executable's directory.
    pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
    self.setWindowIcon(QtGui.QIcon(pathname + '/gorgon.ico'))
class MVP(object):
    """OpenCV rectangle-detection pipeline with an optional worker pool.

    Frames are captured, pushed through grayscale -> bilateral filter ->
    Canny -> morphological close, then contours approximating rectangles
    are drawn. Processing runs either on a thread pool (thread mode) or
    inline; all tuning parameters come from window trackbars.
    """

    def __init__(self):
        self._thread_mode = True
        self._windowManager = WindowManager('Minimum Viable Product',
                                            self.on_keypress)
        # Frame/success counters feed the on-screen success percentage.
        self._amount_frames = 0
        self._success_finding_contours = 0
        # DroidCam URL
        # url = 'http://192.168.55.78:4747/video'
        url = 0
        self._captureManager = CaptureManager(
            cv2.VideoCapture(url), self._windowManager, False)
        # self._curveFilter = filters.BGRPortraCurveFilter()
        # self._convolutionFilter = filters.FindEdgesFilter()
        # self._imageProcessor = image_processor.SimpleImageProcessor()
        # self._objectDetector = object_detector.SimpleObjectDetector()

    def run(self):
        """Run the main loop.

        Completed worker results are drained and displayed; new frames are
        submitted while the pool has spare capacity (mirrors OpenCV's
        video_threaded.py sample).
        """
        threadn = cv2.getNumberOfCPUs()
        pool = ThreadPool(processes=threadn)
        pending = deque()
        # latency = StatValue()
        # frame_interval = StatValue()
        # last_frame_time = clock()
        # TODO: Camera Calibration, Video Stabilization
        self._windowManager.create_window()
        while self._windowManager.is_window_created:
            self._captureManager.enter_frame()
            original = self._captureManager.original
            self._captureManager.frame = original
            # if original is not None:
            #     output = self.process_and_detect(original)
            #     self._captureManager.frame = output
            # Drain any finished tasks, oldest first.
            while len(pending) > 0 and pending[0].ready():
                output = pending.popleft().get()
                # latency.update(clock() - t0)
                cv2.putText(output, "threaded : " + str(self._thread_mode),
                            (15, 80), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (255, 0, 0), 2)
                # draw_str(res, (20, 40), "latency : %.1f ms" % (latency.value * 1000))
                # draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value * 1000))
                self._captureManager.frame = output
                self._captureManager.exit_frame()
            # Submit new work only while the pool has a free worker.
            if len(pending) < threadn:
                # ret, frame = cap.read()
                # t = clock()
                # frame_interval.update(t - last_frame_time)
                # last_frame_time = t
                if self._thread_mode:
                    task = pool.apply_async(self.process_and_detect,
                                            (original.copy(),))
                else:
                    task = DummyTask(self.process_and_detect(original))
                pending.append(task)
            self._captureManager.exit_frame()
            self._windowManager.process_events()

    def process_and_detect(self, src):
        """Process one BGR frame and return a BGR image with detections drawn."""
        self._amount_frames += 1
        # filters.strokeEdges(src, src)
        # self._curveFilter.apply(src, src)
        # self._convolutionFilter.apply(src, src)
        # self._imageProcessor.process(src, src)
        # self._objectDetector.detect(src)
        # TODO: Image Preprocessor: removing shadows, small blobs, noise, enhancing, etc
        # TODO: Image Processor
        processing = self.image_processing_template_one(src)
        # gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        # filtering = cv2.bilateralFilter(gray, 1, 10, 120)
        # TODO: Object Detector
        output = cv2.cvtColor(processing, cv2.COLOR_GRAY2BGR)
        success_detect = self.try_detect(input=processing, output=output,
                                         post_detect_fn=self.post_detect_draw)
        if not success_detect:
            # TODO: image_processing_template_two
            pass
        # TODO: Get 4-contours square counts If zero
        # TODO: [For 3D] Wrapping & Transformations
        # TODO: to be continued
        return output

    def image_processing_template_one(self, src):
        """Grayscale -> bilateral filter -> Canny -> morphological close."""
        # TODO: Color space: GRAYSCALE, HSV, ...
        gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        # TODO: Convolution, Blurring, ...
        # filtering = cv2.bilateralFilter(gray, 1, 10, 120)
        filtering = self.image_filtering(gray)
        # TODO: Edge detection
        # edges = cv2.Canny(gray, 10, 250)
        edges = self.edge_detection(filtering)
        # TODO: Morphological operations
        # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        # closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
        closed = self.morphological_transformations(edges)
        return closed

    def image_filtering(self, src):
        """Bilateral filter with trackbar-controlled parameters."""
        diameter = self._windowManager.get_trackbar_value('diameter(for bilateralFilter)')
        sigma_color = self._windowManager.get_trackbar_value('sigmaColor(for bilateralFilter)')
        sigma_space = self._windowManager.get_trackbar_value('sigmaSpace(for bilateralFilter)')
        filtering = cv2.bilateralFilter(src, diameter, sigma_color, sigma_space)
        return filtering

    def edge_detection(self, src):
        """Canny edge detection with trackbar-controlled thresholds."""
        threshold_min = self._windowManager.get_trackbar_value('threshold min(for Canny edge detection)')
        threshold_max = self._windowManager.get_trackbar_value('threshold max(for Canny edge detection)')
        edges = cv2.Canny(src, threshold_min, threshold_max)
        return edges

    def morphological_transformations(self, edges):
        """Close gaps in the edge map with a rectangular structuring element."""
        kernel_size = self._windowManager.get_trackbar_value('kernel size(morphological structuring element)')
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                           (kernel_size, kernel_size))
        closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
        return closed

    def try_detect(self, input, output, post_detect_fn):
        """Run the standard rectangle detector; returns True on any hit."""
        success_standard = self.detect_standard_rects(input, output,
                                                      post_detect_fn)
        if not success_standard:
            # TODO: detect_rects_by_lines
            pass
        return success_standard

    def detect_standard_rects(self, input, output, post_detect_fn):
        """Find polygon contours with the configured number of edges.

        Draws each hit via post_detect_fn, overlays the running success
        percentage, and returns True when at least one rect was found.
        """
        contour_area_points = self._windowManager.get_trackbar_value('Contour area min amount points (*100)')
        approx_edges_amount = self._windowManager.get_trackbar_value('Approx edges amount')
        # NOTE(review): 3-value return is the OpenCV 3.x findContours API;
        # OpenCV 4 returns (contours, hierarchy) — confirm the pinned version.
        _, contours, h = cv2.findContours(input, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        rects_count = 0
        for cont in contours:
            if cv2.contourArea(cont) > contour_area_points * 100:
                arc_len = cv2.arcLength(cont, True)
                approx = cv2.approxPolyDP(cont, 0.1 * arc_len, True)
                if len(approx) == approx_edges_amount:
                    post_detect_fn(output, approx)
                    rects_count += 1
        if rects_count > 0:
            self._success_finding_contours += 1
        color_yellow = (0, 255, 255)
        percent_success_finding = round(
            (self._success_finding_contours / self._amount_frames) * 100, 2)
        cv2.putText(output, str(percent_success_finding) + "%", (15, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, color_yellow, 2)
        return rects_count > 0

    @staticmethod
    def post_detect_draw(output, approx):
        """Draw one detected polygon outline in blue."""
        cv2.drawContours(output, [approx], -1, (255, 0, 0), 2)

    def on_keypress(self, keycode):
        """Handle a keypress.

        space -> Toggle threaded mode.
        tab -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32:  # space
            # self._captureManager.write_image('screenshot.png')
            self._thread_mode = not self._thread_mode
        elif keycode == 9:  # tab
            if not self._captureManager.is_writing_video:
                self._captureManager.start_writing_video(
                    'screencast.avi')
            else:
                self._captureManager.stop_writing_video()
        elif keycode == 27:  # escape
            self._windowManager.destroy_window()
class Main(QMainWindow, form_class):
    """Tray application that logs foreground-window changes and, when
    running, uses an RNN to predict and dim the next likely windows."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Tray icon: double-click restores the window.
        self.trayIcon = QSystemTrayIcon(self)
        self.trayIcon.setIcon(QIcon('ui/icon.png'))
        self.trayIcon.activated.connect(self.restore_window)
        self.WM = WindowManager()
        self.pre_window = self.WM.get_fore_window()
        self.rnn = RNN()
        self.runState = False
        self.startButton.clicked.connect(self.btn_clk_start)
        self.startState = True
        self.trainButton.clicked.connect(self.btn_clk_train)
        self.helpButton.clicked.connect(self.btn_clk_help)
        self.helpState = True
        # Poll the foreground window five times per second.
        self.timer = QTimer(self)
        self.timer.start(200)
        self.timer.timeout.connect(self.run)

    def restore_window(self, reason):
        """Bring the window back when the tray icon is double-clicked."""
        if reason == QSystemTrayIcon.DoubleClick:
            self.trayIcon.hide()
            self.showNormal()

    def btn_clk_start(self):
        """Toggle between running (minimized to tray, dimming active)
        and stopped states."""
        if self.startState:
            self.startState = False
            self.startButton.setText('Stop')
            self.runState = True
            self.back = Background()
            self.back.show()
            self.trayIcon.setVisible(True)
            self.hide()
        else:
            self.startState = True
            self.startButton.setText('Start')
            self.runState = False
            self.back.close()
            self.timer.stop()

    def btn_clk_train(self):
        """Retrain the RNN from scratch, discarding any saved parameters."""
        if os.path.isfile('params.pkl'):
            os.remove('params.pkl')
            # presumably gives the OS time to release the file — TODO confirm
            time.sleep(2)
        self.rnn = RNN()
        self.rnn.train()
        QMessageBox.information(self, "RNN", "train finished")

    def btn_clk_help(self):
        """Toggle between showing contact info and minimizing to tray."""
        if self.helpState:
            self.helpState = False
            self.helpButton.setText('go to tray icon')
            QMessageBox.information(self, "Help", "*****@*****.**")
        else:
            self.helpState = True
            self.helpButton.setText('Help')
            self.trayIcon.setVisible(True)
            self.hide()

    def get_hash(self, text):
        """Return the hex MD5 digest of *text* (used as a window identity)."""
        hash = hashlib.md5()
        hash.update(text.encode())
        return hash.hexdigest()

    def run(self):
        """Timer tick: log foreground-window changes and drive dimming.

        Appends the hash of each newly focused window to data.txt; while
        running, asks the RNN for the next likely windows and raises them
        above the 'dimmer' window.
        """
        # NOTE(review): the bare except silently swallows all errors each
        # tick (best-effort polling) — consider logging in dev builds.
        try:
            with open("data.txt", 'a') as f:
                cur_window = self.WM.get_fore_window().split()[0]
                if cur_window != self.pre_window:
                    self.pre_window = cur_window
                    cur_window_hash = self.get_hash(cur_window)
                    f.write(" " + cur_window_hash)
                    if self.runState:
                        target_windows = list(
                            self.rnn.test([cur_window_hash], 3))
                        target_windows.append(cur_window_hash)
                        print(target_windows)
                        self.WM.set_window(self.WM.find_window('dimmer'))
                        all_windows = list(self.WM.get_windows())
                        for t_window in target_windows:
                            for i, window in enumerate(all_windows):
                                if t_window == self.get_hash(
                                        window.split()[0]):
                                    self.WM.set_window(
                                        self.WM.find_window(all_windows[i]))
        except:
            pass
#!/usr/bin/python import sys from libpyGORGON import PDBAtom,Vector3DFloat from PyQt4 import QtGui, QtCore from window_manager import WindowManager from main_window_form import MainWindowForm app = QtGui.QApplication(sys.argv) window = MainWindowForm("2.0") window.addModule(WindowManager(window)) window.showMaximized() cAlphaViewer=window.viewers['calpha'] rawAtom=PDBAtom('AAAA', 'A', 1, 'CA') rawAtom.setPosition(Vector3DFloat(-1, 0, 0)) rawAtom = cAlphaViewer.renderer.addAtom(rawAtom) a = PDBAtom('AAAA', 'A', 2, 'CA') a.setPosition(Vector3DFloat(1, 0, 0)) print 'a:', a b = cAlphaViewer.renderer.addAtom(a) print 'b-1:', b if not cAlphaViewer.loaded: cAlphaViewer.loaded = True cAlphaViewer.emitModelLoaded() del a #This line causes a segmentation fault later on, which is weird because we never use the atom at this location in memory after this #Thus, unless there's a change, I'll have to be extremely careful about Python's garbage collection rawAtom.setColor(1, 0, 0, 1) #Red
def __init__(self):
    """Set up the window manager, the recorder feeding it, and phase state."""
    self._second_phase_activate = False
    self._window_manager = WindowManager('Minimum Viable Product',
                                         self._on_keypress)
    self._recorder = ImageDataRecorder(self._window_manager)
class App(object):
    """Records images of a rotating package: UART drives LEDs and a stepper
    motor while the recorder captures each face."""

    def __init__(self):
        self._window_manager = WindowManager('Minimum Viable Product',
                                             self._on_keypress)
        self._recorder = ImageDataRecorder(self._window_manager)
        self._second_phase_activate = False

    def _on_keypress(self, keycode):
        """Handle a keypress.

        space -> Take a screenshot.
        tab -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32:  # space
            self._recorder.write_image('screenshot.png')
        elif keycode == 9:  # tab
            if not self._recorder.is_writing_video:
                self._recorder.start_writing_video(
                    'screencast.avi')
            else:
                self._recorder.stop_writing_video()
        elif keycode == 27:  # escape
            self._recorder.stop()
            # Busy-wait until the recorder drains its pending frames.
            while self._recorder.processed:
                pass
            self._window_manager.destroy_window()
        elif keycode == 115:  # s
            self._second_phase_activate = True
        else:
            print(keycode)

    def _recorder_show(self):
        """Display original/processing/ROI captures until recording ends
        or the window is closed."""
        while self._recorder.processed and self._window_manager.is_window_created:
            self._window_manager.process_events()
            original = self._recorder.original_capture
            processing = self._recorder.processing_capture
            roi = self._recorder.roi_capture
            self._window_manager.show(original, processing, roi)
            self._recorder.release_frame()

    def run_simple(self):
        """Record a single long capture of face 0."""
        self._window_manager.create_window()
        self._recorder.start(0, 1000)
        self._recorder_show()

    def run_algorithm(self):
        """Capture all six faces: four side faces (90° turns), then a pause
        for the operator to flip the package, then two more (180° turns)."""
        self._window_manager.create_window()
        uart = uart_control.UartControl()
        # first phase: 4 faces
        base_count = 3
        current_face = 0
        led = 10
        while current_face < 4:
            # TODO: uart.enable_led(led)
            # NOTE(review): enable_led is commented out here but disable_led
            # below is live — confirm whether the enable call was meant to run.
            self._recorder.start(current_face, base_count)
            # TODO: show recording process, ...
            self._recorder_show()
            time.sleep(4)
            uart.disable_led()
            time.sleep(1)
            # TODO: rotate 90
            uart.start_step(1600)
            time.sleep(4)
            current_face += 1
        # how to wait: by keypress 's'
        #while not self._second_phase_activate:
        #    self._window_manager.process_events()
        time.sleep(10)
        # second phase: 2 faces
        while current_face < 6:
            # TODO: uart.enable_led(led)
            self._recorder.start(current_face, base_count)
            # TODO: show recording process, ...
            self._recorder_show()
            time.sleep(4)
            uart.disable_led()
            time.sleep(1)
            # TODO: rotate 180
            uart.start_step(3200)
            time.sleep(8)
            current_face += 1
        # print(len(self._recorder.data))

    def test_uart(self):
        """Manual hardware check: LED on, one full rotation, LED off."""
        uart = uart_control.UartControl()
        uart.enable_led(10)
        uart.start_step(6400)
        time.sleep(12.8)
        uart.disable_led()
class App(object):
    """Second revision of the package scanner: records six faces, speaks an
    operator prompt between phases, and dumps captured images to disk."""

    def __init__(self):
        self._window_manager = WindowManager('Minimum Viable Product',
                                             self._on_keypress)
        self._recorder = ImageDataRecorder(self._window_manager)
        self._second_phase_activate = False

    def _on_keypress(self, keycode):
        """Handle a keypress.

        space -> Take a screenshot.
        tab -> Start/stop recording a screencast.
        escape -> Quit.
        """
        if keycode == 32:  # space
            self._recorder.write_image('screenshot.png')
        elif keycode == 9:  # tab
            if not self._recorder.is_writing_video:
                self._recorder.start_writing_video(
                    'screencast.avi')
            else:
                self._recorder.stop_writing_video()
        elif keycode == 27:  # escape
            self._recorder.stop()
            # Busy-wait until the recorder drains its pending frames.
            while self._recorder.processed:
                pass
            self._window_manager.destroy_window()
        elif keycode == 115:  # s
            self._second_phase_activate = True
        else:
            print(keycode)

    def _recorder_show(self):
        """Display original/processing/ROI captures until recording ends
        or the window is closed."""
        while self._recorder.processed and self._window_manager.is_window_created:
            self._window_manager.process_events()
            original = self._recorder.original_capture
            processing = self._recorder.processing_capture
            roi = self._recorder.roi_capture
            self._window_manager.show(original, processing, roi)
            self._recorder.release_frame()

    def run_simple(self):
        """Record a single long capture of face 0."""
        self._window_manager.create_window()
        self._recorder.start(0, 1000)
        self._recorder_show()

    def run_algorithm(self):
        """Capture faces 1-4 (90° turns), prompt the operator to flip the
        package, capture faces 5-6 (180° turns), then write all captured
        images to ./images/."""
        self._window_manager.create_window()
        uart = uart_control.UartControl()
        # first phase: 4 faces
        base_count = 5
        current_face = 1
        led = 10
        # with SpeakerManager() as speaker:
        #     speaker.say('Начинаем получение данных', 3)  # "Starting data acquisition"
        while current_face <= 4:
            # TODO:
            # uart.enable_led(led)
            self._recorder.start(current_face, base_count)
            # TODO: show recording process, ...
            self._recorder_show()
            time.sleep(4)
            # uart.disable_led()
            # time.sleep(1)
            # TODO: rotate 90
            uart.start_step(1600)
            time.sleep(4)
            current_face += 1
        # how to wait: by keypress 's'
        # while not self._second_phase_activate:
        #     self._window_manager.process_events()
        # Spoken prompt: "Flip the package" (runtime string kept verbatim).
        with SpeakerManager() as speaker:
            speaker.say('Переверните упаковку', 3)
        time.sleep(10)
        # second phase: 2 faces
        while current_face <= 6:
            # TODO:
            # uart.enable_led(led)
            self._recorder.start(current_face, base_count)
            # TODO: show recording process, ...
            self._recorder_show()
            time.sleep(4)
            # uart.disable_led()
            # time.sleep(1)
            # TODO: rotate 180
            uart.start_step(3200)
            time.sleep(8)
            current_face += 1
        print(len(self._recorder.data))
        # Persist every captured frame, keyed by face/frame id.
        image_folder = './images/'
        for key in self._recorder.data.keys():
            image_filename = image_folder + 'image2_' + str(key) + '.png'
            print(image_filename)
            cv2.imwrite(image_filename, self._recorder.data[key])
# Application entry point: construct the chatbot, the order maker, and the
# window manager, then hand control to the window loop.
from chatbot import Chatbot
from order_maker import OrderMaker
from window import Window
from window_manager import WindowManager
import threading
import time

bot = Chatbot()
orderMaker = OrderMaker()
window = WindowManager()
window.startWindows()

# NOTE(review): dead threading experiment below. As written it would also be
# wrong — Thread(target=fn(), ...) CALLS fn immediately instead of passing it;
# the target should be the callable itself (e.g. target=window.startWindows).
#threadServer = threading.Thread(target=startServer(), args=())
#threadWindows = threading.Thread(target=window.startWindows(), args=())
#threadWindows.start()
#threadServer.start()
class VideoCaptureApp(object):
    """Webcam face-swap demo (Python 2 — note the print statements and the
    integer `/` division used for the clone center)."""

    def __init__(self, capture=None, face_img_path=None, should_mirror=False):
        self._window_manager = WindowManager(self.on_keypress)
        # Default to camera 0 unless a capture source was supplied.
        self._capture_manager = CaptureManager(cv2.VideoCapture(0)) \
            if capture is None else CaptureManager(capture)
        self._window_name = 'FaceOff'
        self._should_mirror = should_mirror
        self._face_tracker = FaceTracker()
        self._show_face_rect = False
        self._swap_face = True
        self._template_face = None
        if face_img_path is not None:
            self._template_face = cv2.imread(face_img_path)

    def run(self):
        """Main loop: track faces, optionally swap in the template face,
        mirror, annotate, and display each frame."""
        self._window_manager.create_window(self._window_name)
        while self._window_manager.is_window_created(self._window_name):
            self._capture_manager.enter_frame()
            frame = self._capture_manager.frame
            if frame is None:
                print "get None frame!"
                break
            # process frame
            # detect face
            self._face_tracker.update(frame)
            face_num = len(self._face_tracker.faces)
            face_rect = None if face_num == 0 else \
                self._face_tracker.faces[0].face_rect
            if self._show_face_rect:
                txt_str = 'face_num: {}'.format(face_num)
                for face in self._face_tracker.faces:
                    x, y, w, h = face.face_rect
                    cv2.rectangle(frame, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
                    # only show the 1st face
                    break
            if face_rect is not None and self._swap_face and \
                    self._template_face is not None:
                x, y, w, h = face_rect
                template_face = self._template_face.copy()
                template_face = cv2.resize(template_face, (w, h))
                # simply paste
                # frame[y:y+h,x:x+w] = template_face
                # or use seamless clone
                mask = 255 * np.ones(template_face.shape, template_face.dtype)
                # NOTE(review): `w / 2` is integer division under Python 2;
                # under Python 3 this would be a float and seamlessClone
                # expects an int center — confirm before porting.
                center = (x + w / 2, y + h / 2)
                frame = cv2.seamlessClone(template_face, frame, mask, center, \
                                          cv2.MIXED_CLONE)
                # frame = cv2.seamlessClone(template_face, frame, mask, center, \
                #     cv2.NORMAL_CLONE)
            # show frame window
            if self._should_mirror:
                # horizontal flipping
                frame = cv2.flip(frame, 1)
            # draw text
            if self._show_face_rect:
                cv2.putText(frame, txt_str, (50, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
            self._window_manager.show_window(self._window_name,
                                             frame)
            self._capture_manager.exit_frame()
            self._window_manager.process_event()

    def on_keypress(self, keycode):
        """m -> toggle mirroring; s -> toggle face swap;
        f -> toggle face rectangle; Esc -> quit."""
        if keycode == ord('m'):
            self._should_mirror = not self._should_mirror
        elif keycode == ord('s'):
            self._swap_face = not self._swap_face
        elif keycode == ord('f'):
            self._show_face_rect = not self._show_face_rect
        elif keycode == 27:  # Escape
            self._window_manager.destroy_all_window()
def set_foreground_window(pid=None, cmdline=None, class_name=None, window_text=None):
    """Bring the first window matching the given criteria to the foreground.

    All criteria are optional; they are forwarded unchanged to WindowManager.
    """
    manager = WindowManager(pid, cmdline, class_name, window_text)
    manager.set_foreground()
def __init__(self):
    """Assemble the Tk root plus the manager -> display -> input pipeline."""
    self.root = tk.Tk()
    grid_manager = WindowManager(Globals.NumCols, Globals.NumRows)
    self.windowManager = grid_manager
    self.display = Display(self.root, grid_manager)
    self.inputHandler = InputHandler(self.display.getWidget())