def test_press_release(self):
    """Verify that press/release of single keys and modifier combos emit the expected event stream."""
    keyboard.press("a")
    self.assertEqual(self.flush_events(), [(KEY_DOWN, "a")])
    keyboard.release("a")
    self.assertEqual(self.flush_events(), [(KEY_UP, "a")])
    keyboard.press("shift+a")
    # Modifier goes down before the letter.
    self.assertEqual(self.flush_events(), [(KEY_DOWN, "shift"), (KEY_DOWN, "a")])
    keyboard.release("shift+a")
    # Release order is reversed: letter up before the modifier.
    self.assertEqual(self.flush_events(), [(KEY_UP, "a"), (KEY_UP, "shift")])
    keyboard.press_and_release("a")
    # press_and_release emits exactly one down followed by one up.
    self.assertEqual(self.flush_events(), [(KEY_DOWN, "a"), (KEY_UP, "a")])
def write_word(self, word):
    '''
    Deletes the word currently being typed and writes the received word.

    :param word: word to be typed in its place
    :return: None
    '''
    # ctrl+backspace deletes the whole word to the left of the caret.
    keyboard.press_and_release("ctrl + backspace")
    keyboard.write(word)
    keyboard.press_and_release("space")
# NOTE(review): fragment of a capture loop — the enclosing `while` is outside
# this view and the nesting below is reconstructed from a flattened source;
# verify against the original file.
    # Centre of the detected marker's bounding box.
    topLeft = (int(topLeft[0]), int(topLeft[1]))
    cX = int((topLeft[0] + bottomRight[0]) / 2.0)
    cY = int((topLeft[1] + bottomRight[1]) / 2.0)
    frame = cv2.flip(frame, 1)  # mirror for a selfie-style view
    if cY < LINE:
        cv2.putText(frame, "Up", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
    else:
        cv2.putText(frame, "Down" + str(current_time), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
    # NOTE(review): this adds the absolute epoch time every frame; a frame
    # delta (time.time() - previous_time) was probably intended — confirm.
    current_time += time.time()
    if current_time > TIME and markerID == 5:
        keyboard.press_and_release("enter")
        current_time = 0
    cv2.namedWindow('Frame', cv2.WINDOW_AUTOSIZE)
    #cv2.rectangle(frame, (W // 4, H // 4), ((W // 4) * 3, (H // 4) * 3), (0, 0, 255), 3)
    # Horizontal trigger line the marker is compared against.
    cv2.line(frame, (0, LINE), (W, LINE), (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:  # ESC exits the loop
        break
    print('fps - ', 1 / (time.time() - timeCheck))
cv2.destroyAllWindows()
vs.stop()
def copy_and_paste_list_file(self):
    # Copy-and-paste operation driven entirely through Explorer keystrokes:
    # find the user-list CSV in Downloads, copy it, and paste it into the
    # work folder.
    keyboard.press_and_release('windows+e')  # open Explorer
    time.sleep(5)
    keyboard.press_and_release('ctrl+l, backspace')  # focus address bar, clear it
    time.sleep(5)
    # NOTE(review): "\D" is not a recognized escape so the backslash survives,
    # but a raw string (r"\Downloads") would be safer — confirm before changing.
    keyboard.write(HOME_DIR + "\Downloads")
    keyboard.press_and_release('enter')
    time.sleep(5)
    keyboard.press_and_release('ctrl+l')  # address bar again
    keyboard.press_and_release('tab')     # move focus into the search box
    time.sleep(5)
    keyboard.write(RIYOUSYA_LIST_KEYWORD)  # search keyword for the user-list file
    time.sleep(5)
    keyboard.press_and_release('enter')
    time.sleep(3)
    keyboard.press_and_release('tab')
    time.sleep(3)
    keyboard.press_and_release('tab')
    time.sleep(3)
    keyboard.press_and_release('down,up')  # select the first search result
    # Copy the user batch file.
    keyboard.press_and_release('ctrl+c')
    keyboard.press_and_release('ctrl+l, backspace')
    time.sleep(5)
    keyboard.write(PWD + "\work")  # navigate to the work folder
    keyboard.press_and_release('enter')
    time.sleep(5)
    keyboard.press_and_release('ctrl+l')
    time.sleep(5)
    keyboard.press_and_release('tab')
    time.sleep(3)
    keyboard.press_and_release('tab')
    time.sleep(3)
    keyboard.press_and_release('tab')  # move focus into the file pane
    time.sleep(3)
    keyboard.press_and_release('ctrl+v')  # paste the copied file
# Open the given path in Notepad (non-blocking) and register auto-closing
# bracket/quote hotkeys plus CSV-driven abbreviations.
import subprocess as sp
sp.Popen(['notepad.exe', path])
try:
    data = pd.read_csv('Key_abbr.csv')
except:
    # NOTE(review): bare except swallows every error, and the `data` accesses
    # below then raise NameError anyway — confirm intended behavior.
    print('unable to find the required files')
abr = data['Press']      # trigger strings
result = data['Result']  # replacement strings
x = np.array(abr)
y = np.array(result)
# Auto-close pairs: type the closing character, then move the caret back
# inside the pair with a left-arrow.
# NOTE(review): the unshifted triggers ("'" and '[') also call
# keyboard.release('shift'), which should be a no-op there — presumably
# copy-pasted from the shifted variants; verify.
keyboard.add_hotkey(
    'shift+"', lambda: (keyboard.write('"'), keyboard.release('shift'),
                        keyboard.press_and_release('left arrow')))
keyboard.add_hotkey(
    "'", lambda: (keyboard.write("'"), keyboard.release('shift'),
                  keyboard.press_and_release('left arrow')))
keyboard.add_hotkey(
    'shift+{', lambda: (keyboard.write('}'), keyboard.release('shift'),
                        keyboard.press_and_release('left arrow')))
keyboard.add_hotkey(
    '[', lambda: (keyboard.write(']'), keyboard.release('shift'),
                  keyboard.press_and_release('left arrow')))
keyboard.add_hotkey(
    'shift+(', lambda: (keyboard.write(')'), keyboard.release('shift'),
                        keyboard.press_and_release('left arrow')))
# Register the first two abbreviations from the CSV.
keyboard.add_abbreviation(x[0], y[0])
keyboard.add_abbreviation(x[1], y[1])
# Read phone/message pairs from a CSV and open WhatsApp Web send links.
# NOTE(review): indentation reconstructed from a flattened source — the
# nesting of close()/open_new_tab relative to the for loop is ambiguous;
# verify against the original file.
import webbrowser
import keyboard
import time
import csv

with open('whatsappPhone.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    for row in reader:
        print(row[1])  # message text
    # NOTE(review): redundant inside a `with` block, and `row` below refers
    # to the LAST row only — confirm whether this was meant to be per-row.
    csvFile.close()
    webbrowser.open_new_tab('https://web.whatsapp.com/send?phone=' + row[0] + '&text=' + row[1])
    print("apretando esc")
    # Dismiss the browser's focus-stealing dialog.
    keyboard.press_and_release('esc')
    print("ya apreto esc")
# https://web.whatsapp.com/send?phone=553513220999&text=oasdoasodas
def press(k):
    """Tap key *k* once, after waiting `pause_time` milliseconds.

    :param k: key name understood by keyboard.press_and_release.
    """
    wait_seconds = pause_time * 0.001  # ms -> s
    time.sleep(wait_seconds)
    keyboard.press_and_release(k)
def Build(struct):
    """Build *struct* in-game by typing /tp and /setblock chat commands.

    :param struct: iterable of rows; each row is an iterable of block names.
    Relies on module globals `sleepBuffer` (initial delay, seconds) and
    `blockBuffer` (per-keystroke delay, seconds).
    """
    global blockBuffer
    print("Starting build in", sleepBuffer, "seconds...")
    time.sleep(sleepBuffer)
    print("Building...")
    for row in struct:
        time.sleep(blockBuffer)
        # Teleport back to the start of the row (negative X offset = row length).
        tp_back = '/tp @p ~' + str(len(row) * -1) + ' ~ ~1'
        keyboard.press_and_release('t')  # open chat
        time.sleep(blockBuffer)
        keyboard.write(tp_back)
        time.sleep(blockBuffer)
        keyboard.press_and_release('enter')
        time.sleep(blockBuffer)
        # BUG FIX: the original inner loop was `for i in i:`, shadowing the
        # outer loop variable; renamed to `block` (behavior unchanged).
        for block in row:
            setblock_cmd = '/setblock ~ ~-2 ~ ' + block
            keyboard.press_and_release('t')
            time.sleep(blockBuffer)
            keyboard.write('/tp @p ~1 ~ ~')  # step one block forward
            time.sleep(blockBuffer)
            keyboard.press_and_release('enter')
            time.sleep(blockBuffer)
            keyboard.press_and_release('t')
            time.sleep(blockBuffer)
            keyboard.write(setblock_cmd)
            time.sleep(blockBuffer)
            keyboard.press_and_release('enter')
            time.sleep(blockBuffer)
def wig_split():
    # Trigger the wig-split action bound to the 'p' key.
    keyboard.press_and_release('p')
def type_in_chat(msg: str) -> None:
    """Open the in-game chat box, type *msg*, and send it."""
    keyboard.press_and_release('enter')  # open chat
    time.sleep(0.25)                     # let the chat box gain focus
    keyboard.write(msg, 0.01)            # 10 ms per-character delay
    keyboard.press_and_release('enter')  # send
    time.sleep(0.1)
def single_button(key):
    """Tap *key* once (press immediately followed by release)."""
    keyboard.press_and_release(key)
# print corresponding gestures which are in their ranges font = cv2.FONT_HERSHEY_SIMPLEX if areacnt < 2000: cv2.putText(frame, 'Put hand in the box', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA) else: print(arearatio) if arearatio < 12: cv2.putText(frame, '0', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA) elif arearatio < 50: cv2.putText(frame, '1', (0, 50), font, 2, (0, 0, 255), 3, cv2.LINE_AA) keyboard.press_and_release('space + r') # show the windows cv2.imshow('mask', mask) cv2.imshow('frame', frame) except: pass k = cv2.waitKey(5) & 0xFF if k == 27: break cv2.destroyAllWindows() cap.release()
def hit_home_scroll():
    # Tap 'i', then jump the cursor to a fixed screen position.
    # NOTE(review): coordinates assume a specific monitor layout — confirm.
    keyboard.press_and_release('i')
    mouse.move(3400, 1000, absolute=True, duration=0.01)
def denote_mine():
    # Short settle delay before tapping 'd' (presumably the mine-mark hotkey).
    time.sleep(0.4)
    keyboard.press_and_release('d')
# Fragment of a Selenium YouTube automation: search a song, play it
# fullscreen for 20 s, then clear the search box and type a new query.
searchbutton = browser.find_element_by_css_selector("#search")
searchbutton.send_keys("Beyonce Dangerous")
startsearch = browser.find_element_by_css_selector("#search-icon-legacy > yt-icon")
startsearch.click()
time.sleep(5)
firstresult = browser.find_element_by_css_selector("#video-title > yt-formatted-string")
firstresult.click()
# 'f' toggles YouTube fullscreen.
# NOTE(review): keyboard.press() without a matching release leaves the key
# logically held down — press_and_release was probably intended; confirm.
keyboard.press("f")
time.sleep(20)
keyboard.press("f")
time.sleep(0.5)
#searchbutton.send_keys(ctrl+a)
#seachbutton.send_keys(keyboard.press("BACKSPACE"))
searchbutton1 = browser.find_element_by_css_selector("#search")
#actionChains = ActionChains(browser)
#actionChains.double_click(searchbutton1).perform()
searchbutton1.click()
# Select-all then delete to clear the search box.
keyboard.press_and_release("CONTROL+a")
time.sleep(1)
keyboard.press("BACKSPACE")
time.sleep(3)
searchbutton1.send_keys("GIBUS IS THA BOSS!")
def test_press_and_release(self):
    # press_and_release should emit exactly one down followed by one up event.
    keyboard.press_and_release('a')
    self.do([], d_a + u_a)
def camera_lock():
    # Tap 'y' (presumably the camera-lock hotkey in the target game).
    keyboard.press_and_release('y')
# NOTE(review): fragment — the opening `if`/loop and the enclosing class are
# outside this view; indentation reconstructed from a flattened source.
        elif proximo2:
            # Click the "next" button and record a success.
            pg.click(pg.center(proximo2))
            self.acertos = 1
            self.saldo += self.renda
            self.teste = 1
            # Summary report (Portuguese labels kept verbatim — runtime strings).
            print("Relatorio"
                  f"\nSaldo = {self.saldo}"
                  f"\nAcertos {self.acertos}"
                  f"\nErros no bt1 {self.errosbt1}"
                  f"\nErros no bt2 {self.errosbt2}"
                  f"\nErros no bt3 {self.errosbt3}")
            break
        elif falha == 2:
            # Two consecutive failures: refresh the page and bail out.
            kb.press_and_release('f5')
            self.teste = 1
            break
        falha += 1
        self.errosbt3 += 1

bot = Instagram()
while True:
    # Run 20 iterations, then refresh the page and pause.
    for _ in range(20):
        bot.loop()
    kb.press_and_release('f5')
    sleep(4)
#combo position com_1_icon_pos = {'left': 861, 'top': 593, 'width': 18, 'height': 18} com_2_icon_pos = {'left': 837, 'top': 593, 'width': 18, 'height': 18} com_3_icon_pos = {'left': 815, 'top': 593, 'width': 18, 'height': 18} com_4_icon_pos = {'left': 791, 'top': 593, 'width': 18, 'height': 18} com_5_icon_pos = {'left': 746, 'top': 593, 'width': 18, 'height': 18} #combo note count com1 = 5 com2 = 6 com3 = 7 com4 = 8 com5 = 10 pag.moveTo(x=200, y=200, duration=0.0) pag.mouseDown() pag.mouseUp() kb.press_and_release('1') print("jam is now on!") time.sleep(1) kb.press_and_release('f1') print("go!") #freestyle_jam(com1,com_1_icon_pos) #freestyle_jam(com2,com_2_icon_pos) #freestyle_jam(com3,com_3_icon_pos) #freestyle_jam(com4,com_4_icon_pos) freestyle_jam(com5, com_5_icon_pos)
# NOTE(review): fragment — `words` comes from a loop outside this view.
print(words)
vocab[words[0]] = words[-2]  # map prompt word -> answer word
# print(vocab)
# while 1:
#     print(pyautogui.position())
#     sleep(1)
# pyautogui.displayMousePosition()
# Screen region (pixels) containing the prompt text.
topLeftCorner = (363, 457)
bottomRightCorner = (1448, 616)
LEFT = topLeftCorner[0]
TOP = topLeftCorner[1]
WIDTH = bottomRightCorner[0] - topLeftCorner[0]
HEIGHT = bottomRightCorner[1] - topLeftCorner[1]
keyboard.wait('esc')  # arm: start only after ESC is pressed
while not keyboard.is_pressed('q'):  # hold 'q' to stop
    img = pyautogui.screenshot(region=(LEFT, TOP, WIDTH, HEIGHT))
    output = pytesseract.image_to_string(img).lower()
    # keep only the text before the first newline or slash
    output = re.split('\n|/', output)[0]
    keyboard.write(vocab[output])  # type the looked-up answer
    keyboard.press_and_release('enter')
    sleep(0.1)
# C:\Program Files\Tesseract-OCR
# x, y = 363 457
# x, y, 1448 616
# Toggleable key spammer: arm/disarm with scan code 70, beeping on each
# state change, and spam '1, 2, 3, 4, 5, R' every 4 seconds while armed.
import keyboard
import winsound
import mouse
import time

# press right shift to turn on and turn off
# NOTE(review): the comment above says right shift, but the code waits on
# scan code 70 (scroll lock) — confirm which key is intended.
while 1:
    # TURN ON, block until scroll lock(70) has been stroked
    keyboard.wait(70)
    frequency = 2000  # Set Frequency To 2000 Hertz
    duration = 100  # Set Duration, 1000 ms == 1 second
    for i in range(3):
        winsound.Beep(frequency, duration)  # three short beeps = armed
    while 1:
        if keyboard.is_pressed(70):  # scroll lock again disarms
            break
        keyboard.press_and_release('1, 2, 3, 4, 5, R')
        time.sleep(4)
    # TURN OFF, block until scroll lock(70) has been stroked
    frequency = 2000
    duration = 1000
    winsound.Beep(frequency, duration)  # one long beep = disarmed
def scanSingle(self):
    """Single gesture scanner

    Loads the single-scan UI, then loops over camera frames: crops the
    hand region of interest, thresholds it with the UI slider value,
    saves the mask to "1.png", and runs the gesture predictor on it
    until ESC (27) is pressed.

    NOTE(review): indentation reconstructed from a flattened source —
    verify nesting against the original file.
    """
    try:
        clearfunc(self.cam)  # best-effort cleanup of a previous session
    except:
        pass
    uic.loadUi('UI_Files/scan_single.ui', self)
    self.setWindowTitle(self.title)
    # NOTE(review): connect() returns a truthy connection object, so this
    # branch is always taken — presumably unintended; verify.
    if (self.scan_single.clicked.connect(self.scanSingle)):
        controlTimer(self)
    self.pushButton_2.clicked.connect(lambda: clearfunc(self.cam))
    self.linkButton.clicked.connect(openimg)
    self.scan_single.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    try:
        self.exit_button.clicked.connect(lambda: clearfunc(self.cam))
    except:
        pass
    self.exit_button.clicked.connect(self.quitApplication)
    img_text = ''  # latest prediction, shown one frame behind
    while True:
        ret, frame = self.cam.read()
        frame = cv2.flip(frame, 1)  # mirror view
        try:
            frame = cv2.resize(frame, (321, 270))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # green rectangle marks the hand region of interest
            img1 = cv2.rectangle(frame, (150, 50), (300, 200), (0, 255, 0),
                                 thickness=2, lineType=8, shift=0)
        except:
            # frame grab failed: synthesize ESC to close the window
            keyboard.press_and_release('esc')
        height1, width1, channel1 = img1.shape
        step1 = channel1 * width1
        # create QImage from image
        qImg1 = QImage(img1.data, width1, height1, step1, QImage.Format_RGB888)
        # show image in img_label
        try:
            self.label_3.setPixmap(QPixmap.fromImage(qImg1))
            slider1 = self.trackbar.value()  # threshold from the UI slider
        except:
            pass
        lower_blue = np.array([0, 0, 0])
        upper_blue = np.array([179, 255, slider1])
        imcrop = img1[52:198, 152:298]  # inside of the green rectangle
        hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
        cv2.imshow("mask", mask)
        cv2.setWindowProperty("mask", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.resizeWindow("mask", 118, 108)
        cv2.moveWindow("mask", 894, 271)  # pin next to the Qt window
        hwnd = winGuiAuto.findTopWindow("mask")
        # keep the mask window on top without stealing focus
        win32gui.SetWindowPos(
            hwnd, win32con.HWND_TOP, 0, 0, 0, 0,
            win32con.SWP_NOMOVE | win32con.SWP_NOSIZE | win32con.SWP_NOACTIVATE)
        try:
            self.textBrowser.setText("\n\n\t" + str(img_text))
        except:
            pass
        img_name = "1.png"
        save_img = cv2.resize(mask, (image_x, image_y))
        cv2.imwrite(img_name, save_img)  # predictor reads this file from disk
        img_text = predictor()
        if cv2.waitKey(1) == 27:  # ESC exits
            break
    self.cam.release()
    cv2.destroyAllWindows()
def steruj(do_sterowania):
    """Consume keys from the *do_sterowania* queue and replay each DOWN key.

    :param do_sterowania: queue yielding Key enum members; get() blocks
        until a key arrives.
    """
    # Number of taps sent per received DOWN key; the original repeated the
    # press_and_release call ten times verbatim — collapsed into a loop.
    REPEATS = 10
    while True:
        key = do_sterowania.get()
        if key == Key.DOWN:
            for _ in range(REPEATS):
                keyboard.press_and_release(key.value)
        print(key)
# Demo of the `keyboard` module's main features.
import keyboard

keyboard.press_and_release('shift+s, space')
keyboard.write('The quick brown fox jumps over the lazy dog.')
keyboard.add_hotkey('ctrl+shift+a', print, args=('triggered', 'hotkey'))
# Press PAGE UP then PAGE DOWN to type "foobar".
keyboard.add_hotkey('page up, page down', lambda: keyboard.write('foobar'))
# Blocks until you press esc.
keyboard.wait('esc')
# Record events until 'esc' is pressed.
recorded = keyboard.record(until='esc')
# Then replay back at three times the speed.
keyboard.play(recorded, speed_factor=3)
# Type @@ then press space to replace with abbreviation.
keyboard.add_abbreviation('@@', '*****@*****.**')
# Block forever, like `while True`.
keyboard.wait()
def scanSent(self):
    """sentence formation module

    Loads the sentence-scan UI, then loops over camera frames.  Pressing
    'c' confirms the current gesture prediction and appends it to the
    sentence buffer; 'shift+s' saves the buffer to temp.txt and exits;
    ESC (27) exits without saving.

    NOTE(review): indentation reconstructed from a flattened source —
    verify nesting against the original file.
    """
    try:
        clearfunc(self.cam)  # best-effort cleanup of a previous session
    except:
        pass
    uic.loadUi('UI_Files/scan_sent.ui', self)
    self.setWindowTitle(self.title)
    self.exit_button_2.clicked.connect(self.quitApplication)
    # NOTE(review): connect() returns a truthy connection object, so this
    # branch is always taken — presumably unintended; verify.
    if (self.scan_sen.clicked.connect(self.scanSent)):
        controlTimer(self)
    self.speech_gen.clicked.connect(to_speech)
    try:
        self.stop.clicked.connect(lambda: clearfunc2(self.cam))
    except:
        pass
    self.linkButton.clicked.connect(openimg)
    self.scan_sen.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    self.exit_button_2.setCursor(
        QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    self.stop.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    self.save.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    self.speech_gen.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    #try:
    #self.exit_button.clicked.connect(lambda:clearfunc(self.cam))
    #except:
    #pass
    img_text = ''       # latest single-gesture prediction
    append_text = ''    # letters accumulated for the current word
    new_text = ''       # whole sentence so far (shown in the UI)
    finalBuffer = []    # completed words to be saved / spoken
    counts = 0
    while True:
        ret, frame = self.cam.read()
        frame = cv2.flip(frame, 1)  # mirror view
        try:
            frame = cv2.resize(frame, (331, 310))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # green rectangle marks the hand region of interest
            img = cv2.rectangle(frame, (150, 50), (300, 200), (0, 255, 0),
                                thickness=2, lineType=8, shift=0)
        except:
            # frame grab failed: synthesize ESC to close the window
            keyboard.press_and_release('esc')
            keyboard.press_and_release('esc')
        height, width, channel = img.shape
        step = channel * width
        # create QImage from image
        qImg = QImage(img.data, width, height, step, QImage.Format_RGB888)
        # show image in img_label
        try:
            self.label_3.setPixmap(QPixmap.fromImage(qImg))
            slider = self.trackbar.value()  # threshold from the UI slider
        except:
            pass
        lower_blue = np.array([0, slider, 0])
        upper_blue = np.array([179, 255, 255])
        imcrop = img[52:198, 152:298]  # inside of the green rectangle
        hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)
        mask1 = cv2.inRange(hsv, lower_blue, upper_blue)
        cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
        cv2.imshow("mask", mask1)
        cv2.setWindowProperty("mask", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.resizeWindow("mask", 118, 108)
        cv2.moveWindow("mask", 713, 264)
        img_name = "1.png"
        save_img = cv2.resize(mask1, (image_x, image_y))
        cv2.imwrite(img_name, save_img)  # predictor reads this file from disk
        img_text = predictor()
        hwnd = winGuiAuto.findTopWindow("mask")
        # keep the mask window on top without stealing focus
        win32gui.SetWindowPos(
            hwnd, win32con.HWND_TOP, 0, 0, 0, 0,
            win32con.SWP_NOMOVE | win32con.SWP_NOSIZE | win32con.SWP_NOACTIVATE)
        try:
            self.textBrowser.setText("\n " + str(img_text))
        except:
            pass
        # 'c' confirms the current prediction and appends it
        if cv2.waitKey(1) & 0xFF == ord('c'):
            try:
                counts += 1
                append_text += img_text
                new_text += img_text
                self.textBrowser_4.setText(new_text)
            except:
                append_text += ''
            if (len(append_text) > 1):
                finalBuffer.append(append_text)
                append_text = ''
            else:
                finalBuffer.append(append_text)
                append_text = ''
            try:
                self.save.clicked.connect(
                    lambda: saveBuff(self, self.cam, finalBuffer))
            except:
                pass
        if cv2.waitKey(1) == 27:  # ESC exits without saving
            break
        # shift+s: dump the buffer to temp.txt and leave the loop
        if keyboard.is_pressed('shift+s'):
            if (len(finalBuffer) >= 1):
                f = open("temp.txt", "w")
                for i in finalBuffer:
                    f.write(i)
                f.close()
            break
    self.cam.release()
    cv2.destroyAllWindows()
    if os.path.exists('temp.txt'):
        QtWidgets.QMessageBox.about(
            self, "Information",
            "File is temporarily saved. Please click Speech Button ")
    try:
        self.textBrowser.setText(" ")
    except:
        pass
#利用者一括ファイル batch_user_update_file_button = browser.find_element_by_xpath( "/html/body/div[1]/div/div[2]/div[2]/div[1]/div/div/form/div[2]/table/tbody/tr/td[3]/button[2]" ) batch_user_update_file_button.click() time.sleep(5) csvfiles = glob.glob(HOME_DIR + "\Downloads\*.csv") csvfiles.sort(key=os.path.getmtime) upload_csv_file_path = csvfiles[-1] # Update CSV Records csvu = CSVUpdater() upload_csv_work_file_path = csvu.update_csv(upload_csv_file_path) time.sleep(15) # Upload File browser.find_element_by_id("UserCsv").send_keys(upload_csv_work_file_path) batch_user_upload_file_button = browser.find_element_by_xpath( '//*[@id="btnAdd"]') batch_user_upload_file_button.click() time.sleep(4) keyboard.press_and_release('tab,tab,enter') time.sleep(15) browser.close() os.remove(upload_csv_file_path) os.remove(upload_csv_work_file_path) except Exception as e: print("######## Exception ########") print(e.message)
def go_to_province(province):
    """
    Automates keyboard shortcuts to generate log.

    :param province: Province number where battle occurs
    :return: True when all commands were executed
    """
    # exit messages, then open the go-to screen
    for hotkey in ('esc', 'g'):
        keyboard.press_and_release(hotkey)
    keyboard.write(str(province))  # select province
    # confirm, view casualties, back to map, try to add PD
    for hotkey in ('enter', 'c', 'esc', 'd'):
        keyboard.press_and_release(hotkey)
    return True
def cut():
    # HTTP handler: read the "angle" query parameter and replay it as a key.
    # NOTE(review): sends client-supplied text straight to press_and_release —
    # only acceptable on a trusted/local network; verify exposure.
    angle = request.args.get("angle")
    print("Angle:", angle)
    keyboard.press_and_release(angle)
    return json.dumps({})  # empty JSON response body
if hand is not None: thresh, segmented = hand #cv2.imshow("thresh",thresh) #uncomment this line to see the threshed image (x, y, w, h) = cv2.boundingRect(segmented) if (h > height // 2): h = height // 2 cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2) #remove this if you dont wish to #see the contours num_frames += 1 #code from line 97 to has to be changed for games having different controls if ((x + w / 2) < width / 2 and l[len(l) - 1] != 'left'): ky.press_and_release('w') #'w' key is pressed l.append('left') print( 'left' ) #comment this if you dont want to print where your hand currently is if ((x + w / 2) > width / 2 and l[len(l) - 1] != 'cen'): l.append('cen') print( 'cen' ) #comment this if you dont want to print where your hand currently is cv2.imshow('my_window', clone) #close the program when 'esc' key is pressed if ((cv2.waitKey(1) & 0xFF) == 27):
def get_fileinfo(self, alt_tab: bool = True, timeout=5):
    """Scrape a MediaInfo report for the focused file via the clipboard.

    Drives the player's MediaInfo hotkeys (ctrl+f1, alt+p), polls the
    clipboard until a valid report appears, restores the previous
    clipboard content, parses the "key : value" lines into per-stream
    dicts, and derives convenience fields (path, vc, vbd, fps, pix_fmt,
    w, h).  The result is cached in self._cache['fileinfo'].

    :param alt_tab: switch back to the previous window after copying.
    :param timeout: seconds to wait for a valid report.
    :raises TimeoutError: if no valid report appears within *timeout*.
    """
    const_general = 'General'
    const_complete_name = 'Complete name'
    # Preserve the current clipboard so it can be restored later.
    try:
        cb_old = clipboard.get() or ''
    except ostk.pywintypes.error as e:
        # NOTE(review): if this path runs, cb_old stays unbound and the
        # clipboard.set(cb_old) below raises NameError — confirm.
        warnings.warn(f'{e}\n{e!r}')
        time.sleep(1)
    t0 = time.time()
    clipboard.clear()
    self.focus()
    keyboard.press_and_release('ctrl+f1')  # open the MediaInfo panel
    for _ in range(5):
        keyboard.press_and_release('shift+tab')  # walk focus backwards
    keyboard.press_and_release('enter')
    self.gasp()
    keyboard.press_and_release('alt+p, esc')  # copy report, close panel
    self.gasp()
    if alt_tab:
        keyboard.press_and_release('alt+tab')
    # Poll the clipboard until the MediaInfo header appears.
    while True:
        if time.time() - t0 > timeout:
            raise TimeoutError
        self.gasp()
        try:
            text = clipboard.get() or ''
            clipboard.clear()
            clipboard.set(cb_old)  # restore the original clipboard
            lines = text.splitlines()
            line0 = getitem_default(lines, 0)
            line1 = getitem_default(lines, 1)
            if line0 == const_general and (
                    line1.startswith(const_complete_name)
                    or line1.startswith('Unique ID')):
                break
        except clipboard.OpenError:
            warnings.warn('clipboard open error')
        except ostk.pywintypes.error as e:
            warnings.warn(f'{e}\n{e!r}')
        time.sleep(1)
    space_and_colon = ' : '
    space_and_slash = ' / '
    data = current_node = {}
    # Parse "key : value" pairs; bare lines are section headers that
    # switch current_node to a new per-stream dict.
    for line in lines:
        if not line:
            continue
        if space_and_colon in line:
            k, v = line.split(space_and_colon, maxsplit=1)
            k: str = k.strip().lower()
            if space_and_slash in v:
                v = v.split(space_and_slash)  # multi-valued field
        else:
            # Section header, e.g. "Video #1".
            k, v = 'stream', line.strip().lower()
            type_name = v.split(' #')[0]
            if type_name == 'general':
                current_node = data
            else:
                current_node = {}
                data.setdefault(type_name, []).append(current_node)
        current_node[k] = v
    # Derive convenience fields from the first video stream.
    try:
        data['path'] = data['complete name']
        video_stream_0: dict = data['video'][0]
        data['vc'] = video_stream_0['format'].lower()
        data['vbd'] = int(
            str_remove_suffix(video_stream_0['bit depth'], ' bits'))
        str_frame_rate = 'frame rate'
        str_original_frame_rate = 'original frame rate'
        if str_frame_rate in video_stream_0:
            data['fps'] = float(video_stream_0[str_frame_rate].split()[0])
        elif str_original_frame_rate in video_stream_0:
            data['fps'] = float(
                video_stream_0[str_original_frame_rate].split()[0])
        # e.g. "yuv" + "420" + "p"
        data['pix_fmt'] = video_stream_0['color space'].lower() + \
            video_stream_0['chroma subsampling'].replace(':', '') + \
            video_stream_0.get('scan type', 'p')
        data['h'] = int(
            str_remove_suffix(video_stream_0['height'],
                              ' pixels').replace(' ', ''))
        data['w'] = int(
            str_remove_suffix(video_stream_0['width'],
                              ' pixels').replace(' ', ''))
    except KeyError as e:
        print(repr(e))
    self._cache['fileinfo'] = data
    return data
def initialize_keyboard():
    # Wait for the user to press ESC, then replay two ESC taps
    # (presumably to flush/close any open dialogs — confirm).
    keyboard.wait('esc')
    keyboard.press_and_release('esc')
    keyboard.press_and_release('esc')
time.sleep(2) # TODO: Update this count whenever enter a new month cycle total_months = 7 # download data in .csv format for each month current_month = 3 for x in range(total_months - current_month + 1): temp_month = driver.find_element_by_xpath("/html/body/div[4]/div[4]/ul/li[%s]" % current_month) time.sleep(2) temp_month.click() time.sleep(2) export_data.click() time.sleep(240) # update this if (current_month == 3): keyboard.press_and_release('down') keyboard.press_and_release('enter') else: keyboard.press_and_release('enter') print("Current month is: %s" % current_month) time_frame.click() time.sleep(2) current_month += 1 # all .csv files are downloaded to the /Downloads folder now parse them into the master .csv time.sleep(3) os.chdir("/Users/patrickutz/Downloads") file_list = glob.glob("tweet_*") # clear the master file f = open("/Users/patrickutz/Downloads/washabstract_twitter_data.csv", "w")
def worker(input_q, output_q, cap_params, frame_processed, poses):
    # Worker loop: detect the hand, classify its pose, track its centre over
    # a sliding window of frames, and translate swipes/zooms into arrow-key
    # presses.  Emits (frame, direction, predicted_label) on output_q.
    # NOTE(review): indentation reconstructed from a flattened source —
    # verify nesting against the original file.
    print(">> loading frozen model for worker")
    detection_graph, sess = detector_utils.load_inference_graph()
    sess = tf.Session(graph=detection_graph)
    print(">> loading keras model for worker")
    try:
        model, classification_graph, session = classifier.load_KerasGraph(
            "F:\Realtime_Hand_tracking\cnn\models\hand_poses_wGarbage_100.h5")
    except Exception as e:
        print(e)
    # Sliding windows of recent hand-centre coordinates and bounding areas.
    detection_centres_x = []
    detection_centres_y = []
    is_centres_filled = False   # True once the window holds enough samples
    detected = False            # a gesture was just emitted; reset window
    index = 0
    detection_area = []
    start_flag = False          # tracking enabled after a "Start" pose
    flag_start = pause_time = 0
    sensitivity = gui_sensitivity
    area = centre_x = centre_y = 0
    detection = ""
    direction = ""
    while True:
        predicted_label = ""
        frame = input_q.get()
        if (frame is not None):
            frame_processed += 1
            boxes, scores = detector_utils.detect_objects(frame,
                                                          detection_graph,
                                                          sess)
            # get region of interest
            res = detector_utils.get_box_image(cap_params['num_hands_detect'],
                                               cap_params['score_thresh'],
                                               scores, boxes,
                                               cap_params['im_width'],
                                               cap_params['im_height'], frame)
            # get boundary box (skipped during the post-gesture pause)
            if pause_time == 0:
                centre_x, centre_y, area = detector_utils.draw_box_on_image(
                    cap_params['num_hands_detect'],
                    cap_params["score_thresh"], scores, boxes,
                    cap_params['im_width'], cap_params['im_height'], frame)
            if pause_time > 0:
                pause_time -= 1
            # Append the newest sample; once full, drop the oldest.
            if is_centres_filled:
                detection_centres_x = detection_centres_x[1:10]
                detection_centres_y = detection_centres_y[1:10]
                detection_area = detection_area[1:10]
                detection_centres_x.append(centre_x)
                detection_centres_y.append(centre_y)
                detection_area.append(area)
            else:
                detection_centres_x.append(centre_x)
                detection_centres_y.append(centre_y)
                detection_area.append(area)
            if pause_time == 0:
                index += 1
            if index >= sensitivity:
                index = 0
                is_centres_filled = True
            # Classify the hand pose once per `sensitivity` frames.
            if index == 0:
                predicted_label = classify(res, model, classification_graph,
                                           session, poses)
                #print(predicted_label)
            if predicted_label == "Start" and flag_start == 0:
                #print("Start")
                detection = "Start tracking"
                start_flag = True
                flag_start = 1
            if detected:
                # Gesture emitted: clear history and pause for 30 frames.
                detection_centres_x = []
                detection_centres_y = []
                is_centres_filled = False
                index = 0
                detected = False
                detection_area = []
                frame_processed = 0
                pause_time = 30
            centres_x = detection_centres_x.copy()
            centres_y = detection_centres_y.copy()
            areas = detection_area.copy()
            # Drop falsy (zero / missed-detection) samples.
            centres_x = [v for v in centres_x if v]
            centres_y = [v for v in centres_y if v]
            areas = [a for a in areas if a]
            # angle_coordinate
            if len(centres_x) > 3 and is_centres_filled and len(centres_y) > 3 and len(areas) > 3 and start_flag:
                flag = 0
                dX = centres_x[-1] - centres_x[0]
                dY = centres_y[-1] - centres_y[0]
                if dX > 20 and dY > 20:
                    # Diagonal motion: decide by the angle of travel.
                    m = dY / dX
                    angle = math.degrees(math.atan(m))
                    if angle < 45:
                        flag = 1  # treat as horizontal
                    elif angle > 45:
                        flag = 2  # treat as vertical
                if dX > 100 and (abs(dY) < 20 or flag == 1):
                    direction = "Right"
                    keyboard.press_and_release('right')
                    detected = True
                    #print(direction)
                elif -dX > 100 and (abs(dY) < 20 or flag == 1):
                    direction = "Left"
                    keyboard.press_and_release('left')
                    detected = True
                    #print(direction)
                elif dY > 50 and (abs(dX) < 10 or flag == 2):
                    direction = "Down"
                    detected = True
                    #print(direction)
                elif -dY > 50 and (abs(dX) < 10 or flag == 2):
                    direction = "Up"
                    detected = True
                    #print(direction)
                elif areas[-1] - 3000 > areas[0] and abs(dX) < 30 and abs(dY) < 20:
                    direction = "Zoom in"
                    detected = True
                    #print(direction)
                elif areas[-1] < areas[0] - 3000 and abs(dX) < 10 and abs(dY) < 20:
                    direction = "Zoom out"
                    detected = True
                    #print(direction)
            output_q.put((frame, direction, predicted_label))
    sess.close()