def __init__(self):
    """Create the input-device controllers and pause briefly before use."""
    # Mouse and keyboard drivers (PyUserInput).
    self.mouse = PyMouse()
    self.keyboard = PyKeyboard()
    # Default delay, in seconds, between simulated actions.
    self.sleep = 0.5
    # Give the target application a moment before the first event is sent.
    time.sleep(1)
def right_click():
    """With 50% probability, right-click at screen position (960, 540).

    Bug fix: the original tested ``if c == 1`` where ``c`` is the shuffled
    *list* ``[0, 1]`` — a list never equals an int, so the click could never
    fire.  The intended test is on the first shuffled element.
    """
    choices = [0, 1]
    random.shuffle(choices)
    if choices[0] == 1:
        # button=2 is the right mouse button in PyMouse.
        PyMouse().click(960, 540, button=2)
# NOTE(review): orphaned mid-function fragment (its enclosing def/while and
# the 'if' matching the dangling 'else:' are not part of this snippet) from a
# "jump"-style game auto-player.  It finds the target block centre either by
# template match (first branch) or by Canny edge detection after blanking the
# player piece's contour, computes the jump distance, focuses an emulator
# window via win32gui, then presses the mouse for a time proportional to the
# distance (pag.dragTo).  Preserved verbatim because the original newlines
# and indentation were lost upstream and cannot be reconstructed safely.
# Embedded Chinese comments translate as: "edge detection"; "suppress the
# piece's contour from the edge-detection result"; "activate the
# total_control window"; "bring window to front"; "move mouse to x,y";
# "press".  The window title string is runtime data and is left untouched.
x_center, y_center = max_loc2[0] + w2 // 2, max_loc2[1] + h2 // 2 else: # 边缘检测 img_rgb = cv2.GaussianBlur(img_rgb, (5, 5), 0) canny_img = cv2.Canny(img_rgb, 1, 10) H, W = canny_img.shape # 消去小跳棋轮廓对边缘检测结果的干扰 for k in range(max_loc1[1], max_loc1[1] + 65): for b in range(max_loc1[0], max_loc1[0] + 25): canny_img[k][b] = 0 img_rgb, x_center, y_center = get_center(canny_img) plt.plot(x_center, y_center, 'b.') plt.plot(center1_loc[0], center1_loc[1], 'r.') distance = np.sqrt( (center1_loc[0] - x_center) ** 2 + (center1_loc[1] - y_center) ** 2 ) # 激活total_control界面 title=u'UNKNOWN-GENERIC_A15 (仅限非商业用途)' w1hd=win32gui.FindWindow(0,title) w2hd=win32gui.FindWindowEx(w1hd,None,None,None) win32gui.SetForegroundWindow(w2hd) # 窗口置于前端 m = PyMouse() m.move(300,600) # 鼠标移动到x,y位置 # dragTime = 0.0042*distance + 0.0118 dragTime = 0.0035*distance + 0.0132 pag.dragTo(300,600, dragTime) # 按压 count += 1 if count > 100 : break
def mouse_move(x, y):
    """Move the mouse pointer to screen coordinates (x, y)."""
    PyMouse().move(x, y)
def mouse_shoot_sniper(x=960, y=540):
    """Move the pointer to (x, y), pause briefly, then fire a single click.

    Fix: the original always clicked at the hard-coded centre (960, 540)
    even when called with other coordinates, contradicting its own comment
    ("move to (x, y) and shoot once") and the sibling ``mouse_shoot1`` which
    clicks at (x, y).  The click now lands where the pointer was moved.
    Default arguments preserve the old centre-screen behaviour for callers
    that pass no coordinates.
    """
    PyMouse().move(x, y)
    time.sleep(0.1)  # short "scope-in" delay so the move registers first
    PyMouse().click(x, y)
# Wiimote-driven light-pen mouse controller.  Reads webcam frames, isolates a
# bright marker against a continuously learned background, and maps its
# centroid to mouse movement; Wii remote buttons drive actions (A = click,
# B = drag, Plus/Minus = vertical scroll).  Keys: Esc quits, 'd' toggles
# drag, 'w' opens SculptGL in a browser.  The capture device is always
# released via the try/finally.
# NOTE(review): 'drag_mouse = ~drag_mouse' applies bitwise NOT to a bool
# (False -> -1, True -> -2 — both truthy), so the 'd' toggle can never turn
# drag back off; 'not drag_mouse' was almost certainly intended.
# NOTE(review): "staring detector" looks like a typo for "starting detector"
# (runtime string, left untouched here).
# NOTE(review): snippet preserved verbatim — the original newlines and
# indentation were lost upstream, so the code is not re-flowed.
def main(ns): cap = cv2.VideoCapture(ns.video_device_num) # creating camera object assert cap.isOpened(), ( "cv2.VideoCapture(X) cannot connect with a webcam on your machine." " Specify a video device by number, by looking at /dev/videoX," " where X is the relevant number") print("Hit Escape to exit") try: # initialize a mouse device mouse = PyMouse() wii = wiimote.WiiMote(ns.wii_mac_addr, 'Nintendo RVL-CNT-01') drag_mouse = False background = None # last_mouse_update_time = 0 # last_mouse_xy = None # time_delta = 0 # xy = None background = capture_background( (acquire_frame(cap)) for _ in iter(int, 1)) print('staring detector') while (cap.isOpened()): key = cv2.waitKey(30) & 0xff if key == 27: print("ending") break elif key == ord('d'): drag_mouse = ~drag_mouse elif key == ord('w'): webbrowser.open('https://stephaneginier.com/sculptgl/') # acquire a frame from the video device img = acquire_frame(cap) cv2.imshow('frame', img) img = binarize(img) cv2.imshow('binarized', img) img = remove_noise_morphology(img) background = capture_background2(img, background) kernel = np.ones((25, 25)) img = background_difference( img, cv2.morphologyEx(background, cv2.MORPH_DILATE, kernel)) _, img = cv2.threshold(img, 70, 255, cv2.THRESH_BINARY) # img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel) # kernel = np.ones((20, 20)) # img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel) xy = select_nearest_largest_contour_centroid(img, mouse.position()) # cv2.imshow('fg', fg) # cv2.imshow('binarized', binarized) # l = np.zeros(img.shape + (3, )) # l[:, :, 1] = img + background # l[:, :, 0] = img # l[:, :, 2] = background # cv2.imshow('light detector', l) cv2.imshow('light detector', img) cv2.imshow('background', background) # cv2.imshow('backgroundimg', img) if xy is not None: click_mouse = wii.buttons['A'] drag_mouse = wii.buttons['B'] vertical_scroll = \ (wii.buttons['Plus'] and 1) \ or (wii.buttons['Minus'] and -1) update_mouse_position(mouse, *xy, drag=drag_mouse, click=click_mouse, 
vertical_scroll=vertical_scroll, bounding_box=((8, 141), (1914, 990))) # if xy is None: # # record time # time_delta = time.time() - last_mouse_update_time # else: # if \ # time_delta > 1 \ # and time_delta < 3 \ # and last_mouse_xy \ # and True: #euclidean(xy, last_mouse_xy) < 200: # # and time_delta < .75 \ # print('click', drag_mouse) # drag_mouse = True # ~drag_mouse # else: # drag_mouse = False # time_delta = 0 # last_mouse_update_time = time.time() # last_mouse_xy = xy # update_mouse_position(mouse, *xy, drag=drag_mouse) # # try: # # print(time_delta, euclidean(last_mouse_xy, xy)) # # except: # # print('init') # print('--') finally: cap.release()
def continuously_print_mouse_position():
    """Poll the mouse forever, printing its (x, y) position on each pass.

    Intended as a debugging aid; never returns (infinite loop).
    """
    tracker = PyMouse()
    while True:
        print(tracker.position())
def __init__(self):
    """Set up the mouse controller used by this object (``self.m``)."""
    self.m = PyMouse()
# Scanning-speller initialiser (Python 2 — note the 'print' statements).
# Reads the platform path from './.pathToATPlatform', then parses two
# 'key = value' parameter files: general UI settings (time gap, colours,
# film/music volume) and speller settings (voice, vowel/Polish-letter
# colours).  On any malformed line it prints a Polish error message
# ("incorrectly described parameters" / "error in file ... at line") and
# falls back to hard-coded defaults.  It then builds the two letter grids,
# resets row/column scanning state, parks the mouse cursor near the
# bottom-right corner via PyMouse, and loads typewriter/phoneme sounds with
# pygame.mixer.
# NOTE(review): one malformed line resets *all* parameters to defaults,
# discarding values already parsed — possibly unintended.
# NOTE(review): the phoneme list comprehension binds 'self.sound' as its
# loop variable, leaking an attribute onto the instance.
# NOTE(review): preserved verbatim — the original newlines and indentation
# were lost upstream, so the code is not re-flowed.
def initializeParameters(self): with open('./.pathToATPlatform', 'r') as textFile: self.pathToATPlatform = textFile.readline() with open(self.pathToATPlatform + 'parameters', 'r') as parametersFile: for line in parametersFile: if line[:line.find('=') - 1] == 'timeGap': self.timeGap = int(line[line.rfind('=') + 2:-1]) elif line[:line.find('=') - 1] == 'backgroundColour': self.backgroundColour = line[line.rfind('=') + 2:-1] elif line[:line.find('=') - 1] == 'textColour': self.textColour = line[line.rfind('=') + 2:-1] elif line[:line.find('=') - 1] == 'scanningColour': self.scanningColour = line[line.rfind('=') + 2:-1] elif line[:line.find('=') - 1] == 'selectionColour': self.selectionColour = line[line.rfind('=') + 2:-1] elif line[:line.find('=') - 1] == 'filmVolume': self.filmVolumeLevel = int(line[line.rfind('=') + 2:-1]) elif line[:line.find('=') - 1] == 'musicVolume': self.musicVolumeLevel = int(line[line.rfind('=') + 2:-1]) elif not line.isspace(): print 'Niewłaściwie opisane parametry' print 'Błąd w pliku parameters w linii', line self.timeGap = 1500 self.backgroundColour = 'white' self.textColour = 'black' self.scanningColour = '#E7FAFD' self.selectionColour = '#9EE4EF' self.filmVolumeLevel = 100 self.musicVolumeLevel = 40 with open(self.pathToATPlatform + 'spellerParameters', 'r') as parametersFile: for line in parametersFile: if line[:line.find('=') - 1] == 'voice': self.voice = line[line.rfind('=') + 2:-1] elif line[:line.find('=') - 1] == 'vowelColour': self.vowelColour = line[line.rfind('=') + 2:-1] elif line[:line.find('=') - 1] == 'polishLettersColour': self.polishLettersColour = line[line.rfind('=') + 2:-1] elif not line.isspace(): print 'Niewłaściwie opisane parametry' print 'Błąd w pliku spellerParameters w linii', line self.voice = 'False' self.vowelColour = 'False' self.polishLettersColour = 'False' self.labels = [ 'A B C D E F G H I J K L M N O P R S T U W Y Z SPECIAL_CHARACTERS UNDO SPEAK SAVE SPACJA OPEN EXIT' .split(), '1 2 3 4 5 6 7 8 9 0 + - 
* / = % $ & . , ; : " ? ! @ # ( ) [ ] { } < > ~ UNDO SPEAK SAVE SPACJA OPEN EXIT' .split() ] self.colouredLabels = ['A', 'E', 'I', 'O', 'U', 'Y'] self.numberOfRows = [4, 5] self.numberOfColumns = [8, 9] self.flag = 'row' self.rowIteration = 0 self.columnIteration = 0 self.countRows = 0 self.countColumns = 0 self.maxNumberOfRows = 2 self.maxNumberOfColumns = 2 self.numberOfPresses = 1 self.subSizerNumber = 0 self.mouseCursor = PyMouse() self.mousePosition = self.winWidth - 8, self.winHeight - 8 self.mouseCursor.move(*self.mousePosition) mixer.init() self.typewriterKeySound = mixer.Sound(self.pathToATPlatform + 'sounds/typewriter_key.wav') self.typewriterForwardSound = mixer.Sound( self.pathToATPlatform + 'sounds/typewriter_forward.wav') self.typewriterSpaceSound = mixer.Sound(self.pathToATPlatform + 'sounds/typewriter_space.wav') if self.voice == 'True': self.phones = glob.glob(self.pathToATPlatform + 'sounds/phone/*') self.phoneLabels = [ item[item.rfind('/') + 1:item.rfind('.')] for item in self.phones ] self.sounds = [ mixer.Sound(self.sound) for self.sound in self.phones ] self.SetBackgroundColour('black')
def __init__(self):
    """Prepare input-device controllers and open the sign-in page.

    Launches a maximised Chrome session via Selenium and navigates to the
    hard-coded sign-in URL.
    """
    self.mouse = PyMouse()
    self.keyboard = PyKeyboard()
    # Browser session used by the rest of this object.
    self.dr = webdriver.Chrome()
    self.dr.maximize_window()
    self.dr.get('http://39.106.93.201/signIn')
# Initialiser for a LAN mouse/keyboard-sharing tool (Python 2 — note
# 'ConfigParser' and the 'print online' statement).  Reads 'conf.conf' for
# the neighbour machines' addresses (right/left/up/down), control and
# file-transfer ports, socket buffer sizes, screen-edge margin, file-drop
# path, and debug switches; then populates the module globals it declares:
# per-direction (address, port) tuples for control and file traffic,
# online flags, PyMouse/PyKeyboard controllers, the pyHook manager, and the
# various transfer/position state flags.  Index 0 of the direction arrays is
# unused; directions are 1..4 (right, left, up, down).
# NOTE(review): 'dest_port'/'dest_port_file' are hard-coded to 8001/8002
# and override nothing read from the config — confirm this is intentional.
# NOTE(review): heavy reliance on module globals makes this init
# order-sensitive; preserved verbatim because the original newlines and
# indentation were lost upstream.
def init(destaddr): global online, sleep_time, hm, mouse, keyboard, status, clipboard_open, sock global screen_bound_ui global pymousepos, pymousepos_old, mouse_pos_hide, margin global my_address, my_port, my_port_file, my_address_port, my_address_port_file global dest_address, dest_port, dest_port_file, dest_address_port, dest_address_port_file global SOCKET_SND_BUF_SIZE, SOCKET_RCV_BUF_SIZE, is_mouse_left_down, is_files_ready, mlock, needmovmouse, ratio_pos, destination_ip_port global destination_ip_port_file global controled, controled_ip global file_receive_path global debug_out, debug_con, debug_esc cp = ConfigParser.ConfigParser() cp.read('conf.conf') config_section = 'info' debug_section = 'debug' # not global destination_ip = [0,0,0,0,0] online = [0,0,0,0,0] destination_ip[1] = cp.get(config_section, 'right') destination_ip[2] = cp.get(config_section, 'left') destination_ip[3] = cp.get(config_section, 'up') destination_ip[4] = cp.get(config_section, 'down') my_port = cp.getint(config_section, 'port') my_port_file = cp.getint(config_section, 'file_port') SOCKET_SND_BUF_SIZE = cp.getint(config_section, 'SOCKET_SND_BUF_SIZE') SOCKET_RCV_BUF_SIZE = cp.getint(config_section, 'SOCKET_RCV_BUF_SIZE') margin = cp.getint(config_section, 'margin') file_receive_path = cp.get(config_section, 'file_receive_path') debug_out = cp.getint(debug_section, 'output') debug_con = 1 - cp.getint(debug_section, 'connection') debug_esc = cp.getint(debug_section, 'escape_hot_key') destination_ip_port = [("127.0.0.1", my_port), ("127.0.0.1", my_port), ("127.0.0.1", my_port), ("127.0.0.1", my_port), ("127.0.0.1", my_port)] destination_ip_port_file = [("127.0.0.1", my_port_file), ("127.0.0.1", my_port_file), ("127.0.0.1", my_port_file), ("127.0.0.1", my_port_file), ("127.0.0.1", my_port_file)] dest_port = 8001 dest_port_file = 8002 # for speed up # i: should unify with status and online index, which start with 1 for i in [1, 2, 3, 4]: #TODO: check if ip correct if destination_ip[i] != 
'0': online[i] = True else: online[i] = False destination_ip_port[i] = (destination_ip[i], dest_port) destination_ip_port_file[i] = (destination_ip[i], dest_port_file) print online my_address = '0.0.0.0' dest_address = destaddr # TODO: tell others my port number # online = [False, True, True, True, True] # null, right left up down, [0] means nothing, start from 1, status = 0 mouse = PyMouse() keyboard = PyKeyboard() sleep_time = 0 clipboard_open = False hm = pyHook.HookManager() screen_bound_ui = (0, 0) # attention: mouse_pos_hide is used in Hook mouse_pos_hide = (0, 0) pymousepos = pymousepos_old = [] controled = False controled_ip = "" my_address_port = (my_address, my_port) my_address_port_file = (my_address, my_port_file) dest_address_port = (dest_address, my_port) dest_address_port_file = (dest_address, my_port_file) is_mouse_left_down = False is_files_ready = False ratio_pos = (1, 1)
#Opencv and dependencies import cv2 import numpy as np #our libraries import util as ut import svm_train as st import hand_util as hu #other dependencies from pymouse import PyMouse import moosegesture as mges #PyMouse the library to control mouse movements from python m1 = PyMouse() #capturing device cam = int(raw_input("Enter Camera Index : ")) cap = cv2.VideoCapture(cam) #training the svm model = st.trainSVM(3, 40, 'TrainData') #initilizing values thresh = 120 frame_count = 0 color = (0, 0, 255) res = ut.get_screen_res() w_screen = int(res['w']) + 200 h_screen = int(res['h']) + 200
# Mycroft skill handler: announces it is watching, then runs LBPH face
# recognition on webcam frames.  It drives an on-screen emoji window
# (located/raised via wmctrl, changed by simulated left/right arrow key
# presses through pynput's Controller) to react to what it sees, speaks
# 'unknown'/'recognized' dialogs, and returns True once confident: either
# the requested person ``r_name`` has been seen, or every visible face has
# stayed recognised for 50 consecutive frames.  The camera is released and
# the emoji window dismissed (Esc + wmctrl refocus) on both the success and
# exception paths.
# NOTE(review): 'confidence = 100 - confidence' inverts the LBPH distance
# score, since lower distance means a better match (per the code's own
# comment "Confidence starts backwards").
# NOTE(review): the broad 'except Exception' path performs the same cleanup
# but swallows the error after printing it.
# NOTE(review): preserved verbatim — the original newlines and indentation
# were lost upstream, so the code is not re-flowed.
def handle_watch(self, message, r_name='i', remind=False): self.speak_dialog('starting') keyboard = Controller() m = PyMouse() right = Key.right left = Key.left recognizer = cv2.face.LBPHFaceRecognizer_create() recognizer.read('/home/craghack/Documents/camera/trainer.yml') faceCascade = cv2.CascadeClassifier( "/home/craghack/Documents/camera/haarcascade_frontalface_default.xml" ) #font = cv2.FONT_HERSHEY_SIMPLEX names = ['unknown'] ids = open("/home/craghack/Documents/camera/ids.txt", "r") for line in ids: names.append(line.lower().split(' ')[2].rstrip()) ids.close() # Initialize and start realtime video capture cam = cv2.VideoCapture(0) #cam.set(3, 640) # set video widht #cam.set(4, 480) # set video height minW = 0.1 * cam.get(3) minH = 0.1 * cam.get(4) emoji = None cmd = subprocess.check_output("wmctrl -l | grep \".png\" | cut -b 23-", shell=True).decode("utf-8").rstrip() if not cmd == "": emoji = cmd.split('.')[0] os.system("wmctrl -a \"" + cmd + "\"") else: emoji = "suspicious" os.system( "xdg-open /home/craghack/Downloads/emojis/suspicious.png") sleep(.25) m.click(350, 460) sleep(.25) m.move(799, 479) if emoji == "happy": keyboard.press(left) keyboard.release(left) emoji = "suspicious" elif emoji == "suprised": keyboard.press(right) keyboard.release(right) emoji = "suspicious" self.speak_dialog('watching') confident = False try: count = 0 recognized = [] while not confident: rval, frame = cam.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(int(minW), int(minH))) for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) ID, confidence = recognizer.predict(gray[y:y + h, x:x + w]) confidence = 100 - confidence # Confidence starts backwards for some reason name = names[ID - 1] if (name == "unknown") and (confidence > 50): # confident = True if emoji == "suspicious": keyboard.press(left) keyboard.release(left) sleep(.75) keyboard.press(right) 
keyboard.release(right) elif emoji == "happy": keyboard.press(right) keyboard.release(right) sleep(.75) keyboard.press(right) keyboard.release(right) emoji = "suspicious" recognized.append("unknown") self.speak_dialog('unknown') elif (confidence > 15): # confident = True if name not in recognized: if emoji == "suspicious": keyboard.press(right) keyboard.release(right) emoji = "happy" elif emoji == "happy": keyboard.press(right) keyboard.release(right) sleep(.75) keyboard.press(left) keyboard.release(left) recognized.append(name) #dialog = id + '.face' response = {'name': name} self.speak_dialog('recognized', data=response) if not r_name == 'i': if r_name in recognized: confident = True else: if emoji == "happy": sleep(.5) keyboard.press(left) keyboard.release(left) emoji = "suspicious" elif len(recognized) != 0 and len(recognized) >= len(faces): count += 1 if (count >= 50): confident = True else: count = 0 cam.release() except Exception as e: print(e) keyboard.press(Key.esc) keyboard.release(Key.esc) os.system("wmctrl -a \"start-mycroft.sh debug\"") os.system("wmctrl -a \"craghack@Cyclops: ~/mycroft-core\"") if not remind: sleep(1) keyboard.press(Key.esc) keyboard.release(Key.esc) os.system("wmctrl -a \"start-mycroft.sh debug\"") os.system("wmctrl -a \"craghack@Cyclops: ~/mycroft-core\"") return confident
# Module-level setup for a PyQt5/OpenCV marker-tracking mouse controller:
# shared PyMouse instance, tracking-box geometry constants (a BOX_SIZE
# square offset by OFFSET, with its four corners precomputed), the webcam
# capture object, and a morphology kernel.
# NOTE(review): the trailing 'sort_corners' definition is truncated by this
# snippet (its body continues beyond what is visible), so the whole span is
# preserved verbatim rather than re-flowed — the function appears to start
# by averaging the four corners into a centre point.
import numpy as np import cv2 import sys from PyQt5 import QtGui, QtCore, QtWidgets from threading import Thread from pymouse import PyMouse __author__ = 'rbanalagay' MOUSE = PyMouse() OFFSET = 200 COLOR = (0, 0, 255) BOX_SIZE = 500 CIRCLE_RADIUS = 30 cap = cv2.VideoCapture(0) BOX_CORNERS = ((OFFSET, OFFSET), (OFFSET + BOX_SIZE, OFFSET), (OFFSET, OFFSET + BOX_SIZE), (OFFSET + BOX_SIZE, OFFSET + BOX_SIZE)) KERNEL = np.ones((10, 10), np.uint8) def sort_corners(corners): center = np.zeros(2) for corner in corners: center += np.array(corner) center /= 4.0 top_points = []
def simulate_click_pc():
    """Perform one left-click at the fixed screen position (150, 650)."""
    pointer = PyMouse()
    pointer.click(150, 650, 1)  # third argument 1 = left mouse button
# Scanning-interface initialiser for a memory (pairs) game.  Loads the
# platform path from './.pathToAP', imports the external 'reader' module
# from that path and applies every 'key=value' parameter to the instance
# via setattr (int-converted when possible), defines the tile colour
# legend, builds a 4x5 Memory_game board, resets the row/column scanning
# state, parks the mouse cursor near the bottom-right corner via PyMouse
# (skipped under eye-tracker control), and loads switch/press sounds with
# pygame.mixer when enabled.
# NOTE(review): depends on attributes set dynamically by the parameter
# file (self.control, self.winWidth/..., self.switchSound, ...) — their
# presence is assumed, not checked.
# NOTE(review): preserved verbatim — the original newlines and indentation
# were lost upstream, so the code is not re-flowed.
def initializeParameters(self): with open('./.pathToAP', 'r') as textFile: self.pathToAP = textFile.readline() sys.path.append(self.pathToAP) from reader import reader reader = reader() reader.readParameters() parameters = reader.getParameters() for item in parameters: try: setattr(self, item[:item.find('=')], int(item[item.find('=') + 1:])) except ValueError: setattr(self, item[:item.find('=')], item[item.find('=') + 1:]) self.colorlegend = { '0': '#E5D9D9', '1': '#5545EA', '2': '#B229B7', '3': '#13CE1A', '4': '#CE1355', '5': '#F9F504', '6': '#FF7504', '7': '#FF0404', '8': '#000000' } self.options = ['restart', 'exit'] self.winstate = False self.move_info = False self.revert = False self.gamesize = (4, 5) #do ustalania self.indexes = np.arange(1, self.gamesize[0] * self.gamesize[1] / 2 + 1) self.game = memory.Memory_game(self.gamesize) self.labels = self.game.displayfield.flatten() self.numberOfRows = self.gamesize[0] + 1 self.numberOfColumns = self.gamesize[1] self.flag = 'row' self.pressFlag = False self.rowIteration = 0 self.columnIteration = 0 self.countRows = 0 self.countColumns = 0 self.maxNumberOfColumns = 2 self.maxNumberOfRows = 2 self.numberOfPresses = 1 self.subSizerNumber = 0 if self.control != 'tracker': self.mouseCursor = PyMouse() self.mousePosition = self.winWidth - 8 - self.xBorder, self.winHeight - 8 - self.yBorder if self.switchSound.lower() != 'off' or self.pressSound.lower( ) != 'off': mixer.init() self.switchingSound = mixer.Sound(self.pathToAP + '/sounds/switchSound.ogg') self.pressingSound = mixer.Sound(self.pathToAP + '/sounds/pressSound.ogg') self.SetBackgroundColour('black') self.path = self.pathToAP
# Windows QQ auto-login helper: module imports, shared input-device
# instances, and the login routine.
import win32gui
import subprocess
import time
import win32api
import win32con
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import os
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow

m = PyMouse()  # create the mouse instance
k = PyKeyboard()  # create the keyboard instance


def qq_load(account, password, qq_exe):
    """Launch the QQ client and click into its account input box.

    Locates the QQ window, derives the account box position from the
    window rectangle, and clicks it so the account can be typed.
    NOTE(review): 'password' is unused here and the actual typing step is
    not present in this snippet — it may continue elsewhere.
    """
    qq_exe = qq_exe  # NOTE(review): self-assignment, a no-op
    subprocess.Popen([qq_exe])  # open the QQ program via subprocess
    time.sleep(3)  # give QQ some time to start up
    # Get the window handle — arg 1: class name, arg 2: window title.
    handle = win32gui.FindWindow(None, 'QQ')
    left, top, right, bottom = win32gui.GetWindowRect(handle)
    print(left, top, right, bottom)
    time.sleep(2)
    new_x = int(left + (right - left) / 2) - 70  # account input box x
    new_y = int(top + (bottom - top) / 2) + 10   # account input box y
    # Click the account input box (1 = left button, 2 = double-click).
    m.click(new_x, new_y, 1, 2)
    # Simulate typing the account string.
    print('%s 正在登陆……' % account)
    time.sleep(3)
def __init__(self):
    """Instantiate the mouse and keyboard controllers."""
    self.mouse = PyMouse()
    # Attribute name kept exactly as-is ("pykeybord") for existing callers.
    self.pykeybord = PyKeyboard()
# Screen-watching auto-clicker: grabs a fixed bar region of the screen with
# mss, resamples each frame down, folds it into an accumulative
# neighbour-sum map, estimates the ball's x position plus a confidence,
# and clicks there (debounced to at most one click per 0.1 s) for 500
# iterations, finally printing the achieved iterations/sec.
# NOTE(review): 'confidence_threshold' is assigned four times in a row —
# only the final 0.05 takes effect; the earlier values look like leftover
# tuning experiments.
# NOTE(review): 'already_clicked', 'original_bar' and 'first_click_complete'
# are set but never used.
# NOTE(review): preserved verbatim — the original newlines and indentation
# (including the commented-out alternatives) were lost upstream, so the
# code is not re-flowed.
def test(): m = PyMouse() already_clicked = False with mss.mss() as screen: original_bar = { "left": 60, "top": 880, "width": 205 + (635 - 60), "height": 200, } bar = { "left": 60, "top": 880, "width": 205 + (635 - 60), "height": 200 } start_time = time() time_last_clicked = -inf iterations = 500 (resample_width, resample_height) = (40, 25) # (resample_width, resample_height) = (400, 5) # (resample_width, resample_height) = (300, 2) acc_map = dict() # accumulative sum of top and left neighbours first_click_complete = False mouse_y = bar["top"] + bar["height"] for iteration in range(0, iterations): # while True: image = grab_image(screen, bar) resampled_image = resize_image(image, resample_width, resample_height) update_accumulative_map(resampled_image, acc_map) acc_x_position, confidence = search_for_ball( acc_map, resample_width, resample_height) mouse_x = convert_to_mouse_x_position( acc_x_position, resample_width, bar["left"], bar["left"] + bar["width"], ) confidence_threshold = 0.05 confidence_threshold = 0.03 confidence_threshold = 0.14424235 confidence_threshold = 0.05 if confidence >= confidence_threshold: pass # print(mouse_x, confidence) else: pass # print("NOPE") if (confidence >= confidence_threshold) and ( time() - time_last_clicked > 0.1): # if not first_click_complete: # bar["top"] -= 140 # mouse_y -= 0 # first_click_complete = True m.click(mouse_x, mouse_y) time_last_clicked = time() print("clicked:", (mouse_x, mouse_y)) end_time = time() duration = end_time - start_time print( "duration:", duration, " iterations/sec:", float(iterations) / duration, )
# Dragonfire voice-assistant command dispatcher.  Normalises the spoken
# command (upper-cased, punctuation stripped), then walks one long if/elif
# chain: wake/sleep phrases; pending multiple-choice answers (Wikipedia
# disambiguation); exact/partial virtual-assistant matches; user title and
# "call me" configuration persisted via TinyDB; temperature lookup through
# pyowm; launching common desktop applications per desktop environment;
# simulated keyboard and mouse actions via PyKeyboard/PyMouse (tabs,
# navigation, scrolling, play/pause); Wikipedia/YouTube/Google/Google
# Images searches; and finally arithmetic, learned-response, omniscient and
# deep-conversation fallbacks.  Returns True when the command was handled
# (or ignored while inactive).
# NOTE(review): an OpenWeatherMap API key is hard-coded in the source —
# it should live in configuration, not code.
# NOTE(review): 'thread.interrupt_main()' uses the Python 2 module name
# ('_thread' in Python 3), consistent with this snippet's vintage.
# NOTE(review): several handlers swallow errors broadly ('except Exception:
# return True', 'except BaseException: pass'), hiding failures.
# NOTE(review): preserved verbatim — the original newlines and indentation
# were lost upstream (string literals even span the mangled lines), so the
# code is not re-flowed.
def command(com, args, tw_user=None): global e if (e.is_set()): # System Tray Icon exit must trigger this exit(0) if not com or not isinstance(com, str): return False original_com = com global inactive global user_full_name global user_prefix global config_file userin.twitter_user = tw_user com = com.upper() com = re.sub(r'([^\s\w]|_)+', '', com).strip() print("You: " + com) if inactive and com not in ("DRAGONFIRE", "DRAGON FIRE", "WAKE UP", "HEY"): return True if USER_ANSWERING['status']: if com.startswith("FIRST") or com.startswith( "THE FIRST") or com.startswith("SECOND") or com.startswith( "THE SECOND") or com.startswith( "THIRD") or com.startswith("THE THIRD"): USER_ANSWERING['status'] = False selection = None if com.startswith("FIRST") or com.startswith("THE FIRST"): selection = 0 elif com.startswith("SECOND") or com.startswith("THE SECOND"): selection = 1 elif com.startswith("THIRD") or com.startswith("THE THIRD"): selection = 2 if USER_ANSWERING['for'] == 'wikipedia': with nostderr(): search_query = USER_ANSWERING['options'][selection] try: wikiresult = wikipedia.search(search_query) if len(wikiresult) == 0: userin.say( "Sorry, " + user_prefix + ". But I couldn't find anything about " + search_query + " in Wikipedia.") return True wikipage = wikipedia.page(wikiresult[0]) wikicontent = "".join([ i if ord(i) < 128 else ' ' for i in wikipage.content ]) wikicontent = re.sub(r'\([^)]*\)', '', wikicontent) userin.define_and_execute( ["sensible-browser", wikipage.url], search_query) userin.say(wikicontent) return True except requests.exceptions.ConnectionError: userin.define_and_execute( [" "], "Wikipedia connection error.") userin.say( "Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers.") return True except Exception: return True if com in ("DRAGONFIRE", "DRAGON FIRE", "WAKE UP", "HEY"): tts_kill() inactive = False userin.define([" "], " ") userin.say(choice([ "Yes, " + user_prefix + ".", "Yes. 
I'm waiting.", "What is your order?", "Ready for the orders!", user_prefix + ", tell me your wish." ])) elif "GO TO SLEEP" == com: tts_kill() inactive = True userin.define_and_execute( ["echo"], "Dragonfire deactivated. To reactivate say 'Dragonfire!' or 'Wake Up!'") userin.say("I'm going to sleep") elif com in ("ENOUGH", "SHUT UP"): print("Dragonfire quiets.") tts_kill() elif VirtualAssistant.exact_match(com): return True # the request has been handled elif VirtualAssistant.in_match(com): return True # the request has been handled elif ("SEARCH" in com or "FIND" in com) and VirtualAssistant.search_command(com): pass # the request has been handled elif com in ("MY TITLE IS LADY", "I'M A LADY", "I'M A WOMAN", "I'M A GIRL"): tts_kill() config_file.update({ 'gender': 'female' }, Query().datatype == 'gender') user_prefix = "My Lady" userin.define([" "], " ") userin.say("Pardon, " + user_prefix + ".") elif com in ("MY TITLE IS SIR", "I'M A MAN", "I'M A BOY"): tts_kill() config_file.update({ 'gender': 'male' }, Query().datatype == 'gender') user_prefix = "Sir" userin.define([" "], " ") userin.say("Pardon, " + user_prefix + ".") elif com.startswith("CALL ME "): tts_kill() callme_config = config_file.search(Query().datatype == 'callme') if callme_config: config_file.update({ 'title': original_com[8:].lower() }, Query().datatype == 'callme') else: config_file.insert({ 'datatype': 'callme', 'title': original_com[8:].lower() }) user_prefix = original_com[8:].lower().encode("utf8") userin.define([" "], " ") userin.say("Pardon, " + user_prefix + ".") # only for The United States today but prepared for all countries. Also # only for celsius degrees today. --> by Radan Liska :-) elif "WHAT" in com and "TEMPERATURE" in com: tts_kill() capture = re.search( "(?:WHAT IS|WHAT'S) THE TEMPERATURE (?:IN|ON|AT|OF)? 
(?P<city>.*)", com) if capture: city = capture.group('city') owm = pyowm.OWM("16d66c84e82424f0f8e62c3e3b27b574") reg = owm.city_id_registry() weather = owm.weather_at_id( reg.ids_for(city)[0][0]).get_weather() fmt = "The temperature in {} is {} degrees celsius" msg = fmt.format(city, weather.get_temperature('celsius')['temp']) userin.define_and_execute([" "], msg) userin.say(msg) elif "FILE MANAGER" in com or "OPEN FILES" == com: tts_kill() userin.define_and_execute(["dolphin"], "File Manager") # KDE neon userin.define_and_execute(["pantheon-files"], "File Manager") # elementary OS userin.define_and_execute(["nautilus", "--browser"], "File Manager") # Ubuntu userin.say("File Manager") elif "OPEN CAMERA" == com: tts_kill() userin.define_and_execute(["kamoso"], "Camera") # KDE neon userin.define_and_execute(["snap-photobooth"], "Camera") # elementary OS userin.define_and_execute(["cheese"], "Camera") # Ubuntu userin.say("Camera") elif "OPEN CALENDAR" == com: tts_kill() userin.define_and_execute(["korganizer"], "Calendar") # KDE neon userin.define_and_execute(["maya-calendar"], "Calendar") # elementary OS userin.define_and_execute(["orage"], "Calendar") # Ubuntu userin.say("Calendar") elif "OPEN CALCULATOR" == com: tts_kill() userin.define_and_execute(["kcalc"], "Calculator") # KDE neon userin.define_and_execute(["pantheon-calculator"], "Calculator") # elementary OS userin.define_and_execute(["gnome-calculator"], "Calculator") # Ubuntu userin.say("Calculator") elif "SOFTWARE CENTER" in com: tts_kill() userin.define_and_execute(["plasma-discover"], "Software Center") # KDE neon userin.define_and_execute( ["software-center"], "Software Center") # elementary OS & Ubuntu userin.say("Software Center") elif com.startswith("KEYBOARD "): tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() for character in original_com[9:]: k.tap_key(character) k.tap_key(" ") elif com == "ENTER": tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.tap_key(k.enter_key) 
elif com == "NEW TAB": tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.press_keys([k.control_l_key, 't']) elif com == "SWITCH TAB": tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.press_keys([k.control_l_key, k.tab_key]) elif com in ("CLOSE", "ESCAPE"): tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.press_keys([k.control_l_key, 'w']) k.tap_key(k.escape_key) elif com == "GO BACK": tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.press_keys([k.alt_l_key, k.left_key]) elif com == "GO FORWARD": tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.press_keys([k.alt_l_key, k.right_key]) elif com == "SCROLL LEFT": tts_kill() with nostdout(): with nostderr(): m = PyMouse() m.scroll(0, -5) elif com == "SCROLL RIGHT": tts_kill() with nostdout(): with nostderr(): m = PyMouse() m.scroll(0, 5) elif com == "SCROLL UP": tts_kill() with nostdout(): with nostderr(): m = PyMouse() m.scroll(5, 0) elif com == "SCROLL DOWN": tts_kill() with nostdout(): with nostderr(): m = PyMouse() m.scroll(-5, 0) elif com in ("PLAY", "PAUSE", "SPACEBAR"): tts_kill() with nostdout(): with nostderr(): k = PyKeyboard() k.tap_key(" ") elif "SHUTDOWN THE COMPUTER" == com: tts_kill() userin.define(["sudo", "poweroff"], "Shutting down") userin.say("Shutting down") userin.execute(3) elif com in ("GOODBYE", "GOOD BYE", "BYE BYE", "SEE YOU LATER", "CATCH YOU LATER"): tts_kill() userin.define([" "], " ") userin.say("Goodbye, " + user_prefix) # raise KeyboardInterrupt thread.interrupt_main() elif "WIKIPEDIA" in com and ("SEARCH" in com or "FIND" in com): tts_kill() with nostderr(): capture = re.search( "(?:SEARCH|FIND) (?P<query>.*) (?:IN|ON|AT|USING)? WIKIPEDIA", com) if capture: search_query = capture.group('query') try: wikiresult = wikipedia.search(search_query) if len(wikiresult) == 0: userin.say("Sorry, " + user_prefix + ". 
But I couldn't find anything about " + search_query + " in Wikipedia.") return True wikipage = wikipedia.page(wikiresult[0]) wikicontent = "".join([ i if ord(i) < 128 else ' ' for i in wikipage.content ]) wikicontent = re.sub(r'\([^)]*\)', '', wikicontent) userin.define_and_execute( ["sensible-browser", wikipage.url], search_query) userin.say(wikicontent) except requests.exceptions.ConnectionError: userin.define_and_execute( [" "], "Wikipedia connection error.") userin.say( "Sorry, " + user_prefix + ". But I'm unable to connect to Wikipedia servers." ) except wikipedia.exceptions.DisambiguationError as disambiguation: USER_ANSWERING['status'] = True USER_ANSWERING['for'] = 'wikipedia' USER_ANSWERING['reason'] = 'disambiguation' USER_ANSWERING['options'] = disambiguation.options[:3] notify = "Wikipedia disambiguation. Which one of these you meant?:\n - " + \ disambiguation.options[0] message = user_prefix + ", there is a disambiguation. Which one of these you meant? " + \ disambiguation.options[0] for option in disambiguation.options[1:3]: message += ", or " + option notify += "\n - " + option notify += '\nSay, for example: "THE FIRST ONE" to choose.' userin.define_and_execute([" "], notify) userin.say(message) except BaseException: pass elif "YOUTUBE" in com and ("SEARCH" in com or "FIND" in com): tts_kill() with nostdout(): with nostderr(): capture = re.search( "(?:SEARCH|FIND) (?P<query>.*) (?:IN|ON|AT|USING)? YOUTUBE", com) if capture: search_query = capture.group('query') info = youtube_dl.YoutubeDL({}).extract_info( 'ytsearch:' + search_query, download=False, ie_key='YoutubeSearch') if len(info['entries']) > 0: youtube_title = info['entries'][0]['title'] youtube_url = "https://www.youtube.com/watch?v=%s" % ( info['entries'][0]['id']) userin.define(["sensible-browser", youtube_url], youtube_title) youtube_title = "".join([ i if ord(i) < 128 else ' ' for i in youtube_title ]) else: youtube_title = "No video found, " + user_prefix + "." 
userin.define(" ", " ") userin.execute(0) userin.say(youtube_title) time.sleep(5) k = PyKeyboard() k.tap_key(k.tab_key) k.tap_key(k.tab_key) k.tap_key(k.tab_key) k.tap_key(k.tab_key) k.tap_key('f') elif ("GOOGLE" in com or "WEB" in com) and "IMAGE" not in com and ("SEARCH" in com or "FIND" in com): tts_kill() with nostdout(): with nostderr(): capture = re.search( "(?:SEARCH|FIND) (?P<query>.*) (?:IN|ON|AT|USING)? (?:GOOGLE|WEB)?", com) if capture: search_query = capture.group('query') tab_url = "http://google.com/?#q=" + search_query userin.define_and_execute( ["sensible-browser", tab_url], search_query) userin.say(search_query) elif ("GOOGLE" in com or "WEB" in com) and "IMAGE" in com and ("SEARCH" in com or "FIND" in com): tts_kill() with nostdout(): with nostderr(): capture = re.search("(?:SEARCH IMAGES OF|FIND IMAGES OF|SEARCH|FIND) " "(?P<query>.*) (?:IN|ON|AT|USING)? " "(?:GOOGLE|WEB|GOOGLE IMAGES|WEB IMAGES)?", com) if capture: search_query = capture.group('query') tab_url = "http://google.com/?#q=" + search_query + "&tbm=isch" userin.define(["sensible-browser", tab_url], search_query) userin.execute(0) userin.say(search_query) else: tts_kill() arithmetic_response = arithmeticParser(com) if arithmetic_response: userin.define([" "], " ") userin.say(arithmetic_response) else: learn_response = learn_.respond(original_com) if learn_response: userin.define([" "], " ") userin.say(learn_response) else: if not omniscient_.respond(original_com, not args["silent"], userin, user_prefix, args["server"]): dc_response = dc.respond(original_com, user_prefix) if dc_response: userin.define([" "], " ") userin.say(dc_response)
def __init__(self):
    """Initialise the base class, input controllers and connection list."""
    super().__init__()
    self.mouse = PyMouse()
    self.keyboard = PyKeyboard()
    # Active connections handled by this object; starts empty.
    self.connections = []
def __init__(self):
    """Create the mouse (``self.m``) and keyboard (``self.k``) controllers."""
    self.m = PyMouse()
    self.k = PyKeyboard()
def mouse_shoot1(x=960, y=540):
    """Fire a single click at (x, y); defaults target the screen centre."""
    PyMouse().click(x, y)
def handle_home(self, message):
    """Queue a reminder, watch the webcam for faces, and react via an emoji window.

    NOTE(review): looks like a Mycroft skill intent handler (uses
    ``self.settings`` / ``self.speak_dialog``) -- confirm against the
    enclosing class. Flow: (1) store the reminder from *message*; if more
    than one is already queued, just acknowledge and return. (2) Run LBPH
    face recognition on the webcam; drive an emoji viewer window (wmctrl +
    arrow-key presses) to react to known/unknown faces, speaking the queued
    reminders to the first recognised person. Exits once recognition has
    been stable for ~50 consecutive frames.
    """
    keyboard = Controller()  # pynput-style keyboard controller for arrow/esc keys
    m = PyMouse()
    right = Key.right
    left = Key.left
    # Re-phrase the reminder from the speaker's perspective to the listener's
    # ("my"/"our" -> "your"); padding with a space makes whole-word matching work.
    reminder = message.data['reminder']
    reminder = (' ' + reminder).replace(' my ', ' your ').strip()
    reminder = (' ' + reminder).replace(' our ', ' your ').strip()
    # self.settings['wigh_reminders'] = []
    # return
    if 'wigh_reminders' in self.settings:
        self.settings['wigh_reminders'].append(reminder)
        # More than one reminder queued: acknowledge and defer delivery.
        if len(self.settings['wigh_reminders']) > 1:
            self.speak_dialog('home')
            return
    else:
        self.settings['wigh_reminders'] = [reminder]
    # reminders = []
    # reminders.append(message.data['reminder'])
    # self.speak_dialog('home')
    try:
        # Load the pre-trained LBPH recogniser and Haar face detector
        # from fixed, machine-specific paths.
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read('/home/craghack/Documents/camera/trainer.yml')
        faceCascade = cv2.CascadeClassifier(
            "/home/craghack/Documents/camera/haarcascade_frontalface_default.xml"
        )
        #font = cv2.FONT_HERSHEY_SIMPLEX
        # names[0] is the fallback label; the rest come from ids.txt
        # (third whitespace-separated field of each line).
        names = ['unknown']
        ids = open("/home/craghack/Documents/camera/ids.txt", "r")
        for line in ids:
            names.append(line.split(' ')[2].rstrip())
        ids.close()
        # Initialize and start realtime video capture
        cam = cv2.VideoCapture(0)
        #cam.set(3, 640) # set video widht
        #cam.set(4, 480) # set video height
        # Minimum face size = 10% of the frame in each dimension.
        minW = 0.1 * cam.get(3)
        minH = 0.1 * cam.get(4)
        emoji = None
        # Find an already-open emoji image window (title ends in ".png");
        # raise it if present, otherwise open the "suspicious" emoji.
        cmd = subprocess.check_output(
            "wmctrl -l | grep \".png\" | cut -b 23-",
            shell=True).decode("utf-8").rstrip()
        if not cmd == "":
            emoji = cmd.split('.')[0]
            os.system("wmctrl -a \"" + cmd + "\"")
        else:
            emoji = "suspicious"
            os.system(
                "xdg-open /home/craghack/Downloads/emojis/suspicious.png && sleep 1"
            )
        # Click/park the mouse at hard-coded screen positions
        # (presumably to focus the viewer -- TODO confirm coordinates).
        sleep(.25)
        m.click(350, 460)
        sleep(.25)
        m.move(799, 479)
        # Arrow keys page through the emoji images; reset the viewer to the
        # "suspicious" image before detection starts.
        if emoji == "happy":
            keyboard.press(left)
            keyboard.release(left)
            emoji = "suspicious"
        elif emoji == "suprised":
            keyboard.press(right)
            keyboard.release(right)
            emoji = "suspicious"
        # self.speak_dialog('detection.face')
        self.speak_dialog('home')
        confident = False
        count = 0          # consecutive frames where everyone visible is recognised
        recognized = []    # labels already greeted this session
        while not confident:
            rval, frame = cam.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(int(minW), int(minH)))
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
                # Confidence starts backwards for some reason
                confidence = 100 - confidence
                id = names[id - 1]
                if (id == "unknown") and (confidence > 50):
                    # confident = True
                    # Stranger at the door: flick the emoji away and back.
                    if emoji == "suspicious":
                        keyboard.press(left)
                        keyboard.release(left)
                        sleep(.75)
                        keyboard.press(right)
                        keyboard.release(right)
                    elif emoji == "happy":
                        keyboard.press(right)
                        keyboard.release(right)
                        sleep(.75)
                        keyboard.press(right)
                        keyboard.release(right)
                    recognized.append("unknown")
                    self.speak_dialog('seen.face')
                elif (confidence > 35):
                    # confident = True
                    # Known face not yet greeted: switch to the happy emoji,
                    # greet them, and deliver every queued reminder.
                    if id not in recognized:
                        if emoji == "suspicious":
                            keyboard.press(right)
                            keyboard.release(right)
                            emoji = "happy"
                        elif emoji == "happy":
                            keyboard.press(right)
                            keyboard.release(right)
                            sleep(.75)
                            keyboard.press(left)
                            keyboard.release(left)
                        recognized.append(id)
                        dialog = id + '.face'
                        self.speak_dialog(dialog)
                        # Join the reminders with ", and "; [:-6] strips the
                        # trailing separator.
                        text = ""
                        for reminder in self.settings['wigh_reminders']:
                            text += reminder + ", and "
                            # self.settings['wigh_reminders'].remove(reminder)
                        self.settings['wigh_reminders'] = []
                        response = {'reminder': text[:-6]}
                        self.speak_dialog('remind', data=response)
            # Declare success after 50 consecutive frames in which every
            # detected face has been handled at least once.
            if len(recognized) != 0 and len(recognized) >= len(faces):
                count += 1
                if (count >= 50):
                    confident = True
            else:
                count = 0
        cam.release()
        sleep(1)
        # Close the emoji viewer and return focus to the Mycroft debug window.
        keyboard.press(Key.esc)
        keyboard.release(Key.esc)
        os.system("wmctrl -a start-mycroft.sh debug")
    except:
        # NOTE(review): bare except silently drops all queued reminders on
        # any failure (missing camera, files, wmctrl) -- confirm intended.
        self.settings['wigh_reminders'] = []
def mouse_shootn(x=960, y=540, t=1):
    """Press the mouse button at (x, y), hold fire for t seconds, then release.

    NOTE: the release is deliberately issued at the screen centre (960, 540)
    regardless of where the press happened -- the original comment reads
    "stop shooting at the centre position".
    """
    device = PyMouse()
    device.press(x, y)
    time.sleep(t)
    device.release(960, 540)  # release at the centre, per original behaviour
def irMouse():
    """Open webcam, track IR movements, and move the mouse pointer accordingly.

    Returns:
        0 on success, -1 otherwise
    """
    retval = 0
    # Open up a webcam capture
    capture = cv2.VideoCapture(0)
    # Reduce video size for faster processing
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 600)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 450)
    # Create windows
    cv2.namedWindow('Hue', flags=cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Saturation', flags=cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Value', flags=cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Composite', flags=cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Tracking', flags=cv2.WINDOW_AUTOSIZE)
    # Spread out the windows a bit so they're not directly on top of each other
    cv2.moveWindow('Tracking', 0, 0)
    cv2.moveWindow('Composite', 400, 0)
    cv2.moveWindow('Value', 0, 340)
    cv2.moveWindow('Hue', 400, 340)
    cv2.moveWindow('Saturation', 800, 340)
    # Add trackbars to make on-the-fly testing easier. After you've found
    # values that work for your own environment, you'll probably want to change
    # the default values here
    cv2.createTrackbar('hmin', 'Hue', 80, 179, lambda *args: None)
    cv2.createTrackbar('hmax', 'Hue', 179, 179, lambda *args: None)
    cv2.createTrackbar('smin', 'Saturation', 190, 255, lambda *args: None)
    cv2.createTrackbar('smax', 'Saturation', 254, 255, lambda *args: None)
    cv2.createTrackbar('vmin', 'Value', 174, 255, lambda *args: None)
    cv2.createTrackbar('vmax', 'Value', 255, 255, lambda *args: None)
    mouse = PyMouse()
    screenSize = mouse.screen_size()
    # (0, 0) doubles as the "nothing tracked yet" sentinel below.
    newLoc = (0, 0)
    print("Running, press Esc to exit...")
    # Loop infinitely reading webcam data until user presses Escape
    while True:
        ret, frame = capture.read()
        frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches hand motion
        if ret == True:
            # Convert capture to hue, saturation, value
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hue, sat, val = cv2.split(hsv)
            # Get threshold values from trackbars
            hmin = cv2.getTrackbarPos('hmin', 'Hue')
            hmax = cv2.getTrackbarPos('hmax', 'Hue')
            smin = cv2.getTrackbarPos('smin', 'Saturation')
            smax = cv2.getTrackbarPos('smax', 'Saturation')
            vmin = cv2.getTrackbarPos('vmin', 'Value')
            vmax = cv2.getTrackbarPos('vmax', 'Value')
            # Apply thresholding values
            hthresh = cv2.inRange(np.array(hue), np.array(hmin), np.array(hmax))
            sthresh = cv2.inRange(np.array(sat), np.array(smin), np.array(smax))
            vthresh = cv2.inRange(np.array(val), np.array(vmin), np.array(vmax))
            # AND value and hue
            # NOTE(review): sthresh is computed but never combined into the
            # composite -- only value AND hue are used; confirm whether
            # saturation filtering was intended.
            composite = cv2.bitwise_and(vthresh, hthresh)
            # Do some morphological transformations to clean up image
            kernel = np.ones((5, 5), np.uint8)
            composite = cv2.dilate(composite, kernel, iterations=1)
            composite = cv2.morphologyEx(composite, cv2.MORPH_CLOSE, kernel)
            # Use big kernel for blurring
            argRad = 55
            composite = cv2.GaussianBlur(composite, (argRad, argRad), 0)
            prevLoc = newLoc
            # Get the maximum location
            (_, _, _, newLoc) = cv2.minMaxLoc(composite)
            # Only proceed if we are NOT at the top left (i.e., default) corner
            if newLoc != (0, 0) and prevLoc != (0, 0):
                # Calculate x-axis and y-axis changes between prevLoc and newLoc
                delta = np.subtract(newLoc, prevLoc)
                # Calculate actual distance between prevLoc and newLoc
                distance = cv2.norm(newLoc, prevLoc)
                # Has the IR pointer moved a "reasonable" distance?
                # If so, move the mouse pointer
                if distance > 3:
                    if args.opt_verbose == True:
                        print("IR pointer moved {0}".format(distance))
                    # Set the scale factor: bigger IR moves == bigger mouse moves
                    if distance > 20:
                        scaleFactor = 1.7
                    elif distance > 9:
                        scaleFactor = 1.4
                    elif distance > 6:
                        scaleFactor = 1.2
                    else:
                        scaleFactor = 1.0
                    # Get the mouse pointer's current location
                    curPtr = mouse.position()
                    if args.opt_verbose == True:
                        print("\tMouse pointer is currently at {0}".format(
                            curPtr))
                    # Calculate the new mouse pointer location
                    # (x is inverted relative to the IR delta, y is not).
                    newPtrX = int(curPtr[0] - (delta[0] * distance * scaleFactor))
                    newPtrY = int(curPtr[1] + (delta[1] * distance * scaleFactor))
                    # Sanity check the new pointer location values
                    if newPtrX < 0:
                        newPtrX = 0
                    if newPtrX > screenSize[0]:
                        newPtrX = screenSize[0]
                    if newPtrY < 0:
                        newPtrY = 0
                    if newPtrY > screenSize[1]:
                        newPtrY = screenSize[1]
                    # Move the mouse pointer
                    mouse.move(newPtrX, newPtrY)
                    # Show cursor movement
                    # NOTE(review): pag.moveTo re-moves the pointer PyMouse
                    # just placed -- looks redundant; confirm why both are used.
                    if (newPtrX != 0 and newPtrY != 0):
                        pag.moveTo(newPtrX, newPtrY)
                    if args.opt_verbose == True:
                        print("\tMoved mouse pointer to {0}, {1}".format(
                            newPtrX, newPtrY))
            # Draw circle around what we're tracking
            cv2.circle(frame, newLoc, 10, (128, 255, 0), 2)
            # Display results in windows
            #cv2.imshow('Hue', hthresh)
            #cv2.imshow('Saturation', sthresh)
            #cv2.imshow('Value', vthresh)
            cv2.imshow('Composite', composite)
            cv2.imshow('Tracking', frame)
            # Esc key pressed?
            if cv2.waitKey(5) & 0xff == 27:
                break
        else:
            print("Webcam capture failed!")
            retval = -1
            break
    # End while loop
    print("Exiting...")
    cv2.destroyAllWindows()
    capture.release()
    return retval
def see_around():
    """Glance around by snapping the pointer to the fixed point (1920, 540)."""
    cursor = PyMouse()
    cursor.move(1920, 540)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020-01-19 17:02
# @Author : Jinnan Huang
# @Site :
# @File : ordinate.py
# @Software: PyCharm
from pymouse import PyMouse

# Sample where the pointer currently sits, jump it to a fixed spot,
# then report the position it started from.
mou = PyMouse()
a = mou.position()  # position BEFORE the move, captured on purpose
mou.move(3723, 750)
print(a)
# -*- coding: utf-8 -*-
# Interactive setup for the Wanke video auto-player: the user is given a
# 10-second countdown to hover the mouse over each on-screen button, after
# which its coordinates are captured for later automated clicking.
import win32api, win32con
import time
from pymouse import PyMouse
from pykeyboard import PyKeyboard


def _countdown_position(seconds=10):
    """Count down aloud for *seconds* seconds, then return mouse.position().

    Extracted because the identical countdown-and-capture sequence was
    duplicated verbatim for every button being located.
    """
    for i in range(0, seconds):
        print("%d" % (seconds - i))
        time.sleep(1)
    return mouse.position()


print("程序启动成功,请确认玩课网视频播放页面处于最大化且可见状态。\n")
mouse = PyMouse()
keyboard = PyKeyboard()
# Screen resolution via Win32 (presumably used later for sanity checks).
x_dim = win32api.GetSystemMetrics(win32con.SM_CXSCREEN)
y_dim = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)
print("屏幕大小获取成功:X轴大小%d, Y轴大小%d。\n" % (x_dim, y_dim))
print("接下来需要确定“下一节”按钮、播放速度调整按钮、二倍速播放按钮的位置。\n")
print("首先确定“下一节”按钮位置,请将鼠标移动到“下一节”按钮上,十秒后程序将自动读取按钮坐标。")
x_click, y_click = _countdown_position()
print("已确定“下一节”按钮位置:X轴位置%d, Y轴位置%d。\n" % (int(x_click), int(y_click)))
print("接下来确定播放速度调整按钮位置,请将鼠标移动到播放速度调整按钮按钮上,十秒后程序将自动读取按钮坐标。")
x_double_pre, y_double_pre = _countdown_position()
print("已确定播放速度调整按钮位置:X轴位置%d, Y轴位置%d。\n" % (int(x_double_pre), int(y_double_pre)))
def GetPosition():
    """Print the current mouse position every 2 seconds, forever.

    Debug helper: loops until interrupted (e.g. Ctrl-C), printing the
    (x, y) tuple reported by PyMouse.
    """
    # Fixes: `while 1 == 1` replaced with idiomatic `while True`, and the
    # PyMouse handle is created once instead of on every iteration.
    mouse = PyMouse()
    while True:
        print(mouse.position())
        time.sleep(2)