def helloworld():
    """Demo of python-libxdo: pick a window by clicking on it, print its
    title, then search for a Chromium window by name regex."""
    from xdo import Xdo
    xdo = Xdo()
    # Blocks until the user clicks a window; returns its X11 window id.
    win_id = xdo.select_window_with_click()

    from pprint import pprint
    pprint(xdo.get_window_name(win_id))

    # search_windows() matches the window name as a regex. The libxdo
    # bindings expect bytes, not str (cf. the b'...' usage elsewhere in
    # this codebase), so pass a bytes pattern here.
    win_id = xdo.search_windows(b'.+Chromium')
    pprint(win_id)
示例#2
0
    def cmd_openApp(self, fn, name=None):
        """Launch a .desktop file via the "dex" launcher.

        If *name* is given, wait a moment for the app to start and then
        raise every window whose title matches it.
        """
        subprocess.Popen(['dex', fn], close_fds=True)

        if not name:
            return

        # Give the launched application a moment to map its window.
        time.sleep(3)

        # The Popen pid belongs to dex itself, not to the program it
        # launched, so locate the app's window(s) by title instead.
        xdo = Xdo()
        for wid in xdo.search_windows(winname=name.encode("utf-8")):
            xdo.activate_window(wid)
示例#3
0
async def term(editor, args):
    """Focus the Tilix terminal window; optionally type a command into it.

    With a command, the newline-terminated command is entered into the
    terminal and focus returns to the editor window; without one, the
    terminal window is simply raised.
    """
    cmd = args.command
    if cmd:
        cmd += "\n"
    xdo = Xdo()
    editor_window = xdo.get_active_window()
    matches = xdo.search_windows(r"^Tilix:".encode("utf-8"), only_visible=True)
    if not matches:
        return error("Tilix window not found")
    tilix = matches[0]
    xdo.focus_window(tilix)
    if not cmd:
        xdo.raise_window(tilix)
    else:
        xdo.enter_text_window(tilix, cmd.encode("utf-8"))
        xdo.focus_window(editor_window)
    return result()
示例#4
0
#!/usr/bin/env python3
"""Print the ids of all visible windows whose WM_CLASS matches "Chrome"."""

import os, sys
from pprint import pprint

from xdo import Xdo

xdo = Xdo()

# Alternative: pick a window interactively instead of searching:
#   win_id = xdo.select_window_with_click()
#   print(win_id)
#   xdo.enter_text_window(win_id, 'Python rocks!')
print(xdo.search_windows(winclass='Chrome'.encode(), only_visible=True))
示例#5
0
class EggnoggGym():
    """
    Gym-like wrapper around a running "eggnoggplus" game window.

    Grabs frames from the screen with mss, sends keyboard input to the
    game (xdo for setup, pyautogui for per-step keys), and exposes
    step()/reset() in the spirit of an RL environment for two players.

    Args:
        need_pretrained: forwarded to the Observation network constructor.
        device: torch device onto which states/observations are moved.

    Attributes:
        monitor (dict): screen-capture region: top, left, width, height.
        sct: mss screen-capture handle.
        resize_factor (int): integer factor used to shrink grabbed frames.
        win_id (int): X11 id of the eggnoggplus window.
        states: rolling stack of the 4 most recent captured frames.
        prev_action: last action sent, as [x, y, jump, stab] per player.
    """
    def __init__(self, need_pretrained, device):
        # Capture region of the game window, found via: xwininfo -name eggnoggplus
        self.monitor = {"top": 70, "left": 64, "width": 1440, "height":960}
        self.sct = mss()
        # Native game resolution is 240x160; captures are shrunk by this factor.
        self.resize_factor = self.monitor['width']//240 #width 240, height 160
        self.pil2tensor = transforms.ToTensor()
        self.device = device


        # Key-event delay in microseconds (130 ms); not used below.
        self.delay = int(130e3)
        self.xdo = Xdo()
        # If several windows match, max() takes the highest (most recent) id.
        self.win_id = max(self.xdo.search_windows(winname=b'eggnoggplus'))

        #swap to window
        self.xdo.activate_window(self.win_id)
        # Tap 'v' once — presumably to dismiss a menu/splash screen; confirm.
        self.xdo.send_keysequence_window_down(self.win_id, b'v')
        self.xdo.send_keysequence_window_up(self.win_id, b'v')

        #init observation network
        self.observation = Observation(need_pretrained=need_pretrained).to(device)

        #init noop prev_action (index 2 means "noop" on the movement axes)
        self.prev_action = [[2,2], #x_action
                            [2,2], #y_action
                            [False, False], #jump_action
                            [False, False]] #stab_action

        #grab first 4 frames to fill the frame stack
        self.states = self.get_single_state()[0]
        for _ in range(3):
            self.states = torch.cat((self.states, self.get_single_state()[0]), dim=2) # pylint: disable=no-member



    def act(self, action_tensors):
        """Sample concrete actions from the given distributions and send
        them to the game as key presses/releases for both players.

        action_tensors layout (one entry per player in each tensor):
          0 -> x-axis categorical (left, right, noop; mirrored for player 2)
          1 -> y-axis categorical (up, down, noop)
          2 -> jump press probability
          3 -> stab press probability
        """
        #Transforms action_tensor to string for xdo
        #coord: 0 -> left, right, noop (right,left,noop for player2)
        #       1 -> up, down, noop
        #       2 -> jump press
        #       3 -> stab press
        x_action = Categorical(action_tensors[0]).sample()
        y_action = Categorical(action_tensors[1]).sample()

        # NOTE(review): '<' fires the key when the sampled uniform exceeds
        # the network's probability, i.e. press with prob (1 - p). Confirm
        # this inversion is intentional.
        jump_action = action_tensors[2] < torch.rand((2,1), device=self.device)# pylint: disable=no-member
        stab_action = action_tensors[3] < torch.rand((2,1), device=self.device)# pylint: disable=no-member

        string_press = []
        string_lift = []

        #x action: player 1 uses q/d, player 2 uses arrow keys
        if x_action[0] == 0:
            string_press.append('q')
        elif x_action[0] == 1:
            string_press.append('d')
        elif x_action[0] == 2 or x_action[0] != self.prev_action[0][0]:
            # noop: release both horizontal keys.
            # NOTE(review): the 'or ...' clause is dead code — this elif is
            # only reached when x_action[0] == 2 (same in the three below).
            string_lift.extend(['q','d'])

        if x_action[1] == 0:
            string_press.append('right') #reversed
        elif x_action[1] == 1:
            string_press.append('left') #reversed
        elif x_action[1] == 2 or x_action[1] != self.prev_action[0][1]:
            string_lift.extend(['left','right'])

        #y action
        if y_action[0] == 0:
            string_press.append('z')
        elif y_action[0] == 1:
            string_press.append('s')
        elif y_action[0] == 2 or y_action[0] != self.prev_action[1][0]:
            string_lift.extend(['z','s'])

        if y_action[1] == 0:
            string_press.append('up')
        elif y_action[1] == 1:
            string_press.append('down')
        elif y_action[1] == 2 or y_action[1] != self.prev_action[1][1]:
            string_lift.extend(['up','down'])

        #jump action: v for player 1, n for player 2
        if jump_action[0]:
            string_press.append('v')
        else:
            string_lift.append('v')

        if jump_action[1]:
            string_press.append('n')
        else:
            string_lift.append('n')

        #stab action: b for player 1, comma for player 2
        if stab_action[0]:
            string_press.append('b')
        else:
            string_lift.append('b')

        if stab_action[1]:
            string_press.append(',')
        else:
            string_lift.append(',')

        #update previous actions
        self.prev_action = [x_action, y_action, jump_action, stab_action]

        #send inputs to eggnogg: releases first, then presses
        for lift in string_lift:
            pyautogui.keyUp(lift, _pause=False)
        for press in string_press:
            pyautogui.keyDown(press, _pause=False)



    def get_single_state(self):
        """Grab one frame and return (state, (r1, r2), is_terminal).

        state is the captured frame plus a horizontally flipped,
        red/green-swapped copy concatenated on the batch dimension, so
        each player gets a "self-colored" view. r1/r2 are +/-1.0 on a
        detected win, else 0; the win is detected from the water color in
        the frame's bottom corners.
        """
        # NOTE(review): `with self.sct:` closes the mss handle on exit,
        # which may break subsequent grabs through the same handle — confirm.
        with self.sct:
            sct_img = self.sct.grab(self.monitor)
    
            # Create the Image (mss returns raw BGRA pixel data)
            state = Image.frombytes("RGB",
                                  sct_img.size,
                                  sct_img.bgra,
                                  "raw",
                                  "BGRX")
            state = state.resize((state.size[0]//self.resize_factor,
                              state.size[1]//self.resize_factor))
            state = self.pil2tensor(state)

            r1 = r2 = 0
            is_terminal = False
            #p1 wins, red water, bottom right (red channel saturated)
            if state[0, state.shape[1]-1, state.shape[2]-1] == 1.0:
                is_terminal = True
                r1 = 1.0
                r2 = -1.0
            #p2 wins, green water, bottom left (green channel saturated)
            elif state[1, state.shape[1]-1, 0] == 1.0:
                is_terminal = True
                r1 = -1.0
                r2 = 1.0

            # Add batch and frame-stack dimensions.
            state = state.unsqueeze(0)
            #b,3,320,480
            state = state.unsqueeze(2)
            #b,3,1,320,480

            #flip image horizontally and swap red and green channels
            state_inversed = state.flip([-1])[:,[1,0,2],:,:,:]

            #cat state and inversed on batch dimension (one view per player)
            state = torch.cat((state, state_inversed), dim=0)# pylint: disable=no-member
        return state.to(self.device).detach_(), (r1, r2), is_terminal


    def reset(self):
        """Release held movement keys and restart the match with F5."""
        # Tap every player key once to neutralise any held state.
        pyautogui.write('zqsdvbn,')
        pyautogui.keyUp('up')
        pyautogui.keyUp('left')
        pyautogui.keyUp('down')
        pyautogui.keyUp('right')

        pyautogui.keyDown('f5')
        pyautogui.keyUp('f5')

    def step(self, action_tensor):
        """Advance one step: drop the oldest frame, apply the action, grab
        a new frame, and return (observation, reward, is_terminal)."""
        #remove oldest state (keep the newest 3 of the 4 stacked frames)
        self.states = self.states.split([1,3], dim=2)[1]
        #2,3,3,320,480

        #act
        self.act(action_tensor)

        #get state
        state, reward, is_terminal = self.get_single_state()

        self.states = torch.cat((self.states, state), dim=2)# pylint: disable=no-member
        #2,3,4,320,480
        with torch.autograd.no_grad():
            obs = self.observation(self.states)

        return obs, reward, is_terminal
示例#6
0
# Enumerate RandR outputs on the X root window, recording geometry for
# each active monitor and tagging the primary one.
# (`d`, `randr`, `monitors_list`, `monitors_list_inactive`, `args` and the
# list_* helpers are defined elsewhere in this file.)
root_window = d.screen().root
primary_output_id = randr.get_output_primary(root_window).output
for output_id in randr.get_screen_resources(root_window).outputs:
    o = randr.get_output_info(root_window, output_id, 0)
    # An output with no CRTC attached is connected but inactive.
    if o.crtc != 0:
        c = randr.get_crtc_info(root_window, o.crtc, 0)
        monitors_list[o.name] = { 'width': c.width, 'height': c.height, 'x': c.x, 'y': c.y, 'primary': False }
        if output_id == primary_output_id:
            monitors_list[o.name]['primary'] = True
            primary_monitor = o.name
    else:
        monitors_list_inactive.append(o.name)

# list all windows (b'.*' matches any name), sorted by window id
xdo = Xdo()
windows_list = xdo.search_windows(winname = b'.*')
windows_list.sort()

# getting informations about monitor / window to capture;
# args.source may be an index or one of 'list-mon' / 'list-win'
monitor = None
window = None
if args.source:
    try:
        source_int = int(args.source)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # `except ValueError:` is what is meant here.
    except:
        source_int = None
    if args.source == 'list-mon':
        print(list_monitors(monitors_list))
        sys.exit(0)
    elif args.source == 'list-win':
        print(list_windows(windows_list))
示例#7
0
def start_ns(url):
    """Boot a NeXTSTEP emulator ("previous") inside a virtual display,
    log in, start the WWW browser, paste *url* into it and open it,
    sending screenshots along the way.

    Python 2 code: print statements and xrange are intentional.
    """

    # First, create the copy of the hard drive
    if DEBUG:
        print "creating the HD"

    # kill any previous binaries (any leftover emulator process)
    for proc in psutil.process_iter():
        if proc.name() == "previous":
            proc.kill()

    # Restore a pristine disk image so every run starts from known state.
    tar = tarfile.open(PRISTINE_HARD_DISK, 'r:gz')
    tar.extractall(ACTUAL_HARD_DISK_DIR)

    if DEBUG:
        print "done creating the HD"

    # Run everything inside an off-screen X display.
    with pyvirtualdisplay.smartdisplay.SmartDisplay(visible=0,
                                                    size=(1300,
                                                          900)) as v_display:
        if DEBUG:
            print v_display
            print v_display.display

        with easyprocess.EasyProcess(PREVIOUS_BINARY) as previous:

            time.sleep(.50)
            xdo = Xdo()

            # Poll until the emulator window appears (or the process dies).
            done = False
            while not done:
                wins = xdo.search_windows('Previous')
                # check if alive
                if not previous.is_alive():
                    print "Error, couldn't boot emulator"
                    return
                if wins:
                    done = True
                    window = wins[0]
                else:
                    time.sleep(.50)

            # Get the relative X and Y for the window
            loc = xdo.get_window_location(window)

            print "Booting up"

            # wait until we get to the login screen
            wait_until_login_screen(xdo, loc, window, v_display)

            print "Finally booted"

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            log_into_ns(xdo, window)

            print "Waiting to log in"

            # wait for the screen to load
            wait_until_login_desktop(xdo, loc, window, v_display)

            print "Logged in"

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            # Send the data (puts the URL on the guest's clipboard path)
            send_url_to_ns(url)

            send_copy_command(xdo, window)

            time.sleep(.50)

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            print "Starting the old dog"

            send_start_www_command(xdo, window)

            # Wait until WWW is ready
            wait_until_www_ready(xdo, loc, window, v_display)

            print "That old dog is ready"

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            # Close out the terminal
            # Command-q is the keyboard shortcut to quit
            xdo.send_keysequence_window(window, "Super_L+q")
            time.sleep(.50)
            select_open_document(xdo, loc, window)

            # Wait until Document Inspector is open
            print "Sending your input"
            wait_until_document_inspector_open(xdo, loc, window, v_display)

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            move_to_address_field(xdo, loc, v_display)
            time.sleep(.50)
            xdo.click_window(window, 1)

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            # Paste that shit!
            # Command-v is the keyboard shortcut
            xdo.send_keysequence_window(window, "Super_L+v")
            time.sleep(.50)

            current_image = v_display.waitgrab()
            send_screen_shot(current_image)

            print "That old dog can still visit URLs!"

            # if DEBUG:
            #     input("Wait for debugging")

            # Open it!
            xdo.send_keysequence_window(window, "Return")

            print "tbl would be proud"

            # Wait for 5 seconds, taking screenshots
            for i in xrange(5):
                time.sleep(1)
                current_image = v_display.waitgrab()
                send_screen_shot(current_image)

        # Crazy hack b/c something is sending an error message:
        # "XIO:  fatal IO error 0 (Success) on X server ":1001""
        # Which is freaking crazy, doesn't make any
        # sense; silence stderr by pointing fd 2 at /dev/null.
        devnull = open(os.devnull, 'w')
        os.dup2(devnull.fileno(), 2)
示例#8
0
# Setup for the prediction/monitoring loop of the auto-drive script.
# (`xdo`, `display_width`, `display_height` and `pygame` come from
# elsewhere in this file.)
idle_counter = 0
res_x = 124
res_y = 50
# Screen-capture region of the game window.
cords = {
    'top': 70,
    'left': 5,
    'width': display_width,
    'height': display_height
}
win_name = "GAMEWINDOWNAME"
predict = False
home_folder = "/home/user0/autodrive-game/"
sample_count = 0

# Get window id for sending key events to the application window.
# Search once and reuse the result instead of querying the X server twice.
windows = xdo.search_windows(win_name.encode())
print(len(windows))
win_id = windows[0]
print("WIN ID:" + str(win_id))

# Initialize display for monitoring
gameDisplay = pygame.display.set_mode((int(3 * res_x), int(res_y)))
pygame.display.set_caption('Driving')

# Key mappings: one slot per virtual joystick button.
joy_keys = []
joy_keys.append(0)  #accel
joy_keys.append(0)  #break
joy_keys.append(0)  #sharp turn (not used for training)
joy_keys.append(0)  #left
joy_keys.append(0)  #right
示例#9
0
# Small experiment script: send key events to a running eggnoggplus window.
from xdo import Xdo
import matplotlib.pyplot as plt
import pyautogui

# Key-event delay in microseconds (130 ms).
delay = int(130e3)
xdo = Xdo()
# If several windows match, take the highest (most recent) window id.
win_id = max(xdo.search_windows(winname=b'eggnoggplus'))

xdo.activate_window(win_id)

# Tap 'v' with zero delay — presumably to dismiss a menu; confirm.
xdo.send_keysequence_window_down(win_id, b'v', 0)
xdo.send_keysequence_window_up(win_id, b'v', 0)
"""
plt.pause(2)
xdo.send_keysequence_window_down(win_id, b'a+d+Left+Right+w+s+Up+Down+v+n+comma',0)
print(1)
xdo.send_keysequence_window_up(win_id, b'a+d+Left+Right+w+s+Up+Down+v+n+comma',delay)
print(2)
#xdo.send_keysequence_window_up(win_id, b'a+d+Left+Right+w+s+Up+Down+v+n+comma',0)
#plt.pause(2)
"""
pyautogui.keyDown(',')
示例#10
0
File: main.py — Project: queer/amynia-bot
import time

# We time when each last keypress was, and then we make sure to wait at least 32ms
# (2 frames @ 60FPS) so that we don't get multiple keypresses
currentTimeMillis = lambda: int(round(time.time() * 1000))
sTime = currentTimeMillis()
dTime = currentTimeMillis()
fTime = currentTimeMillis()
spaceTime = currentTimeMillis()
jTime = currentTimeMillis()
kTime = currentTimeMillis()
lTime = currentTimeMillis()
WAIT_TIME_MILLIS = 80
xdo = Xdo()
# First window whose title matches the osu! build name.
window_id = xdo.search_windows(winname=bytes('osu!cuttingedge', 'utf-8'))[0]

mode = ''

# Read frames from a local UDP video stream (cv2 imported elsewhere).
cap = cv2.VideoCapture()
print('Waiting on video...')
cap.open('udp://127.0.0.1:1234/')
# NOTE(review): `cap.open` is a bound method and is always truthy, so this
# branch can never fire; `cap.isOpened()` was almost certainly intended.
if not cap.open:
    print("Not open")
while True:
    # widthxheight+x+y
    # 322x1078+793+0
    err,img = cap.read()
    #if err:
        #print(err)
    # NOTE(review): img is None when the read fails, in which case
    # img.shape raises — consider checking err/None first.
    if img.shape != (0,0):
示例#11
0
# General variables for the training-data recording loop.
# (`xdo`, `x_coord`, `y_coord`, `display_width`, `display_height` and
# `pygame` come from elsewhere in this file.)
record = False
win_name = "GAMEWINDOWNAME"
# Screen-capture region of the game window.
win_props = {
    'top': y_coord,
    'left': x_coord,
    'width': display_width,
    'height': display_height
}
button_delay = 0.2
dir_name = ""
sample_count = 0

# Get window id for sending key events to the application window
win_id = xdo.search_windows(win_name.encode())[0]
print("WIN ID:" + str(win_id))

# Initialize display for monitoring (quarter-size preview window)
gameDisplay = pygame.display.set_mode(
    (int(display_width / 4), int(display_height / 4)))
pygame.display.set_caption('Training')

# Key mappings: one slot per virtual joystick button.
joy_keys = []
joy_keys.append(0)  #accel
joy_keys.append(0)  #break
joy_keys.append(0)  #sharp turn (not used for training at the moment)
joy_keys.append(0)  #left
joy_keys.append(0)  #right
示例#12
0
"""Auto-turn pages in a Kindle reader window by clicking at fixed spots."""
from xdo import Xdo
import pyautogui
import time

xdo = Xdo()

# Locate the Kindle window; fall back to an interactive click whenever the
# search is ambiguous (zero matches or more than one).
win_id = xdo.search_windows(winname=b".*Kindle.*", only_visible=True)
if len(win_id) == 1:
    win_id = win_id[0]
else:
    win_id = xdo.select_window_with_click()

# Click coordinates for "next page" and for the page centre.
window_size = xdo.get_window_size(win_id)
next_y = 500
next_x = 1523
center_x, center_y = 500, 500
nbr_pages = 1000

def next_page():
    """Bring the Kindle window to the front and click the "next page" spot."""
    xdo.activate_window(win_id)
    xdo.move_mouse(next_x, next_y)
    xdo.click_window(win_id, 1)

def first_page():
    """Bring the Kindle window to the front and click the page centre."""
    xdo.activate_window(win_id)
    xdo.move_mouse(center_x, center_y)
    xdo.click_window(win_id, 1)
# Loop counter and screenshot of the previous page (populated later).
i = 0
page_prev = None  # e.g. pyautogui.screenshot(region=(259,138,1207,739))