Example #1
0
    def __init__(self,
                 xdo_delay=0,
                 display=None,
                 security_token=None,
                 **kwargs):
        """
        :param int xdo_delay: Default pause between keystrokes.
        :param str display: X display to connect to; passed through to
         both the Xlib connection and libxdo (``None`` selects the
         default display).
        :param security_token: Static token that must be the same between
         client and server.
        :param kwargs: Forwarded unchanged to the base class initializer.
        """
        # NOTE(review): security_token is accepted but not used in this
        # method's visible body — presumably consumed elsewhere; confirm.
        super(XdoPlatformRpcs, self).__init__(**kwargs)

        self.display = Xlib.display.Display(display)
        self.libxdo = xdo.Xdo(display)
        self.xdotool_delay = xdo_delay

        # Resolve every X property name to its atom value a single time so
        # later lookups never have to round-trip to the X server again.
        intern = self.display.intern_atom
        self.x_atoms = {prop: intern(prop) for prop in _X_PROPERTIES}
Example #2
0
class LibxdoKeyboard(BaseX11Keyboard):
    """Static class for typing keys with python-libxdo."""

    _log = logging.getLogger("keyboard")
    display = Xlib.display.Display()
    libxdo = xdo.Xdo(os.environ.get('DISPLAY', ''))

    @classmethod
    def send_keyboard_events(cls, events):
        """
        Send a sequence of keyboard events.

        Positional arguments:
        events -- a sequence of tuples of the form
            (keycode, down, timeout), where
                keycode (str): key symbol.
                down (boolean): True means the key will be pressed down,
                    False means the key will be released.
                timeout (int): number of seconds to sleep after
                    the keyboard event.

        """
        cls._log.debug("Keyboard.send_keyboard_events %r", events)

        # TODO: We can distill this entire loop down to a single libxdo
        # function call when we figure out how to properly use charcodemap_t
        # entities from libxdo.
        for event in events:
            (key, down, timeout) = event
            # libxdo delays are expressed in microseconds; ``timeout`` is in
            # seconds (see time.sleep below), so scale by 1e6.  The previous
            # factor of 1000 produced milliseconds despite the variable name.
            delay_micros = int(timeout * 1000000.0)
            key = KEY_TRANSLATION.get(key, key)

            # Press/release the key, catching any errors so a single bad key
            # doesn't abort the rest of the sequence.
            try:
                if down:
                    cls.libxdo.send_keysequence_window_down(
                        0, key, delay_micros)
                else:
                    cls.libxdo.send_keysequence_window_up(0, key, delay_micros)
            except Exception as e:
                cls._log.exception("Failed to type key code %s: %s", key, e)

            # Sleep after the keyboard event if necessary.
            if timeout:
                time.sleep(timeout)
Example #3
0
        except AttributeError:
            pressSpecialKey(key)




if __name__ == '__main__':

    # Connect to the device on port 6000 via ZXTouch tweak
    s = socket.socket()
    s.connect((device_ip, 6000))  # connect to the tweak
    time.sleep(0.1)  # please sleep after connection.

    # Pick your UxPlay window to determine location
    print("Please click on your UxPlay window or other screen mirroring tool")
    # NOTE(review): this rebinds the imported ``xdo`` module name to an Xdo
    # instance, shadowing the module for the rest of the program.  Handlers
    # defined elsewhere (on_press/on_move/...) may rely on the rebound
    # global, so renaming it here is not safe without checking them.
    xdo = xdo.Xdo()
    win_id = xdo.select_window_with_click()
    print("UxPlay window id selected: ", win_id)

    generateKeymap()
    #listen to keypresses without blocking
    klistener = keyboard.Listener(
        on_press=on_press
        )
    klistener.start()

    # Block here until the mouse listener stops; the event handlers are
    # defined elsewhere in this file.
    with Listener( on_move=on_move,
        on_click=on_click,
        on_scroll=on_scroll) as listener:
        listener.join()
Example #4
0
 def __init__(self, *args, **kwargs) -> None:
     """Initialise the spy and bind it to a user-selected X window."""
     super().__init__(*args, **kwargs)
     self.__xdo = xdo.Xdo()  # type: ignore
     # Ask the user to pick the target window interactively.
     print('Please click on the OrbtXL window')
     self.__window = self.__xdo.select_window_with_click()
Example #5
0
def AjTrak(modelId='last', moveMouse=False, displayCascade=False):
    """Track the user's gaze from the webcam with a trained model.

    Captures frames from camera 0, detects one face and two eyes with Haar
    cascades, feeds the crops through the trained model and prints the
    prediction error against the actual mouse position.

    :param modelId: identifier passed to ``getTrainedModel`` (``'last'``
        loads the most recently trained model).
    :param bool moveMouse: when True, move the mouse pointer to each
        predicted screen position via libxdo.
    :param bool displayCascade: when True, show the annotated camera
        frames in an OpenCV window (press 'q' there to quit).
    """
    targetEye = (48, 32)                 # (w, h) each eye crop is scaled to
    dimVector = (1,) + targetEye + (3,)  # NHWC shape of a single eye image
    recurrent = False  # NOTE(review): hard-coded off; branch below is dormant

    model = getTrainedModel(modelId)
    model.summary()

    if recurrent:
        config = model.get_config()
        config = configForwardPass(config)
        # from_config() builds and *returns* a new model; the original code
        # discarded the result, so the reconfigured model was never used.
        model = model.from_config(config)
        model.summary()

    xContext = xdo.Xdo()
    capture = cv2.VideoCapture(0)
    try:
        faceCascade = cv2.CascadeClassifier("./data/haarcascade_frontalface_alt.xml")
        eyeCascade = cv2.CascadeClassifier("./data/haarcascade_eye_tree_eyeglasses.xml")
        eye0Recent = LastAverage(10)
    except Exception as e:
        print(e)
        capture.release()  # don't leak the camera on the early-return path
        return

    try:
        if displayCascade:
            cv2.namedWindow("test")
        while True:
            ok, image = capture.read()
            if not ok:
                # Camera gone — leave the loop instead of crashing in cvtColor.
                break

            imageGrayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # scaleFactor / minNeighbors chosen empirically.
            facesFound = faceCascade.detectMultiScale(imageGrayscale, 1.3, 5)

            if len(facesFound) == 1:
                x, y, w, h = facesFound[0]
                faceInput = np.array(facesFound[0]).reshape((1, 4))
                imageFace = image[y:y+h, x:x+w]
                faceGrayscale = imageGrayscale[y:y+h, x:x+w]
                cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
                eyesFound = eyeCascade.detectMultiScale(faceGrayscale, 1.3, 5)
                if len(eyesFound) == 2:
                    # Order the eyes left-to-right before scaling.
                    e0, e1 = eyesFound[0], eyesFound[1]
                    if e0[0] > e1[0]:
                        e0, e1 = e1, e0
                    e0, e1 = scale(e0, targetEye), scale(e1, targetEye)

                    # experiment with local averaging
                    #  eye0Recent.push (e0)
                    #  e0 = eye0Recent.average ()

                    (x0, y0, w0, h0, x1, y1, w1, h1) = np.concatenate([e0, e1])
                    cv2.rectangle(imageFace, (x0, y0), (x0+w0, y0+h0), (0, 0, 255), 2)
                    cv2.rectangle(imageFace, (x1, y1), (x1+w1, y1+h1), (0, 255, 0), 2)
                    imageEye0 = imageFace[y0:y0+h0, x0:x0+w0]
                    imageEye1 = imageFace[y1:y1+h1, x1:x1+w1]
                    # Stack both eyes along the channel axis for the model.
                    eyeInput = np.concatenate(
                       (imageEye0.reshape(dimVector).transpose(0, 2, 1, 3),
                       imageEye1.reshape(dimVector).transpose(0, 2, 1, 3)),
                       axis=3
                    )

                    # forward pass
                    modelPrediction = model.predict(x=[faceInput, eyeInput])
                    modelPrediction = modelPrediction.flatten()
                    mouseLoc = getMousePos()
                    predictionError = np.linalg.norm(modelPrediction - mouseLoc)
                    print("Delta: {}\tPredicted: {}\tActual: {}".format(
                        "%.2f" % predictionError, modelPrediction, mouseLoc))
                    if moveMouse:
                        xContext.move_mouse(modelPrediction[0], modelPrediction[1], 0)

            else:
                print("{} faces found".format(len(facesFound)))

            if displayCascade:
                cv2.imshow("test", image)
                # waitKey both pumps the HighGUI event loop (without it the
                # window never refreshes) and provides a 'q' quit key.
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
    except Exception as e:
        print(e)
    finally:
        # Release the camera and windows even on KeyboardInterrupt, which
        # the original code (no finally) would have leaked through.
        capture.release()
        cv2.destroyAllWindows()
    return