def read_local_screen(self):
        """Capture the NoxPlayer window into self.screen as a NumPy array."""
        self.window = winGuiAuto.findTopWindow("NoxPlayer")
        # Keep the emulator window on top so the grab is not occluded by other windows.
        win32gui.SetWindowPos(self.window, win32con.HWND_TOPMOST, 0, 0, 0, 0,
                              win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)
        rect = win32gui.GetWindowPlacement(self.window)[-1]
        self.screen = np.array(ImageGrab.grab(rect))

Example #2
def window_location():
    """Finds the location of the League of Legends window on the desktop."""
    try:
        league_win = winGuiAuto.findTopWindow("League of Legends")
    except Exception:
        # No matching top-level window was found.
        return None
    if win32gui.IsWindowVisible(league_win) and not win32gui.IsIconic(league_win):
        # Only report a rectangle when the window is visible and not minimized.
        win_loc = win32gui.GetWindowPlacement(league_win)[-1]
        return win_loc
    else:
        return None
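
# A minimal usage sketch (not from the original project): the rectangle returned by
# window_location() can be fed straight into PIL's ImageGrab.grab(), mirroring the
# capture pattern used in the other examples on this page. The filename is arbitrary.
from PIL import ImageGrab

loc = window_location()
if loc is not None:
    league_img = ImageGrab.grab(loc)  # capture only the League client rectangle
    league_img.save("league.png", "PNG")
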
    def __init__(self, outDim, inputtype, winKeyword=None, crop=None):
        self.inputtype = inputtype
        self.outDim = outDim

        if self.inputtype == 'test':
            self.capture = cv2.VideoCapture('free_will_ch1.MOV')
            self.getFrame = self.getFrameCap

        if self.inputtype == 'win':
            self.hwnd = winGuiAuto.findTopWindow(winKeyword)
            self.crop = crop
            self.getFrame = self.getFrameWin

            print(f'initialized InputManager with target win hwnd={self.hwnd}')
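
    # getFrameWin is bound above but not shown in this snippet; the sketch below is an
    # assumption (not the project's actual method), following the GetWindowPlacement +
    # ImageGrab capture pattern used elsewhere on this page. The crop handling and the
    # final resize to self.outDim are likewise assumptions.
    def getFrameWin(self):
        rect = win32gui.GetWindowPlacement(self.hwnd)[-1]
        frame = np.array(ImageGrab.grab(rect))
        if self.crop is not None:
            x0, y0, x1, y1 = self.crop
            frame = frame[y0:y1, x0:x1]
        return cv2.resize(frame, self.outDim)
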
class UFT:
    def __init__(self, path=''):
        self.progam_path = path

    def init_uftpath(self, path):
        self.progam_path = path
        return True

    def start_UFT(
            self,
            test_case=r"E:\UFTTestCase\EMS_Test\EMS_FUN_000012\DemoTest\DemoTest",
            path=''):
        self.progam_path = path
        self.app = application.Application.start(self.progam_path)

        WINDOW_TITLE = u'HP Unified Functional Testing'
        cnt = 0
        while cnt < 60:
            try:
                hwnd = 0
                hwnd = winGuiAuto.findTopWindow('Unified Functional Testing')
                if hwnd > 0:
                    break
            except Exception:
                cnt = cnt + 1
                time.sleep(1)
                pass
        self.app[u'Unified Functional Testing'][u'&Continue'].Click()
        self.app[u'Unified Functional Testing - Add-in Manager'][u'OK'].Click()

        WINDOW_TITLE = u'HP Unified Functional Testing'

        result = False
        cnt = 0
        while cnt < 60:
            try:
                hwnd = 0
                hwnd = winGuiAuto.findTopWindow(
                    'HP Unified Functional Testing')
                if hwnd > 0:
                    result = True
                    break
            except Exception:
                cnt = cnt + 1
                time.sleep(1)
                pass
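
# The retry loops above poll winGuiAuto.findTopWindow until the UFT window appears.
# The same idea as a small standalone helper -- a sketch only, not part of the
# original class; the 60-second default timeout is an assumption.
def wait_for_window(keyword, timeout=60):
    """Poll for a top-level window whose title contains `keyword`; return its hwnd or 0."""
    for _ in range(timeout):
        try:
            hwnd = winGuiAuto.findTopWindow(keyword)
            if hwnd > 0:
                return hwnd
        except Exception:
            pass
        time.sleep(1)
    return 0
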
# Test screengrab for a window.
# Apparently this works even when the window is not at the foreground!
import winGuiAuto
import win32gui
from PIL import ImageGrab

sample = winGuiAuto.findTopWindow("tk")
rect = win32gui.GetWindowPlacement(sample)[-1]
print(rect)
image = ImageGrab.grab(rect)

# note: does not open directly from terminal.
# need to convert to pixel values directly to feed into convnet
image.save("sample.jpg", "JPEG")
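
# As the note above says, the grabbed PIL image still has to be turned into raw pixel
# values before it can be fed into a convnet. A minimal sketch of that conversion --
# the NumPy import and the float normalisation are assumptions, not the original code.
import numpy as np

pixels = np.array(image)                      # H x W x 3 uint8 array
pixels = pixels.astype(np.float32) / 255.0    # scale to [0, 1] before feeding a convnet
print(pixels.shape)
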
def scanSingle(self):
    """Single gesture scanner """
    try:
        clearfunc(self.cam)
    except:
        pass
    uic.loadUi('UI_Files/scan_single.ui', self)
    self.setWindowTitle(self.title)

    if (self.scan_single.clicked.connect(self.scanSingle)):
        controlTimer(self)
    self.pushButton_2.clicked.connect(lambda: clearfunc(self.cam))
    self.linkButton.clicked.connect(openimg)

    self.scan_single.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))

    try:
        self.exit_button.clicked.connect(lambda: clearfunc(self.cam))
    except:
        pass
    self.exit_button.clicked.connect(self.quitApplication)
    img_text = ''
    while True:
        ret, frame = self.cam.read()
        frame = cv2.flip(frame, 1)
        try:
            frame = cv2.resize(frame, (321, 270))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img1 = cv2.rectangle(frame, (150, 50), (300, 200), (0, 255, 0),
                                 thickness=2,
                                 lineType=8,
                                 shift=0)
        except:
            keyboard.press_and_release('esc')

        height1, width1, channel1 = img1.shape
        step1 = channel1 * width1
        # create QImage from image
        qImg1 = QImage(img1.data, width1, height1, step1, QImage.Format_RGB888)
        # show image in img_label
        try:
            self.label_3.setPixmap(QPixmap.fromImage(qImg1))
            slider1 = self.trackbar.value()
        except:
            pass

        lower_blue = np.array([0, 0, 0])
        upper_blue = np.array([179, 255, slider1])

        imcrop = img1[52:198, 152:298]
        hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_blue, upper_blue)

        cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
        cv2.imshow("mask", mask)
        cv2.setWindowProperty("mask", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.resizeWindow("mask", 118, 108)
        cv2.moveWindow("mask", 894, 271)

        hwnd = winGuiAuto.findTopWindow("mask")
        win32gui.SetWindowPos(
            hwnd, win32con.HWND_TOP, 0, 0, 0, 0, win32con.SWP_NOMOVE
            | win32con.SWP_NOSIZE | win32con.SWP_NOACTIVATE)

        try:
            self.textBrowser.setText("\n\n\t" + str(img_text))
        except:
            pass

        img_name = "1.png"
        save_img = cv2.resize(mask, (image_x, image_y))
        cv2.imwrite(img_name, save_img)
        img_text = predictor()

        if cv2.waitKey(1) == 27:
            break

    self.cam.release()
    cv2.destroyAllWindows()
Example #9
    def scanSent(self):
        """sentence formation module """
        try:
            clearfunc(self.cam)
        except:
            pass
        uic.loadUi('UI_Files/scan_sent.ui', self)
        self.setWindowTitle(self.title)

        self.exit_button_2.clicked.connect(self.quitApplication)
        if (self.scan_sen.clicked.connect(self.scanSent)):
            controlTimer(self)
        self.speech_gen.clicked.connect(to_speech)
        try:
            self.stop.clicked.connect(lambda: clearfunc2(self.cam))
        except:
            pass
        self.linkButton.clicked.connect(openimg)

        self.scan_sen.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.exit_button_2.setCursor(
            QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.stop.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.save.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.speech_gen.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))

        #try:
        #self.exit_button.clicked.connect(lambda:clearfunc(self.cam))
        #except:
        #pass

        img_text = ''
        append_text = ''
        new_text = ''
        finalBuffer = []
        counts = 0
        while True:
            ret, frame = self.cam.read()
            frame = cv2.flip(frame, 1)
            try:
                frame = cv2.resize(frame, (331, 310))

                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img = cv2.rectangle(frame, (150, 50), (300, 200), (0, 255, 0),
                                    thickness=2,
                                    lineType=8,
                                    shift=0)
            except:
                keyboard.press_and_release('esc')
                keyboard.press_and_release('esc')

            height, width, channel = img.shape
            step = channel * width
            # create QImage from image
            qImg = QImage(img.data, width, height, step, QImage.Format_RGB888)
            # show image in img_label
            try:
                self.label_3.setPixmap(QPixmap.fromImage(qImg))
                slider = self.trackbar.value()
            except:
                pass

            lower_blue = np.array([0, slider, 0])
            upper_blue = np.array([179, 255, 255])
            imcrop = img[52:198, 152:298]
            hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)
            mask1 = cv2.inRange(hsv, lower_blue, upper_blue)

            cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
            cv2.imshow("mask", mask1)
            cv2.setWindowProperty("mask", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)
            cv2.resizeWindow("mask", 118, 108)
            cv2.moveWindow("mask", 713, 264)

            img_name = "1.png"
            save_img = cv2.resize(mask1, (image_x, image_y))
            cv2.imwrite(img_name, save_img)
            img_text = predictor()

            hwnd = winGuiAuto.findTopWindow("mask")
            win32gui.SetWindowPos(
                hwnd, win32con.HWND_TOP, 0, 0, 0, 0, win32con.SWP_NOMOVE
                | win32con.SWP_NOSIZE | win32con.SWP_NOACTIVATE)

            try:
                self.textBrowser.setText("\n      " + str(img_text))
            except:
                pass

            if cv2.waitKey(1) & 0xFF == ord('c'):
                try:
                    counts += 1
                    append_text += img_text
                    new_text += img_text

                    self.textBrowser_4.setText(new_text)
                except:
                    append_text += ''

                if (len(append_text) > 1):
                    finalBuffer.append(append_text)
                    append_text = ''
                else:
                    finalBuffer.append(append_text)
                    append_text = ''

            try:
                self.save.clicked.connect(
                    lambda: saveBuff(self, self.cam, finalBuffer))
            except:
                pass
            if cv2.waitKey(1) == 27:
                break

            if keyboard.is_pressed('shift+s'):
                if (len(finalBuffer) >= 1):
                    f = open("temp.txt", "w")
                    for i in finalBuffer:
                        f.write(i)
                    f.close()
                break

        self.cam.release()
        cv2.destroyAllWindows()

        if os.path.exists('temp.txt'):
            QtWidgets.QMessageBox.about(
                self, "Information",
                "File is temporarily saved. Please click Speech Button ")
        try:
            self.textBrowser.setText("		 ")
        except:
            pass
Example #10
    def createGest(self):
        """ Custom gesture generation module"""
        try:
            clearfunc(self.cam)
        except:
            pass
        gesname = ""
        uic.loadUi('UI_Files/create_gest.ui', self)
        self.setWindowTitle(self.title)
        self.create.clicked.connect(self.createGest)
        self.exp2.clicked.connect(self.exportFile)
        if (self.scan_sen.clicked.connect(self.scanSent)):
            controlTimer(self)
        self.scan_sinlge.clicked.connect(self.scanSingle)
        self.linkButton.clicked.connect(openimg)
        self.create.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.scan_sen.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.scan_sinlge.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.exp2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.pushButton_2.clicked.connect(lambda: clearfunc(self.cam))
        try:
            self.exit_button.clicked.connect(lambda: clearfunc(self.cam))
        except:
            pass
        self.exit_button.clicked.connect(self.quitApplication)
        self.plainTextEdit.setPlaceholderText("Enter Gesture Name Here")
        img_text = ''
        saveimg = []
        while True:
            ret, frame = self.cam.read()
            frame = cv2.flip(frame, 1)
            try:
                frame = cv2.resize(frame, (321, 270))
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img2 = cv2.rectangle(frame, (150, 50), (300, 200), (0, 255, 0),
                                     thickness=2,
                                     lineType=8,
                                     shift=0)
            except:
                keyboard.press_and_release('esc')

            height2, width2, channel2 = img2.shape
            step2 = channel2 * width2
            # create QImage from image
            qImg2 = QImage(img2.data, width2, height2, step2,
                           QImage.Format_RGB888)
            # show image in img_label
            try:
                self.label_3.setPixmap(QPixmap.fromImage(qImg2))
                slider2 = self.trackbar.value()
            except:
                pass

            lower_blue = np.array([0, 0, 0])
            upper_blue = np.array([179, 255, slider2])
            imcrop = img2[52:198, 152:298]
            hsv = cv2.cvtColor(imcrop, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_blue, upper_blue)

            cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
            cv2.imshow("mask", mask)
            cv2.setWindowProperty("mask", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_FULLSCREEN)
            cv2.resizeWindow("mask", 170, 160)
            cv2.moveWindow("mask", 766, 271)

            hwnd = winGuiAuto.findTopWindow("mask")
            win32gui.SetWindowPos(
                hwnd, win32con.HWND_TOP, 0, 0, 0, 0, win32con.SWP_NOMOVE
                | win32con.SWP_NOSIZE | win32con.SWP_NOACTIVATE)

            try:
                ges_name = self.plainTextEdit.toPlainText()
            except:
                pass
            if (len(ges_name) >= 1):
                saveimg.append(ges_name)
            else:
                saveimg.append(ges_name)
                ges_name = ''

            try:
                self.pushButton.clicked.connect(
                    lambda: capture_images(self, self.cam, saveimg, mask))
            except:
                pass

            gesname = saveimg[-1]

            if keyboard.is_pressed('shift+s'):
                if not os.path.exists('./SampleGestures'):
                    os.mkdir('./SampleGestures')
                if (len(gesname) >= 1):
                    img_name = "./SampleGestures/" + "{}.png".format(
                        str(gesname))
                    save_img = cv2.resize(mask, (image_x, image_y))
                    cv2.imwrite(img_name, save_img)
                break

            if cv2.waitKey(1) == 27:
                break

        self.cam.release()
        cv2.destroyAllWindows()

        if os.path.exists("./SampleGestures/" + str(gesname) + ".png"):
            QtWidgets.QMessageBox.about(self, "Success",
                                        "Gesture Saved Successfully!")
Example #11
def main():
	hwcode = win32api.MapVirtualKey(VK_MEDIA_PLAY_PAUSE, 0)

	old_lcd_text = {'line1': "", 'line2': ""}
	artist_pos = {'begin':0, 'end':39}
	song_pos = {'begin':0, 'end':39}
	
	spotify = winGuiAuto.findTopWindow("Spotify - ") #get the handle of the Spotify window
	if not spotify:
		raise ValueError("Spotify window not found. Is it running?")
		
	print "Opening serial connection to AVR."
	avr = AVR(port=COM_PORT) #COM7 - port is zero-indexed
	
	print "Connection open on %s." % avr.connection.name
	print "Waiting for reset..."
	
	time.sleep(3) 
	avr.send_command("LC\r\n")
	
	song = "Waiting for"
	artist = "Spotify data"
	
	print "Connected!"
	
	while True:
		title = win32gui.GetWindowText(spotify) #get the window title
		title = title.decode('ascii', 'ignore') #Ignore the stupid unicode dash
		title = title.split('  ') #split it so we can get the playing song
		
		try:
			artist = title[0].split('Spotify - ')[1]
			song = title[1]
		except IndexError:
			song = song #do not update if Spotify is paused
			artist = artist #do not update if Spotify is paused
			
		cur_lcd_text = {'line1':str((song[song_pos['begin']:song_pos['end']]).ljust(16, ' ')), 'line2':str((artist[artist_pos['begin']:artist_pos['end']]).ljust(16, ' '))}

		if (cur_lcd_text['line1'] != old_lcd_text['line1']) or (cur_lcd_text['line2'] != old_lcd_text['line2']):
			t = threading.Thread(target=update_spotify, args=(q, avr, cur_lcd_text))
			t.start()
			old_lcd_text['line1'] = cur_lcd_text['line1']
			old_lcd_text['line2'] = cur_lcd_text['line2']
			
		if avr.connection.inWaiting(): #there's a command in the buffer
			if debug: print "Got a command"
			r = avr.connection.read() #read the char in the buffer
			if r == "P": #if it's P for pause...
				if debug: print "Pause!" #log this
				win32api.keybd_event(VK_MEDIA_PLAY_PAUSE, hwcode) #and send the scancode
			elif r == "B": #if it's B for back...
				if debug: print "Previous track!" #log this
				win32api.keybd_event(VK_MEDIA_PREV_TRACK, hwcode) #and send the scancode
			elif r == "N": #if it's N for next track...
				if debug: print "Next track!" #log this
				win32api.keybd_event(VK_MEDIA_NEXT_TRACK, hwcode) #and send the scancode
			elif r == "U": #if it's U for volume up...
				if debug: print "Volume up!" #log this
				win32api.keybd_event(VK_MEDIA_VOLUME_UP, hwcode) #and send the scancode
			elif r == "D": #if it's D for volume down...
				if debug: print "Volume down!" #log this
				win32api.keybd_event(VK_MEDIA_VOLUME_DOWN, hwcode) #and send the scancode
			elif r == "T":
				if debug: print "Tick"
	
		time.sleep(1)
		
	avr.close_connection()
	
	print "Connection closed."
Example #12
import cv2
import numpy as np
from PIL import ImageGrab
import win32api
import winGuiAuto
import win32gui
import win32con

cap = cv2.VideoCapture(0)

# Capture the window frame by frame
image_list = []
for _ in range(70):
    ret, frame = cap.read()
    cv2.imshow('SCORE', frame)
    cv2.waitKey(1)
    hwnd = winGuiAuto.findTopWindow("SCORE")
    win32gui.SetWindowPos(hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0,
                          win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)
    rect = win32gui.GetWindowPlacement(hwnd)[-1]
    image = ImageGrab.grab(rect)
    image_list.append(image)

height, width, channel = np.array(image).shape
cap.release()
cv2.destroyAllWindows()

out = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc(*'DIVX'), 5,
                      (width, height))

for images in image_list:
    out.write(cv2.cvtColor(np.array(images), cv2.COLOR_BGR2RGB))
Example #13
def main():
    restart = False

    # get config settings
    outputWindowName = config.outputWindowName
    captureWindowKeyword = config.captureWindowKeyword
    avgFpsCacheLen = config.avgFpsCacheLen

    # create window 
    cv2.namedWindow(outputWindowName)
    
    # identify capture window
    hwnd = winGuiAuto.findTopWindow(captureWindowKeyword)
    print(f'hwnd={hwnd}')

    alreadyAnalyzed = False
    frameNum = 0
    fpsCache = [-1 for _ in range(avgFpsCacheLen)]

    while True:

        # time runtime
        startTime = timeit.default_timer()

        # get corner coordinates of capture window
        position = win32gui.GetWindowRect(hwnd)

        # save pixels into array
        frame = getRectAsImage(position)
        frame = np.array(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # clean frame
        frame = cleanFrame(frame)

        infoFrame = frame.copy()

        # check if is in transition
        isInTransition = inTransition(frame)
        if isInTransition:
            cv2.putText(img=infoFrame,
                        text='In Transition',
                        org=(20,infoFrame.shape[0]//2),
                        fontFace=0,
                        fontScale=2,
                        color=(0,0,255),
                        thickness=2
                        )
            print('[INFO] in transition')
        else:
            cv2.putText(img=infoFrame,
                        text='Base',
                        org=(20,infoFrame.shape[0]//2),
                        fontFace=0,
                        fontScale=2,
                        color=(0,255,0),
                        thickness=2
                        )
            print('[INFO] in base')

        # check if analysis already done
        if alreadyAnalyzed and (not isInTransition):
            alreadyAnalyzed = False
        
        # run analysis
        if not alreadyAnalyzed:
            goodBase = isGoodBase(frame)
            alreadyAnalyzed = True

        # complete action
        executeAction(goodBase)

        # escape conditions
        key = cv2.waitKey(1)
        if key == 27: # escape
            break
        if key == 96: # tilde key
            restart = True
            break
        
        # calculate fps
        stopTime = timeit.default_timer()
        if (avgFps := calculateFps( startTime,
                                    stopTime,
                                    frameNum,
                                    avgFpsCacheLen,
                                    fpsCache
                                )
        ): print(f'avgFps={avgFps}')

        # preview frame
        cv2.imshow(outputWindowName, infoFrame)

        frameNum += 1

    # cleanup
    cv2.destroyWindow(outputWindowName)

    return restart
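
# main() returns the `restart` flag (set by the tilde key above). A hedged usage
# sketch, not from the original project: keep re-running the capture loop until it
# exits without requesting a restart.
if __name__ == '__main__':
    while main():
        pass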
Example #14
    if (app1.CheckBox10.GetCheckState() == 1):
        app1.CheckBox10.Click()


def initEXBL():  #define the "Blocking" button's initialization check formula
    if (app1.CheckBox11.GetCheckState() == 1):
        app1.CheckBox11.Click()


def initEXMM():  #define the "Mini Map" button's initialization check formula
    if (app1.CheckBox12.GetCheckState() == 1):
        app1.CheckBox12.Click()


oviMdWare = winGuiAuto.findTopWindow(wantedText='OviMiddleWare',
                                     wantedClass=None,
                                     selectionFunction=None)
toolbar = winGuiAuto.findControl(oviMdWare, wantedClass="ToolbarWindow32")
tlw = ToolbarWrapper(toolbar)
tlw.PressButton(0)

time.sleep(1)

app2 = app.window_(title_re='.*Choose a file.*')

app2.TypeKeys('world_00.02.41.121.cdt')
#app2.TypeKeys('world_00.40.114.cdt')
time.sleep(1)
app2.TypeKeys('{ENTER}')

time.sleep(1)