Esempio n. 1
3
def temp():
    """Cheat-sheet of common pyautogui calls.

    Not meant to be executed: x, y, num_seconds, moveToX, etc. are
    undefined placeholders illustrating each API's signature.
    """
    pyautogui.alert('This displays some text with an OK button.')
    pyautogui.position()  # current mouse x and y
    pyautogui.onScreen(x, y)  # True if x & y are within the screen.
    pyautogui.PAUSE = 2.5   # Pause 2.5 s after every pyautogui call
    pyautogui.dragTo(x, y, duration=num_seconds)  # drag mouse to XY
    pyautogui.dragRel(xOffset, yOffset, duration=num_seconds)  # drag mouse relative to its current position
    pyautogui.click(x=moveToX, y=moveToY, clicks=num_of_clicks, interval=secs_between_clicks, button='left') # The button keyword argument can be 'left', 'middle', or 'right'.
    pyautogui.scroll(amount_to_scroll, x=moveToX, y=moveToY)
    pyautogui.mouseDown(x=moveToX, y=moveToY, button='left')
    pyautogui.mouseUp(x=moveToX, y=moveToY, button='left')
    pyautogui.typewrite('Hello world!\n', interval=secs_between_keys)  # useful for entering text, newline is Enter
    pyautogui.typewrite(['a', 'b', 'c', 'left', 'backspace', 'enter', 'f1'], interval=secs_between_keys)
    pyautogui.hotkey('ctrl', 'c')  # ctrl-c to copy
    pyautogui.hotkey('ctrl', 'v')  # ctrl-v to paste
    pyautogui.alert('This displays some text with an OK button.')
    pyautogui.confirm('This displays text and has an OK and Cancel button.')
    pyautogui.prompt('This lets the user type in a string and press OK.')
    pyautogui.screenshot('foo.png')  # returns a Pillow/PIL Image object, and saves it to a file
    pyautogui.locateOnScreen('looksLikeThis.png')
    pyautogui.locateCenterOnScreen('looksLikeThis.png')  # returns center x and y
Esempio n. 2
0
	def screenshot(self):
		"""Capture the focused window and return the saved image's path.

		On Linux this shells out to ImageMagick `import` + xdotool; on
		Windows it uses pyautogui. Maximizes the target window first via
		self.fmaximizeUO(). Returns None for any other self.os value.
		"""
		self.fmaximizeUO()
		if self.os == "Linux":
			# `import` grabs the focused X window; the trailing xdotool click
			# is issued alongside the capture.
			command("import -window $(xdotool getwindowfocus -f) /tmp/screen.png & xdotool click 1")
			return "/tmp/screen.png"
		elif self.os == "Windows":
			# Note: saved with backslashes, returned with forward slashes
			# (both are accepted by Windows APIs).
			pyautogui.screenshot("C:\\Windows\\Temp\\screen.png")
			return "C:/Windows/Temp/screen.png"
Esempio n. 3
0
def screen_shot(left_corner=None, right_corner=None):
    """Grab the screen and return it as an OpenCV BGR image.

    When both corners are given, only the rectangle spanned by
    *left_corner* (top-left) and *right_corner* (bottom-right) is captured;
    otherwise the whole screen is grabbed.
    """
    if left_corner is None or right_corner is None:
        shot = pyautogui.screenshot()
    else:
        left, top = left_corner[0], left_corner[1]
        width = right_corner[0] - left_corner[0]
        height = right_corner[1] - left_corner[1]
        shot = pyautogui.screenshot(region=(left, top, width, height))
    # PIL delivers RGB; OpenCV expects BGR.
    return cv2.cvtColor(np.array(shot), cv2.COLOR_RGB2BGR)
Esempio n. 4
0
        def func():
            # Calibration routine: a series of 3-second countdowns asks the
            # user to position the mouse at reference points, from which the
            # title-bar screenshot region and window-size offsets are derived.
            self.screenshot_button.config(state='disabled')

            # Countdown so the user can deselect (unfocus) the game window.
            for second in reversed(range(4)):
                self.screenshot_label.config(
                    text='Deselect the game window %s' % second)
                if second != 0:
                    time.sleep(1)

            region = []
            # Countdown, then sample the title bar's top-left corner.
            for second in reversed(range(4)):
                self.screenshot_label.config(
                    text='Place the mouse at the top left\nof the game\'s title bar %s' % second)
                if second != 0:
                    time.sleep(1)
            constant_top_left = pyautogui.position()
            region.extend(constant_top_left)
            # Countdown, then sample the title bar's bottom-right corner.
            for second in reversed(range(4)):
                self.screenshot_label.config(
                    text='Place the mouse at the bottom right\nof the game\'s title bar %s' % second)
                if second != 0:
                    time.sleep(1)
            constant_bottom_right = pyautogui.position()
            # region is now (left, top, width, height) as pyautogui expects.
            region.extend(
                (constant_bottom_right[0] - constant_top_left[0],
                 constant_bottom_right[1] - constant_top_left[1])
            )
            # Capture the title bar unfocused, click to focus, capture again.
            self.deselected_screenshot = pyautogui.screenshot(region=region)
            pyautogui.click()
            self.selected_screenshot = pyautogui.screenshot(region=region)

            # Same countdown/sample dance for the full window's corners.
            for second in reversed(range(4)):
                self.screenshot_label.config(
                    text='Place mouse at the top left\nof the entire game window %s' % second)
                if second != 0:
                    time.sleep(1)
            top_left = pyautogui.position()
            for second in reversed(range(4)):
                self.screenshot_label.config(
                    text='Place mouse at the bottom right\nof the entire game window %s' % second)
                if second != 0:
                    time.sleep(1)
            bottom_right = pyautogui.position()

            # Offsets of the full window's corners relative to the title-bar
            # corners sampled above.
            self.screen_size = [
                constant_top_left[0] - top_left[0],
                constant_top_left[1] - top_left[1],
                bottom_right[0] - constant_bottom_right[0],
                bottom_right[1] - constant_bottom_right[1]
            ]

            self.screenshot_taken = True
            self.screenshot_label.config(text='Screenshot Taken')
            self.screenshot_button.config(
                state='normal', text='Retake Screenshot')
Esempio n. 5
0
 def click(self, x, y, button, press):
     """Mouse-hook callback: on the second left-button press, screenshot the
     rectangle spanned by the two recorded positions and stop the listener.

     Any non-left button stops the listener immediately.
     """
     if button == 1:
         if press:
             # Record this press's cursor position (self.m is the mouse hook).
             self.a.append(self.m.position())
             if(len(self.a) == 2):
                 #print(self.a)
                 x1,y1,x2,y2 = self.a[0][0],self.a[0][1],self.a[1][0],self.a[1][1]
                 # NOTE(review): pyautogui.screenshot's region argument is
                 # (left, top, WIDTH, HEIGHT), so (x1, y1, x2-60, y2-150)
                 # treats the adjusted second corner as a size, not a
                 # coordinate -- confirm this is intended.
                 pyautogui.screenshot('C:/Python27/output.png',(x1,y1,x2-60,y2-150))
                 self.a = []
                 self.stop()
     else:  # Exit if any other mouse button used
         self.stop()
Esempio n. 6
0
def screenCapture(savePath):
    """Capture the full screen to SAVE_SCREEN_MAP_PATH.

    NOTE(review): the savePath parameter is accepted but never used -- the
    screenshot always goes to the module-level SAVE_SCREEN_MAP_PATH.
    Confirm whether savePath should be honored instead.
    """
    global SAVE_SCREEN_MAP_PATH
    # Previous win32-based capture path, kept for reference:
    # hwnd = 0
    # hwndDC = win32gui.GetWindowDC(hwnd)
    # mfcDC=win32ui.CreateDCFromHandle(hwndDC)
    # saveDC=mfcDC.CreateCompatibleDC()
    # saveBitMap = win32ui.CreateBitmap()
    # saveBitMap.CreateCompatibleBitmap(mfcDC, size[0], size[1])
    # saveDC.SelectObject(saveBitMap)
    # saveDC.BitBlt((0,0),SCREEN_SIZE, mfcDC, SCREEN_POS, win32con.SRCCOPY)
    # saveBitMap.SaveBitmapFile(saveDC,SAVE_SCREEN_MAP_PATH)
    # Image.open(SAVE_SCREEN_MAP_PATH).save(SAVE_SCREEN_MAP_PATH[:-4]+".png")
    pyautogui.screenshot(SAVE_SCREEN_MAP_PATH)
Esempio n. 7
0
def locateAllOnScreen(needleImg):
    """Locate every occurrence of *needleImg* on the current desktop.

    Screenshots the desktop to "desktop.png", then grayscale
    template-matches the needle image (path) against it.

    Returns the np.where coordinate arrays (row indices, col indices) of
    all matches scoring >= 0.8.
    """
    # pyautogui.screenshot writes the file synchronously before returning,
    # so the original 5-second sleep was unnecessary; the unused binding of
    # the returned image is dropped as well.
    pyautogui.screenshot("desktop.png")

    haystackImg = cv2.imread("desktop.png")
    grayImg = cv2.cvtColor(haystackImg, cv2.COLOR_BGR2GRAY)
    needleImg = cv2.imread(needleImg, 0)  # 0 = load as grayscale
    width, height = needleImg.shape[::-1]

    # Threshold the normalized cross-correlation map to find all matches.
    res = cv2.matchTemplate(grayImg, needleImg, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    matchCoords = np.where(res >= threshold)

    # Uncomment to view the rectangular match regions:
    # for pt in zip(*matchCoords[::-1]):
    #     cv2.rectangle(haystackImg, pt, (pt[0] + width, pt[1] + height), (0, 0, 255), 2)
    # cv2.imwrite('result.png', haystackImg)

    return matchCoords
def color_height(measure_start):
    """Count contiguous same-colored pixels straight DOWN from *measure_start*.

    Bug fix: the original incremented the x coordinate (making it identical
    to color_width); a height measurement must advance along the y axis.
    """
    screenshot = pyautogui.screenshot()
    height = 0
    start_color = screenshot.getpixel(measure_start)
    while screenshot.getpixel((measure_start[0], measure_start[1] + height)) == start_color:
        height += 1
    return height
def color_width(measure_start):
    """Count contiguous same-colored pixels to the right of *measure_start*."""
    shot = pyautogui.screenshot()
    target = shot.getpixel(measure_start)
    x0, y0 = measure_start[0], measure_start[1]
    width = 0
    # Walk rightward until the color changes.
    while shot.getpixel((x0 + width, y0)) == target:
        width += 1
    return width
Esempio n. 10
0
def screenshotThread():
    """Background loop: while `alive`, whenever it is this worker's turn
    (myID == IDcheck) and a screenshot was requested (req == 1), capture the
    screen, save it as a JPEG, and POST it to the control server."""
    OOO="                                           "
    global req
    global alive
    printer.p(OOO+"screenshotThread === checking in...")
    n=0
    while alive:
        #if time.time()-start>20: alive=False #autorestart
        if myID==IDcheck and req==1:
            #pyautogui.screenshot("screenshot.png")
            img=pyautogui.screenshot()
            try:
                # quality=0 is PIL's lowest JPEG quality -- smallest upload.
                img.save(abspath+"screenshot.jpg", "JPEG", quality=0, optimize=True, progressive=False)
                n+=1
            except:
                # NOTE(review): bare except hides the actual error; consider
                # catching OSError and logging it.
                printer.p(OOO+"screenshotThread === couldn't save it?")
                pass

            try:
                # NOTE(review): the file handle is never closed explicitly.
                files = {'media': open(abspath+'screenshot.jpg', 'rb')}
                r=requests.post('http://'+ip+'/zerog/upload.php', files=files)
                printer.p(OOO+"screenshotThread === screenshot#"+str(n))
            except:
                printer.p(OOO+"screenshotThread === couldn't upload it?")
                pass
        else: time.sleep(5)

    printer.p(OOO+"screenshotThread === ...checking out")
Esempio n. 11
0
def shoot(x1, y1, x2, y2, *args, **kwargs):
    """Screenshot the rectangle (x1, y1)-(x2, y2) and return it as an
    OpenCV array.

    By default a grayscale image is returned; pass 'hsv' or 'rgb' as a
    positional flag to get an HSV or BGR image instead.
    """
    # pyautogui regions are (left, top, width, height), not two corners.
    w = x2 - x1
    h = y2 - y1
    img = pyautogui.screenshot(region=(x1, y1, w, h))  # PIL image, RGB order

    # Convert to an array usable by OpenCV.
    img = np.array(img)

    # The original wrapped this loop in a pointless try/except that could
    # never trigger; plain comparisons cannot raise here.
    for arg in args:
        if arg == 'hsv':
            bgr_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)

        if arg == 'rgb':
            # NOTE: despite the flag's name, this returns BGR channel order,
            # which is what OpenCV's display/IO functions expect.
            return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    # Bug fix: the source array is RGB (from PIL), so grayscale conversion
    # must use COLOR_RGB2GRAY; COLOR_BGR2GRAY swapped the R/B weights.
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
Esempio n. 12
0
def cv2_match(tmp, threshold=0.8, grayscale="auto"):
    """Template-match *tmp* against a fresh screenshot and return the center
    points of all distinct matches scoring above *threshold*.

    grayscale="auto" infers the mode from the template's channel count, and
    the screenshot/template are converted so their formats agree. Hits
    closer together than half the template's smaller side are deduplicated.
    (`p` is presumably the pyautogui module and get_dis a distance helper
    defined elsewhere in this module -- confirm.)
    """
    if grayscale == "auto":
        # A 2-D array means the template is already grayscale.
        if len(tmp.shape) == 2:
            grayscale = True
        else:
            grayscale = False
    q = p.screenshot()
    # PIL gives RGB; convert to BGR for OpenCV.
    w = cv2.cvtColor(np.array(q), cv2.COLOR_RGB2BGR)
    if grayscale:
        w = cv2.cvtColor(w, cv2.COLOR_BGR2GRAY)
        if len(tmp.shape) >= 3:
            tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
    else:
        if len(tmp.shape) < 3:
            tmp = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)
    res = cv2.matchTemplate(w, tmp, cv2.TM_CCOEFF_NORMED)
    res = np.where(res > threshold)
    # dis: minimum separation between two distinct hits.
    # l/h: template half-width/half-height, used to shift corner -> center.
    dis = min(tmp.shape[:2]) / 2
    l = int(tmp.shape[1] / 2)
    h = int(tmp.shape[0] / 2)
    result = []
    for i in range(len(res[0])):
        pos = (res[1][i] + l, res[0][i] + h)
        flag = True
        for j in result:
            if get_dis(pos, j) < dis:
                flag = False
                break
        if flag:
            result.append(pos)
    return result
Esempio n. 13
0
def Start(pos):
    """Tkinter polling loop: update the *pos* label with the cursor position
    and the global `rgb` label with the pixel color under the cursor, then
    reschedule itself every 10 ms."""
    pyautogui.FAILSAFE = False
    global root, running
    im=pyautogui.screenshot()
    pos['text']=str(pyautogui.position())
    # NOTE(review): position() is queried twice; if the mouse moves between
    # the calls, the displayed position and color can disagree.
    rgb['text']=str(im.getpixel(pyautogui.position()))
    root.update_idletasks()
    root.after(10, lambda:Start(pos))
Esempio n. 14
0
 def read(self):
     """Capture the screen (cropped to self.bbox when set), resize it to the
     configured (self.width, self.height), swap the channel order, and return
     an (is_opened, frame) pair like cv2.VideoCapture.read()."""
     frame = pyautogui.screenshot()
     if self.bbox:
         frame = frame.crop(self.bbox)
     frame = np.asarray(frame)
     resized = cv2.resize(frame, (self.width, self.height))
     swapped = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
     return (self._is_opened, swapped)
Esempio n. 15
0
def screenShot():
	"""Capture the current screen into the project's raw_data\\screenshot
	folder under a timestamped file name, then announce success by voice."""
	base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
	save_dir = os.path.join(base_dir, r'raw_data\screenshot')
	#print save_dir
	file_name = '{0}_{1}.{2}'.format('screnshot', time.strftime('%d%m%y_%I%M%S'), 'jpg')
	pyautogui.screenshot(os.path.join(save_dir, file_name))
	voiceOut.Speak("your screen is successfully captured")
	return
Esempio n. 16
0
def screenshotOpencv(allConfigs):
    """Take a pyautogui (Pillow) screenshot, optionally crop it to the
    configured window rectangle, and return it as an OpenCV BGR array."""
    position = allConfigs['position']
    shot = pyautogui.screenshot()
    if position['crop_window'] == 'yes':
        box = (position['window_start_x'], position['window_start_y'],
               position['window_end_x'], position['window_end_y'])
        shot = shot.crop(box)
    # Pillow delivers RGB; OpenCV expects BGR.
    return cv2.cvtColor(np.array(shot), cv2.COLOR_RGB2BGR)
Esempio n. 17
0
def locateOnScreen(needleImg):
    """Screenshot the desktop to "desktop.png" and return the coordinates of
    *needleImg* within it (via get_match_coordinates)."""
    # pyautogui.screenshot writes the file synchronously before returning,
    # so the original 5-second sleep (and the unused binding of the returned
    # image) was unnecessary.
    pyautogui.screenshot("desktop.png")
    matchCoords = get_match_coordinates(needleImg, "desktop.png")

    return matchCoords
Esempio n. 18
0
def test_global_prefs(cartavisInstance, cleanSlate):
    """
    Test that object specific settings can also act globally.
    Set the animator to jump. Save the preferences. Open two animators. Restore the preferences.
    Check the second animator is also set to jump.
    """
    # Set the animator to jump
    settingsButton = ImageUtil.locateCenterOnScreen( "test_images/animatorSettingsCheckBox.png")
    # Identity comparison with None is the idiomatic (and safe) check --
    # a coordinate object's __eq__ need not behave with != None.
    assert settingsButton is not None
    pyautogui.click( x=settingsButton[0], y=settingsButton[1])

    # Click the jump radio button
    jumpButton = ImageUtil.locateCenterOnScreen('test_images/jumpButton.png')
    assert jumpButton is not None
    pyautogui.doubleClick( x=jumpButton[0], y=jumpButton[1])

    # Save a snapshot. Make sure preferences are checked and
    # layout and data are not checked
    s = cartavisInstance.newSnapshot('some_session_id','tSnapshotPreferences', False, True, False, '')
    s.save()

    # Find an image window and change it into an animator
    imageWindow = ImageUtil.locateCenterOnScreen('test_images/imageWindow.png')
    assert imageWindow is not None
    pyautogui.rightClick( x=imageWindow[0], y=imageWindow[1])
    # Navigate the context menu with the keyboard.
    pyautogui.press('right')
    pyautogui.press('right')
    pyautogui.press('down')
    pyautogui.press('return')
    time.sleep(2)

    # Find the settings button on the animator and click it so the jump will be visible
    settingsButton = ImageUtil.locateCenterOnScreen('test_images/settingsCheckBox.png')
    assert settingsButton is not None
    pyautogui.click( x=settingsButton[0], y=settingsButton[1])

    # Restore the preferences
    s[0].restore()

    # Check that both animators are not displaying jump
    # Verify that the animator jump end behaviour is checked in the screenshot after the tests have been run
    pyautogui.screenshot('layout_check/bothAnimatorsJump.png')
    s[0].delete()
Esempio n. 19
0
 def checkItem(self, x, y, img=None):
     """Jiggle the cursor over (x, y) to trigger the item tooltip, then
     return a screenshot of the 800x600 game area.

     The *img* parameter is a reference image for a comparison that is not
     implemented yet (see TODO below).
     """
     pyautogui.moveTo(x, y, duration=0.1)
     pyautogui.moveTo(x+1, y, duration=0.1)
     pyautogui.moveTo(x, y, duration=0.1)
     im = pyautogui.screenshot(region=(0, 0, 800, 600))
     if img is not None:
         # TODO check they are not the same
         pass
     time.sleep(0.1)
     return im
Esempio n. 20
0
 def detect_position(self):
     """Locate the game window on screen by searching a screenshot for known
     anchor images and deriving the window origin from the match offset.

     Returns the (x, y) origin when an anchor is found, else None.
     """
     screen = ag.screenshot()
     for name, offset_x, offset_y in [('start', 288, 252), ('select_title', 28, 24)]:
         position = self.find_image(screen, self.images[name])
         # Idiom fix: compare to None by identity -- `!= None` relies on the
         # match object's __eq__ and is non-idiomatic.
         if position is not None:
             x, y = position
             # Shift from the anchor's location back to the window origin.
             x -= offset_x
             y -= offset_y
             self.set_position(x, y)
             return (x, y)
     return None
Esempio n. 21
0
    def readGameState(self):
        """Poll a rescaled screenshot for the game-over marker and drive the
        PLAYING/OVER state machine: on OVER, record fitness or restart; on
        (re)start, reset points, keys, sensors and output flags and fire the
        start callback."""
      # Read GameOver
        logger.info("In thread %d" %(threading.active_count(),))
        # Scan for the dinosaur's color at the game-over offset in a
        # screenshot resized to the expected resolution.
        found = Scanner.scanUntil(
        [
          self.offset[0] + self.gameOverOffset[0],
          self.offset[1] + self.gameOverOffset[1]
        ],

        [2, 0], COLOR_DINOSAUR, False, 20, pyautogui.screenshot().resize((screenSize.width, screenSize.height)).convert('RGB'))
        logger.info("%s, %s" %(found,self.gamestate ))
        if found and not self.gamestate == 'OVER':
            self.gamestate = 'OVER'

            # Clear keys
            self.setGameOutput(0.5)

            # Trigger callback and clear
            if self.setGameEnd:
                if self.setGameEnd == 1:
                    self.genome.set_fitness(self.points)
                else:
                    self.startNewGame()
                self.setGameEnd = False

            # console.log('GAME OVER= '+self.points)

        elif not found and not self.gamestate == 'PLAYING':
            logger.info('163 trying to start')
            self.gamestate = 'PLAYING'
            logger.info('164 trying to start')
             # Clear points
            self.points = 0
            self.lastScore = 0

            # Clear keys
            self.setGameOutput(0.5)

            # Clear sensors
            self.sensors[0].lastComputeSpeed = 0
            self.sensors[0].lastSpeeds = []
            self.sensors[0].lastValue = 1
            self.sensors[0].value = 1
            self.sensors[0].speed = 0
            self.sensors[0].size = 0

            # Clear output flags
            self.lastOutputSet = 'NONE'

            # Trigger callback and clear
            if self.onGameStart:
                logger.info('185 on gamestart %s'%(self.onGameStart))
                self.onGameStart_callback()
                self.onGameStart = False
Esempio n. 22
0
 def moveCharToTheLeftOfFM(self):
     """Hold the left arrow key until the pixel at (9, 138) of the 800x600
     game area falls inside the expected marker color ranges, i.e. the
     character has reached the left edge."""
     pyautogui.keyDown('left')
     at_left_edge = False
     while not at_left_edge:
         frame = pyautogui.screenshot(region=(0, 0, 800, 600))
         marker = frame.getpixel((9, 138))
         # Chained comparisons express the per-channel color windows.
         at_left_edge = (212 < marker[0] < 224
                         and 200 < marker[1] < 208
                         and 14 < marker[2] < 18)
     pyautogui.keyUp('left')
Esempio n. 23
0
    def save_screenshot(self, name=None):
        """Write a screenshot into SCREENSHOT_DIR (created on demand) and
        record its path and timestamp in self.screenshots.

        When *name* is omitted, a "<test id>.<counter>.png" name is built
        from the running test and the per-instance screenshot counter.
        """
        if name is None:
            name = '{test_name}.{counter}.png'.format(
                test_name=self.id(),
                counter=self.screenshot_counter,
            )

        if not os.path.isdir(self.SCREENSHOT_DIR):
            os.mkdir(self.SCREENSHOT_DIR)

        absolute_path = os.path.join(self.SCREENSHOT_DIR, name)

        pyautogui.screenshot(absolute_path)
        self.screenshot_counter += 1
        self.screenshots.append({
            'time': time.time(),
            'filename': absolute_path,
        })
def takecodeshot():
	"""Click through the UI at fixed coordinates, screenshot the code area
	(155x80 rectangle at 345,330) for OCR, then run the fetchcode script.

	NOTE(review): all coordinates and paths are machine-specific.
	"""
	pyautogui.click(1275,730)
	time.sleep(0.5)
	pyautogui.click(1020,360)
	time.sleep(0.5)
	im = pyautogui.screenshot("C:\\Users\\Sajjad\\Documents\\Instagram\\pytesser_v0.0.1\\image.png", region=(345,330, 155, 80))
	time.sleep(0.5)
	pyautogui.click(1275,730)
	time.sleep(0.5)
	pyautogui.click(250,230)
	time.sleep(0.5)

	os.chdir( 'C:\\Users\\Sajjad\\Documents\\Instagram\\pytesser_v0.0.1' )
	os.system("C:\\Users\\Sajjad\\Documents\\Instagram\\pytesser_v0.0.1\\fetchcode.py")
Esempio n. 25
0
def is_hand1(pos):
	"""Detect animation in a 50x20 screen region by comparing three
	screenshots taken ~0.1-0.3 s apart; any pixel difference between any
	pair means the hand is active.

	NOTE(review): `reg` is computed from *pos* but never used -- the
	screenshots read the module-level `region`/`acoff` instead. Confirm
	which rectangle was actually intended.
	"""
	reg = (pos[0], pos[1], 50, 20)
	active = False

	im1 = pyautogui.screenshot(region=(region[0]+acoff[0], region[1]+acoff[1], 50, 20))
	time.sleep(.1)

	im2 = pyautogui.screenshot(region=(region[0]+acoff[0], region[1]+acoff[1], 50, 20))
	time.sleep(.2)

	im3 = pyautogui.screenshot(region=(region[0]+acoff[0], region[1]+acoff[1], 50, 20))
	time.sleep(.3)

	# ImageChops.difference(...).getbbox() is None only when the two images
	# are pixel-identical.
	if ImageChops.difference(im1, im2).getbbox() is not None:
		active = True

	if ImageChops.difference(im2, im3).getbbox() is not None:
		active = True

	if ImageChops.difference(im1, im3).getbbox() is not None:
		active = True

	return active
Esempio n. 26
0
def locateCenterOnScreen(needleImg):
    """Screenshot the desktop to "desktop.png", find *needleImg* in it, and
    return the center coordinates of the match (None when nothing matches).
    """
    # pyautogui.screenshot writes the file synchronously, so the original
    # 5-second sleep (and the unused binding of the returned image) was
    # unnecessary; the irregular 8-space indentation is also normalized.
    pyautogui.screenshot("desktop.png")
    matchCoords = get_match_coordinates(needleImg, "desktop.png")
    # Identity test: "== None" relies on the result object's __eq__; a match
    # result must be compared to None with "is".
    if matchCoords is None:
        return None
    return locateCenter(matchCoords)
Esempio n. 27
0
 def checkStoresLudi(self):
     """Sweep three rows of store positions from right to left,
     double-clicking each spot until the store window opens (detected by a
     near-white probe pixel), then inspect it with self.checkStore().

     Refactor: the original repeated the identical row-scanning loop three
     times; it is extracted into one nested helper.
     """
     def scan_row(x, y, announce_move=False):
         # Walk leftward from x in 100 px steps; at each step keep
         # double-clicking until the probe pixel reports the store as open.
         while x > 20:
             store_open = False
             while not store_open:
                 pyautogui.click(clicks=2, x=x, y=y)
                 # NOTE(review): pyautogui regions are (left, top, WIDTH,
                 # HEIGHT), so (620, 300, 621, 301) grabs a 621x301 area,
                 # not the 1x1 probe it appears to intend. Only pixel (0, 0)
                 # is read, so behavior is unaffected -- kept as-is.
                 im = pyautogui.screenshot(region=(620, 300, 621, 301))
                 color = im.getpixel((0, 0))
                 # Open when every channel exceeds the reference white.
                 store_open = all(i > j for i, j in zip(color, defines.white))
                 x -= 100
             self.checkStore()
             if announce_move:
                 print("move to next position")

     scan_row(770, 370, announce_move=True)
     print("next row")
     scan_row(800, 160)
     print("next row")
     scan_row(800, 60)
Esempio n. 28
0
 def grab(self, delay=0, bbox=None):
   """Make a screenshot of the screen. This is the basis for methods like :func:`ClientElement.isvisible()`.

   :param delay: Seconds to wait before grabbing the screen.
   :type delay: int
   :param bbox: Bounding box
   :type bbox: :class:`BBox`
   :returns: Image of the screen region in ``bbox`` or the full screen.
   :rtype: :class:`numpy.ndarray`
   """
   # Off-by-one fix: range(1, delay) waited only delay-1 seconds, breaking
   # the documented contract; range(1, delay + 1) keeps the 1-based
   # countdown display while sleeping the full delay.
   for i in range(1, delay + 1):
     print(str(i) + "..")
     time.sleep(1)
   # pyautogui regions are (left, top, width, height); bbox holds two
   # inclusive corners (x1, y1, x2, y2), hence the +1 on each extent.
   # Idiom fix: "bbox is not None" instead of "not bbox is None".
   rect = tuple(bbox[0:2]) + (bbox[2] - bbox[0] + 1, bbox[3] - bbox[1] + 1) if bbox is not None else None
   # NOTE(review): gui.screenshot returns a PIL Image, not an ndarray --
   # the :rtype: above looks stale; confirm against callers.
   return gui.screenshot(region=rect)
    def find_minion(self, minion_name, threshold=0.93, modify=False):
        """
        Finds the location of a minion(s) through threshold detection. Some
        fine-tuning may be necessary if the function is unable to find the
        location of a minion (given that the minion exists on the board) given
        the current threshold value.

        NOTE: attack or health buffs can interfere with image detection.
        """
        p.click(self.left + 5, self.top + 5)    # ensure HS is active window
        p.screenshot('game.png')

        # Python 2.7
        # Map apostrophes, spaces and dots to dashes to derive the template
        # file name from the minion's display name.
        table = string.maketrans("' .","---")
        minion_img = minion_name.translate(table).lower() + '.png'

        img_rgb = cv2.imread('game.png')
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        # sys.path[-1] is used as the directory holding the minion templates.
        template = cv2.imread(sys.path[-1] + minion_img,0)
        w, h = template.shape[::-1]

        res = cv2.matchTemplate(img_gray,template,cv2.TM_CCORR_NORMED)

        # Record (or overwrite, when modify=True) this minion's threshold.
        if minion_img not in self.threshold_dict or modify:
            self.threshold_dict[minion_img] = threshold
            # NOTE(review): threshold.txt is rewritten here but `f` is never
            # closed or flushed explicitly -- confirm that is intentional.
            self.threshold_file.close()
            f = open('threshold.txt','w')
            f.write(str(self.threshold_dict))

        loc = np.where( res >= self.threshold_dict[minion_img])

        # Draw the match rectangles into a debug image for inspection.
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        cv2.imwrite('testpic.png',img_rgb)

        return loc
Esempio n. 30
0
def test_restore_missing(cartavisInstance, cleanSlate):
    """
    The purpose of this test is to test that if we try to restore preferences for 
    a widget that is not present there are no problems. 
    We verify an animator is present. We save the preferences. We remove the animator
    We verify that there is not a problem by checking error status is empty and window
    count remains the same with no animator.
    """
    # Verify that there is just one animator
    assert len(cartavisInstance.getAnimatorViews()) == 1

    # Save a snapshot. Make sure preferences are checked and 
    # layout and data are not checked
    s = cartavisInstance.newSnapshot('some_session_id','tSnapshotPreferences', False, True, False, '')
    s.save()

    # Locate the animator and bring up the right-context menu,
    # changing to a CasaImageLoader.
    animWindow = ImageUtil.locateCenterOnScreen('test_images/animWindow.png')
    # NOTE(review): unlike sibling tests, there is no assert that the image
    # was found -- a miss raises TypeError on the indexing below instead of
    # a clear assertion failure.
    pyautogui.rightClick( x=animWindow[0], y=animWindow[1])
    time.sleep(2)
    # Keyboard navigation through the context-menu entries.
    pyautogui.press('down')
    pyautogui.press('right')    
    pyautogui.press('return')
    time.sleep(2)

    # Verify that there are no animation windows 
    assert len(cartavisInstance.getAnimatorViews()) == 0

    # Restore the preferences
    s[0].restore()
    time.sleep(2)

    # No way to check for errors, hence, take a screenshot
    pyautogui.screenshot('layout_check/checkNoErrors.png')
    s[0].delete()
Esempio n. 31
0
import numpy as np
import pyautogui
import cv2

# Screenshot the desktop, template-match "isp.png" against it with
# TM_SQDIFF_NORMED (lower score = better match), and double-click the
# center of the best match.
img = pyautogui.screenshot()  # full-screen PIL image
#img.save('screenshot.png')

#target = cv2.imread("target.png")
# PIL delivers RGB; convert to BGR for OpenCV.
target = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

template = cv2.imread("isp.png")

theight, twidth = template.shape[:2]

result = cv2.matchTemplate(target, template, cv2.TM_SQDIFF_NORMED)

cv2.normalize(result, result, 0, 1, cv2.NORM_MINMAX, -1)

# For TM_SQDIFF_NORMED the best match is at the MINIMUM location.
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

strmin_val = str(min_val)

# Aim the click at the center of the matched template.
click_x = int(min_loc[0] + (twidth / 2))
click_y = int(min_loc[1] + (theight / 2))

pyautogui.doubleClick(click_x, click_y, button='left')

#pyautogui.moveTo(click_x, click_y, duration=0.25)

#cv2.rectangle(target,min_loc,(min_loc[0]+twidth,min_loc[1]+theight),(0,0,225),2)
Esempio n. 32
0
import imutils
import pyautogui
import cv2

# Continuously capture the screen and preview it in an OpenCV window until
# the user presses 'q'.
while True:
    # NOTE(review): the pixel() result is discarded -- its purpose here is
    # unclear; confirm whether it can be removed.
    pyautogui.pixel(965, 442)
    pyautogui.screenshot("screenshot.png")
    image = cv2.imread("screenshot.png")
    cv2.imshow("Screenshot", image)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        # Bug fix: the original kept looping after destroying the windows,
        # immediately recreating them; exit the loop instead.
        break
Esempio n. 33
0
import pyautogui
import time

# Bug fix: Image.save() returns None, so the original chained call bound
# None to `im`. Keep the Image object, then save it.
im = pyautogui.screenshot()
im.save('teste.png')

Esempio n. 34
0
def screenshot():
    """Capture the full screen and save it into the user's Screenshots folder."""
    img = pyautogui.screenshot()
    # Bug fix: the original passed the Screenshots DIRECTORY with no file
    # name or extension, which makes PIL's save() raise because it cannot
    # infer an output format. Save to a concrete .png file inside it.
    img.save("C:/Users/hp/Pictures/Screenshots/screenshot.png")
Esempio n. 35
0
# Record mouse-interaction training data: poll the left/right button state
# via win32api and, on every left-button transition, save a screenshot named
# with a timestamp, the cursor position, and the active window's bounds.
state_left = win32api.GetKeyState(0x01)  # Left button down = 0 or 1. Button up = -127 or -128
state_right = win32api.GetKeyState(0x02)  # Right button down = 0 or 1. Button up = -127 or -128

large_movement_picture = None
movement_start_time = None
previous_position = pyautogui.position()
window_bounds = GetWindowRect( WindowFromPoint( (0, 0) ) )
window_bounds_text = '[' + ','.join(str(x) for x in window_bounds) + ']'

while True:
    a = win32api.GetKeyState(0x01)
    b = win32api.GetKeyState(0x02)
    position = pyautogui.position()
    if a != state_left:  # Button state changed
        state_left = a
        pic = pyautogui.screenshot()

        # Negative state means the button is now held down.
        if a < 0:
            # Keep the window bounds only when holding down the mouse button,
            # because the window size can change when the button is released.
            window_bounds = GetWindowRect( WindowFromPoint( position ) )
            window_bounds_text = '[' + ','.join(str(x) for x in window_bounds) + ']'
            pic.save('data/raw/' + str( int( time.time() * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mousedown.png')
            print('Saving mousedown screenshot')
        else:
            pic.save('data/raw/' + str( int( time.time() * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mouseup.png')
            print( "Saving mouseup screenshot" )

        # Flush any pending large-movement capture alongside the click event.
        if large_movement_picture is not None:
            large_movement_picture.save('data/raw/' + str( int( movement_start_time * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mousemove.png')
            print( "Saving mousemovement screenshot" )
            large_movement_picture = None
Esempio n. 36
0
    print("--", _id, _ntn)
    if _ntn.find("-") > 0:
        driver.find_element_by_xpath("//select[@id='ctl00_ContentPlaceHolder1_DDLS0004001']/option[text()='NTN']").click()
    else:
        driver.find_element_by_xpath("//select[@id='ctl00_ContentPlaceHolder1_DDLS0004001']/option[text()='CNIC']").click()

    driver.find_element_by_id('ctl00_ContentPlaceHolder1_TXTS1003002').clear()
    time.sleep(1)

    driver.find_element_by_xpath("//input[@id='ctl00_ContentPlaceHolder1_TXTS1003002']").send_keys(_ntn)
    time.sleep(1)

    kk = 0
    captcha_error = False
    while True:
        pyautogui.screenshot(screenshot_file, region=(33, 945, 247, 51))
        time.sleep(1)

        captcha_result = get_captcha_text()
        if captcha_result == "":
            kk = kk + 1
            if kk > 3:
                captcha_error = True
                break
            driver.find_element_by_xpath("//input[@id='ctl00_ContentPlaceHolder1_imgReload']").click()
            time.sleep(1)
            continue
        else:
            captcha_result = captcha_result.upper().replace(" ", "", 10)
            driver.find_element_by_id('ctl00_ContentPlaceHolder1_txtCapcha').clear()
            time.sleep(1)
def screenshot():
    """Grab the full screen and write it to the fixed Jarvis save path."""
    pyautogui.screenshot().save("D:\\College\\OTHERS\\Udemy\\AI_Jarvis using Python\\s.png")
#CONTAGEM ME SEGUNDOS
contador = 10

#INICIANDO O LOOP DO SCRIPT.
while True:
    now = datetime.datetime.now()
    now_two_params = str(now).split(" ")
    date = str(now_two_params[0])
    raw_time = str(now_two_params[1]).split(".")
    time_raw = str(raw_time[0])
    time_clean = time_raw.replace(':', '')

    try:
        print("Tirando uma Captura de Tela...")
        screenshot = pyautogui.screenshot(
            path + screenshotName + date + "_" + time_clean + ".png",
            region=(x_start_point, y_start_point, x_area, y_area))
    except Exception as e:
        print(e)

    print("Captura de Tela OK! ")
    files_path = os.path.join(path, '*')
    files = sorted(glob.iglob(files_path), key=os.path.getatime, reverse=True)

    last_screenshot = files[0]
    print("Ultima Captura de Tela: \n" + str(last_screenshot))

    # #######################################################################33

    #E-MAIL DO REMETENTE.
    email_user = '******'
Esempio n. 39
0
import pyautogui, time, os
from ftplib import FTP

date = time.strftime('%X')

# Take a screenshot and save it. (Bug fix: Image.save() returns None, so the
# original `file = pic.save(...)` bound None and shadowed the builtin name.)
pic = pyautogui.screenshot()
pic.save('Screenshot.png')

ftp = FTP('10.101.200.40')
ftp.login("quentin", "deborde")

fileName = "Screenshot.png"
# Context manager guarantees the handle is closed even if the upload raises.
with open(fileName, 'rb') as fh:
    ftp.storbinary('STOR ' + date + "_" + fileName, fh)
os.remove(fileName)
Esempio n. 40
0
def take_screenshot():
    """Capture the game area (SCREENSHOT_REGION excludes the top and bottom
    button bars) and return the PIL image; in DEBUG mode also dump it to
    polybridge.png."""
    shot = pyautogui.screenshot(region=SCREENSHOT_REGION)
    if DEBUG:
        shot.save('polybridge.png')
    return shot
Esempio n. 41
0
# Collect system info (boot time, CPU frequency) and a screenshot; when the
# save directory cannot be entered, report the error through the Telegram
# bot and exit.
zone = psutil.boot_time()
# NOTE(review): this rebinds the name `time` to a datetime object, shadowing
# any imported time module from here on -- confirm that is intended.
time = datetime.fromtimestamp(zone)
cpu = psutil.cpu_freq()
os.getcwd()
try:
    # NOTE(review): the directory name carries a trailing space (used
    # consistently below) -- verify that is the real directory name.
    os.chdir(r"Direct_save ")
except OSError:

    @bot.message_handler(commands=['start'])
    def start_message(message):
        bot.send_message(message.chat.id, "[Error]: Location not found!")
        bot.stop_polling()

    bot.polling()
    raise SystemExit
screen = pyautogui.screenshot("screenshot.jpg")
ends = datetime.now()
workspeed = format(ends - start)
os.getcwd()
try:
    os.chdir("Direct_save ")
except OSError:

    @bot.message_handler(commands=['start'])
    def start_message(message):
        bot.send_message(message.chat.id, "[Error]: Location not found!")
        bot.stop_polling()

    bot.polling()
    raise SystemExit
file = open("info.txt", "w")
Esempio n. 42
0
def action(character, ml):
    """Wait for the scene to settle, screenshot it, then dispatch the ML
    model's predicted action for *character*."""
    time.sleep(3.5)
    # `auto` is presumably the pyautogui module; the capture is written to
    # img.jpg, which ml.predict() appears to consume -- TODO confirm.
    auto.screenshot("img.jpg")
    switch_action(ml.predict(), character)
import numpy as np
import cv2
import pyautogui
# Capture the screen as a PIL image.
image = pyautogui.screenshot()
# PIL delivers RGB; OpenCV expects BGR, hence the channel swap.
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
# writing it to the disk using opencv
cv2.imwrite("imaget.png", image)
Esempio n. 44
0
import time

import pyautogui

# Give the user five seconds to bring the e-book window to the front.
time.sleep(5)
# Adjust the range to match the number of pages to capture.
for page in range(1000):
    # Advance one page (key down + key up = one press of the right arrow).
    pyautogui.keyDown('right')
    pyautogui.keyUp('right')
    # Capture the page and save it.
    pyautogui.screenshot('images/page_%d.pdf' % page)
    time.sleep(0.05)
Esempio n. 45
0
def capture(sPoint, rel_x, rel_y):
    """Capture the rectangle starting at sPoint with size rel_x x rel_y.

    The image is written to '<imgName>.jpg' using the module-level imgName
    value (defined elsewhere in the file), then a completion message
    ("page saved") is printed.
    """
    # The returned Image object was bound to an unused local (im3) in the
    # original; drop the dead binding.
    pyautogui.screenshot(str(imgName) + '.jpg',
                         region=(sPoint[0], sPoint[1], rel_x, rel_y))
    print(str(imgName) + "페이지 저장 완료")
Esempio n. 46
0
from find_ball import find_ball
from find_targeting_line import find_targeting_line

# change depending on game screen size
game_res = [1152, 720]
border = [0, 20]

# threshold = 0.95

try:
    while True:
        img = cv2.cvtColor(
            np.array(
                pyautogui.screenshot('screen.png',
                                     region=(border[0], border[1],
                                             game_res[0] + border[0],
                                             game_res[1] + border[1]))),
            cv2.COLOR_BGR2GRAY)

        # cut out the bottom middle third of the screen as ROI for the ball
        ball_roi = img[int(img.shape[0] / 2):img.shape[0],
                       int(img.shape[1] * 1 / 3):int(img.shape[1] * 2 / 3)]

        # insert the ball ROI back into the full image
        img[int(img.shape[0] / 2):img.shape[0],
            int(img.shape[1] * 1 / 3):int(img.shape[1] * 2 /
                                          3)] = find_ball(ball_roi)

        # cut out the bottom middle third of the screen as ROI for the targeting line
        targeting_line_roi = img[0:img.shape[0],
                                 int(img.shape[1] * 1 / 3):int(img.shape[1] *
Esempio n. 47
0
 def screenshot(self, name="刚打开"):
     """Capture the screen, save it under ./runing_img/, and cache it.

     The saved JPEG is re-read through numpy.fromfile + cv2.imdecode,
     which tolerates non-ASCII paths that cv2.imread cannot handle.
     """
     path = "./runing_img/%s.jpg" % name
     pyautogui.screenshot().save(path)
     raw = numpy.fromfile(path, dtype=numpy.uint8)
     self.now_screenshot = cv2.imdecode(raw, -1)
Esempio n. 48
0
def rodSwitch():
  """Grab the inventory area of the screen (region defined at module level)."""
  # BUG FIX: the original called pyautogui.screenshot(region), passing the
  # region tuple positionally as the imageFilename argument; it must be
  # supplied as the `region` keyword.
  inventoryRegion = pyautogui.screenshot(region=region)
Esempio n. 49
0
import pyautogui

# Capture the screen and save it to disk.
# BUG FIX: the original read `im1 = save(r"c:\path\to\my\screenshot.png` —
# a missing attribute access (`im1.save`) plus an unterminated raw string
# literal, i.e. a SyntaxError.
im1 = pyautogui.screenshot()
im1.save(r"c:\path\to\my\screenshot.png")

#also import necessary modules XD
import numpy as np
import pyautogui
import imutils
import cv2

LoopAllow = True

# Poll a single pixel at (49, 297) and click once it shows the expected shade.
while LoopAllow:
    shot = pyautogui.screenshot(region=(49, 297, 1, 1))
    frame = cv2.cvtColor(np.array(shot), cv2.COLOR_RGB2BGR)
    # frame[0, 0] is the lone captured pixel in BGR order; index 1 = green.
    if frame[0, 0][1] == 219:
        pyautogui.click(200, 300)
Esempio n. 51
0
import pyautogui

# First capture: fixed filename.
shot = pyautogui.screenshot()
shot.save(r'D:\screenshot_1.png')

# Get the current time for a timestamped filename.
from datetime import datetime
dt = datetime.now().strftime("%m-%d %H%M")
print(dt)

# Second capture: filename prefixed with the timestamp.
shot = pyautogui.screenshot()
shot.save('D:\\' + dt + ' screenshot_1.png')
Esempio n. 52
0
import pyautogui

# Capture the full screen and save it to the (placeholder) path.
pyautogui.screenshot().save("Enter your location")
Esempio n. 53
0
def __region_capture(region, name='testarea'):
    """Screenshot the given (left, top, width, height) region to '<name>.jpg'."""
    pyautogui.screenshot(region=region).save(name + '.jpg')
Esempio n. 54
0
print("3...")


PressKey(0x2b)
time.sleep(1)
ReleaseKey(0x2b)
time.sleep(1)


t=0
print("start")
while(t<300):

    t+=1
    screenshot = pyautogui.screenshot(region=(x, y, w, h))
    
    xtrack = 0

    for ytrack in range(60,h - 100,2):
        r,g,b=screenshot.getpixel((w_2,y))

        if ((ytrack< 80) and (r > 30)):
            ReleaseKey(0x1f)
            PressKey(0x11)
          
        if ((ytrack < 80) and (r < 30)):   
            ReleaseKey(0x11)
            PressKey(0x1f)
            print(r)
def screenshot():
    """Capture the full screen and save it as my_screenshot.png."""
    pg.screenshot().save("my_screenshot.png")
Esempio n. 56
0
def screenshot(params):
    """Take a screenshot and save it to params[0] (the destination path)."""
    destination = params[0]
    pyautogui.screenshot(destination)
Esempio n. 57
0
                speak('i am done')
            else:
                pass

        elif 'do you remember anything' in query:
            remember = open('data.txt', 'r')
            t = remember.read()
            speak(f"you said me to remember that {t}")
            remember.close()

        elif "screenshot" in query or "take screenshot" in query:  #Working fine
            speak('please , tell me the name you want for the screenshot file')
            name = takecommand().lower()
            speak('hold on , taking screenshot')
            time.sleep(3)
            img = pyautogui.screenshot()
            img.save(f"{name}.png")
            speak('screenshot is saved in our main folder , please check')

        elif "bones" in query:
            speak('206 , that was easy')

        elif "open command prompt" in query or "open CMD" in query:
            os.system("start cmd")

        elif "open camera" in query or "open webcam" in query:
            cap = cv2.VideoCapture(0)  # 0 for internal Camera
            while True:
                ret, img = cap.read()
                cv2.imshow('webcam', img)
                k = cv2.waitKey(50)
Esempio n. 58
0
# Fetch item 377 from the GE Tracker API and print its trading stats.
# NOTE(review): the bearer token is hardcoded in source — move it to an
# environment variable or a config file and rotate the leaked token.
response = requests.get(
    'https://www.ge-tracker.com/api/items/377',
    headers={
        'Authorization':
        'Bearer eb7ac7f38d3767f0ae8f1128be4429cf72327b8359a0719468500b20da6ad7e7',
        'Accept': 'application/x.getracker.v1+json',
    })
response_json = response.json()
#print(response_json['data']) #The entire json dictionary
print("Item Name:", response_json['data']['name'])
print("Buy Limit:", response_json['data']['buyLimit'])
print("Offer Price:", response_json['data']['selling'])
print("Sell Price:", response_json['data']['buying'])
print("Profit Per Sell",
      response_json['data']['buying'] - response_json['data']['selling'])
print("Updated At:", response_json['data']['updatedAt'])

checkMoney()
#checkLastTrans()

#collectItems()
checkMargin(response_json['data']['name'])

# Locate the history up-arrow on screen and OCR the last-transaction text
# in the strip just left of it.
# NOTE(review): locateCenterOnScreen returns None when the template is not
# found, which would make the subscripts below raise TypeError — guard it.
location = pyautogui.locateCenterOnScreen('historyUpArrow.png')
# NOTE(review): location[1] + 1.5 produces a float region coordinate —
# confirm pyautogui accepts it; an integer offset was probably intended.
lastTrans = pyautogui.screenshot(region=(location[0] - 155, location[1] + 1.5,
                                         140, 15))
lastTrans.save(r"C:\Users\Nirav\Desktop\code\rsBot\lastTrans.png")
text = pytesseract.image_to_string(Image.open('lastTrans.png'))
# Extract any integer tokens (e.g. prices/quantities) from the OCR output.
result = [int(i) for i in text.split() if i.isdigit()]
print(text)
Esempio n. 59
0
# Benchmark three screen-capture backends by timing 100 grabs each.
# IMPROVED: use time.perf_counter() — the documented clock for measuring
# short intervals (monotonic, highest available resolution) — instead of
# time.time(); also drop placeholder-free f-strings and the unused loop index.

print('d3dshot: ')
d = d3dshot.create()
start = time.perf_counter()
for _ in range(100):
    d.screenshot()
end = time.perf_counter()

print(f'took 100 screenshots in {end-start} seconds.')
print(f'average of {100/(end-start)}/s.')

print()

print('pyautogui:')
start = time.perf_counter()
for _ in range(100):
    pyautogui.screenshot()
end = time.perf_counter()

print(f'took 100 screenshots in {end-start} seconds.')
print(f'average of {100/(end-start)}/s.')

print()

print('mss:')
start = time.perf_counter()
with mss() as sct:
    for _ in range(100):
        sct.shot()
end = time.perf_counter()

print(f'took 100 screenshots in {end-start} seconds.')
Esempio n. 60
0
def screenshot():
    """Take a screenshot after a 5-second delay and display it.

    The file is named with the current Unix time in milliseconds.
    """
    stamp = int(round(time.time() * 1000))
    filename = '{}.png'.format(stamp)
    time.sleep(5)
    shot = pyautogui.screenshot(filename)
    shot.show()