def wide_screengrab():
    """Scan the current preset's wide-scan boxes for a known pixel signature.

    Side effects: sets config.wide_scan, config.current_pixel and (on a
    match) config.test; prints progress messages.
    """
    config.wide_scan = False
    if config.preset == "aspen":
        for stuff in config.aspen_wide_box:
            im = ImageGrab.grab(stuff)
            config.current_pixel = str(np.asarray(im))
            if config.current_pixel in config.aspen:
                config.test = stuff
                print(config.test)
                config.wide_scan = True
                print('It matches!')
                break
            else:
                # The old `elif not in ... / else "error"` split was redundant:
                # the two conditions were exhaustive, so "error" was unreachable.
                print("It doesnt match")
    elif config.preset == "acacia":
        for stuff in config.acacia_wide_box_down:
            im = ImageGrab.grab(stuff)
            config.current_pixel = str(np.asarray(im))
            if config.current_pixel in config.acacia:
                config.test = stuff
                print(config.test)
                config.wide_scan = True
                print('It matches!')
                break
            else:
                # BUG FIX: the no-match branch previously tested
                # `not in config.aspen` (wrong preset), so a pixel present in
                # aspen but not acacia fell through to a stray "error" print.
                print(config.current_pixel)
                print("It doesnt match")
def get_current_window(self):
    """Locate the game window on screen via corner-template matching.

    Saves a full-screen capture, matches the top-left and bottom-right
    corner templates against it, stores the padded bounding box on self,
    saves the cropped window image, then refreshes cached window state.
    """
    # Get a screenshot of the full screen
    im = ImageGrab.grab()
    filename = os.getcwd() + '\\full_screen.png'
    im.save(filename, 'PNG')
    full_screen = cv2.imread(filename)
    # load the top left image and the bottom right image
    top_left_image = cv2.imread(os.getcwd() + '\\templates\\top-left.png')
    bottom_right_image = cv2.imread(os.getcwd() + '\\templates\\bottom-right.png')
    # minMaxLoc()[2] is the *minimum* location -- assumes METHOD is a
    # difference-style matcher (e.g. TM_SQDIFF) where best match = minimum.
    # TODO(review): confirm METHOD's polarity.
    result = cv2.matchTemplate(full_screen, top_left_image, METHOD)
    top_left_location = cv2.minMaxLoc(result)[2]
    result = cv2.matchTemplate(full_screen, bottom_right_image, METHOD)
    bottom_right_image_location = cv2.minMaxLoc(result)[2]
    # See what we found
    self.BOX_LEFT = top_left_location[0] - LEFT_PADDING
    self.BOX_TOP = top_left_location[1] - TOP_PADDING
    self.BOX_RIGHT = bottom_right_image_location[0] + RIGHT_PADDING
    self.BOX_BOTTOM = bottom_right_image_location[1] + BOTTOM_PADDING
    box = (
        self.BOX_LEFT,
        self.BOX_TOP,
        self.BOX_RIGHT,
        self.BOX_BOTTOM
    )
    im = ImageGrab.grab(box)
    im.save(WINDOW_IMAGE_LOCATION, 'PNG')
    self.update_window()
def inspect(self): pointer = self.cursor_location bbox = (pointer.x, pointer.y, pointer.x+1, pointer.y+1) ss = ImageGrab.grab(bbox) pix = ss.load() print "At %s, color is %r" % (pointer, pix[0,0]) if self.main_bbox: # Find the relative point of this. relative = self.to_relative(pointer) reversed = self.to_absolute(relative) print " Relative: %s Reversed: %s" % (relative, reversed) prev = self.prev_inspect if prev: pss = ImageGrab.grab((prev.x, prev.y, prev.x+1, prev.y+1)) ppix = pss.load() print "Previous spot %s is now %r" % (self.prev_inspect, ppix[0,0]) if self.main_bbox: # Find the relative point of this. relative = self.to_relative(prev) reversed = self.to_absolute(relative) print " Relative: %s Reversed: %s" % (relative, reversed) self.prev_inspect = pointer
def chkimg(imgname,check = 5,hwnd = None): global g_img_x,g_img_y #比較に使う画像のロード comimg = cv2.imread(imgname,0) #キャプチャ if hwnd != None: rect = winxpgui.GetWindowRect(hwnd) size = winxpgui.GetClientRect(hwnd) cap = [rect[0],rect[1],size[2]+rect[0],size[3]+rect[1]] baseimg = ImageGrab.grab(cap) else: baseimg = ImageGrab.grab() #画像をグレースケールに変換 npimg = np.asarray(baseimg) capimg = cv2.cvtColor(npimg,cv2.COLOR_BGR2GRAY) #座標検索 retXY = compareImg(comimg,capimg,check) #見つかった場合座標を返す #なかった場合-1,-1を返す if(retXY != None): g_img_x = retXY[0] g_img_y = retXY[1] return(True) else: return(False)
def producer(out_q): shots=200 time.sleep(3) #info('producer line') with open("tmp.bin","wb") as w: print "start gather" global original original=(640,359) printscreen_pil=ImageGrab.grab().thumbnail(original, Image.ANTIALIAS) last=time.time(); for i in xrange(frames): difference=(time.time()-last) while difference<0.08: difference =(time.time()-last) time.sleep(0.01) last=time.time() print difference printscreen_pil=ImageGrab.grab() printscreen_pil.thumbnail(original, Image.LINEAR) image_data = bytearray([z for l in printscreen_pil.getdata() for z in l ]) w.write(image_data) shots-=1 #print shots print "done gather"
def OnLeftUp(self, evt):
    """Finish a drag-selection: grab the dragged screen rectangle and offer
    to save it via a file dialog. Ignores zero-width/zero-height drags."""
    self.endX = evt.X
    self.endY = evt.Y
    if self.dlg:
        self.dlg.Destroy()
        self.dlg = None
    if self.beginX != self.endX and self.beginY != self.endY:
        # Normalize the rectangle regardless of drag direction; this
        # replaces the old four-way if/else ladder with min/max.
        left, right = sorted((self.beginX, self.endX))
        top, bottom = sorted((self.beginY, self.endY))
        im = ImageGrab.grab((left, top, right, bottom))
        self.dlg = SafeSaveFileDialog(self)
        if self.dlg.ShowModal() == wx.ID_OK :
            self.dlg.Save(im)
        else:
            self.dlg.Destroy()
            self.dlg = None
def questing_fight(self):
    """Mash attack keys until the fight ends (back on the quest screen,
    a popup appears, or the quest completes), then pause 3s."""
    self.print_log("start fighting")
    sequence = 0
    img = ImageGrab.grab()
    while not (image_manager.is_in_quest(img) or self.grinding.check_popup_close(img)):
        keyboard_send("0")
        #~ time.sleep(0.001)
        # Every 4th tick also press "K".
        if sequence%4==0:
            keyboard_send("K")
        sequence += 1
        # After 15 ticks start pressing space as well; reset counter at 20.
        if (sequence>15):
            keyboard_send(KEYCODE_SPACE)
        if (sequence>20):
            sequence = 0
        # If no longer fighting, decide why and bail out of the loop.
        if not image_manager.is_in_fighting(img):
            if image_manager.is_in_quest_complete(img):
                self.print_log("quest complete")
                break
            if self.grinding.check_popup_close(img):
                self.print_log("popup woi!")
                break
        img = ImageGrab.grab()
    self.print_log("done_fighting")
    time.sleep(3)
def chkimg(imgname, check=5, hwnd=None): global g_img_x, g_img_y #比較に使う画像のロード comimg = cv2.imread(imgname, 0) #キャプチャ if hwnd != None: rect = winxpgui.GetWindowRect(hwnd) size = winxpgui.GetClientRect(hwnd) cap = [rect[0], rect[1], size[2] + rect[0], size[3] + rect[1]] baseimg = ImageGrab.grab(cap) else: baseimg = ImageGrab.grab() #画像をグレースケールに変換 npimg = np.asarray(baseimg) capimg = cv2.cvtColor(npimg, cv2.COLOR_BGR2GRAY) #座標検索 retXY = compareImg(comimg, capimg, check) #見つかった場合座標を返す #なかった場合-1,-1を返す if (retXY != None): g_img_x = retXY[0] g_img_y = retXY[1] return (True) else: return (False)
def hit():
    """Detect and click the 'hit' button by perceptual-hash comparison; if
    the 'can hit' indicator then also matches, run the pet-click / gold-click
    sequence.

    Returns 2 when the full can-hit sequence ran, 1 when only the hit button
    was clicked, 0 when nothing matched.
    """
    l, t = L + 21, T + 335
    r, b = l + 50, t + 50
    # ImageGrab.grab((l, t, r, b)).save('hit_t.png')
    hash_val1 = get_hash(ImageGrab.grab((l, t, r, b)))
    hash_val2 = get_hash(Image.open('hit.png'))
    dis = hamming_dist(hash_val1, hash_val2)
    # Hamming distance below 50 counts as a match -- TODO confirm threshold.
    if dis < 50:
        print 'hit', dis
        # return True
        autopy.mouse.move(l + 25, t + 25)
        autopy.mouse.click()
        time.sleep(0.5)
        # Second region: the "can hit" indicator.
        l1, t1 = L + 342, T + 343
        r1, b1 = l1 + 53, t1 + 43
        hash_val11 = get_hash(ImageGrab.grab((l1, t1, r1, b1)))
        hash_val21 = get_hash(Image.open('can_hit.png'))
        dis1 = hamming_dist(hash_val11, hash_val21)
        # ImageGrab.grab((l1, t1, r1, b1)).save('can_hit_t.png')
        print 'can_hit', dis1
        if dis1 < 50:
            # judge if can hit
            autopy.mouse.move(L + 280, T + 650)  # click pet
            for i in xrange(13):
                autopy.mouse.click()
                time.sleep(1.0)
            autopy.mouse.move(L + 280, T + 504)  # click gold
            autopy.mouse.click()
            time.sleep(0.2)
            return 2
        return 1
    return 0
def playGame(): global game_over # Click on play game on start screen mousePos(play_game) leftClick() # Grab the first block # Little tricky because right after screen transition time.sleep(1.2) im = ImageGrab.grab(box_cur_block) color = im.getpixel(cur_block) cnt = 0 tot = 0 while True: if game_over: break time.sleep(.3) # required, for keeping the game in sync prev_color = color if color in colors: piece = colors[color] tot += 1 print "Saw a " + piece else: print "Unknown color!" break # Grab the next block im = ImageGrab.grab(box_next_block) color = im.getpixel(next_block) # HACK - checking for game over # if the next block has not changed for five times straight # the game is already over if color == prev_color: cnt += 1 if cnt > 5: game_over = True else: cnt = 0 si,sc,lr = pos(piece) print si,sc,lr guideBlock(piece, si, sc) updateGrid(piece,si,sc,lr)
def _GrabWindowImage(self, WindowTitle, Text, fullFilePath):
    """Screenshot the window identified by WindowTitle/Text and save it to
    fullFilePath (image format inferred from the extension by PIL)."""
    pos_x = self._AutoIt.WinGetPosX(WindowTitle, Text)
    pos_y = self._AutoIt.WinGetPosY(WindowTitle, Text)
    width = self._AutoIt.WinGetPosWidth(WindowTitle, Text)
    height = self._AutoIt.WinGetPosHeight(WindowTitle, Text)
    # BUG FIX: ImageGrab.grab expects (left, upper, right, lower), not
    # (x, y, width, height) -- the old call grabbed the wrong region for
    # any window not anchored at the screen origin.
    GrabbedImage = ImageGrab.grab((pos_x, pos_y, pos_x + width, pos_y + height))
    # store screenshot as "RGB" Image
    GrabbedImage.save(fullFilePath)  # PIL evaluates extension
def picmath(box, filepath):
    """Crop `box` out of a full-screen grab, save it as JPEG to `filepath`,
    and return the uppercase MD5 hex digest of the saved file."""
    fullsc = ImageGrab.grab()
    # Image.save returns None, so binding its result (old `savepic`) was dead.
    fullsc.crop(box).save(filepath, 'JPEG')
    # Context manager guarantees the handle is closed even on read errors.
    with open(filepath, 'rb') as pic:
        return hashlib.md5(pic.read()).hexdigest().upper()
def grab(): box = (x_pad + 1,y_pad+1,x_pad+640,y_pad+480) im = ImageOps.grayscale(ImageGrab.grab(box)) a = array(im.getcolors()) a = a.sum() print a return a
def grab(): box = (X_PAD + 1, Y_PAD +1,X_PAD + 640, Y_PAD + 480) im = ImageOps.grayscale(ImageGrab.grab(box)) a = array(im.getcolors()) a = a.sum() print a return a
def getSeatFive(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+429,yPad+60,xPad+492,yPad+76))) a = array(im.getcolors()) im.save(os.getcwd() + '\\SeatFiveBubble.png', 'PNG') print 'table 5 bubble = %d' % a.sum() return a.sum()
def getSeatTwo(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+126,yPad+60,xPad+189,yPad+76))) im.save(os.getcwd() + '\\SeatTwoBubble.png', 'PNG') a = array(im.getcolors()) print 'table 2 bubble = %d' % a.sum() return a.sum()
def capture_image(self, savefilename):
    """Capture the full screen to `savefilename`.

    POSIX path uses a GTK pixbuf grab of the root window; Windows path
    uses PIL ImageGrab. The cropbox is clamped to the screen size.
    """
    screensize = self.get_screen_size()
    # The cropbox will take care of making sure our image is within
    # screen boundaries.
    cropbox = CropBox(topleft=Point(0, 0), bottomright=screensize, min=Point(0, 0), max=screensize)
    self.logger.debug(cropbox)
    if os.name == "posix":
        screengrab = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, screensize.x, screensize.y)
        screengrab.get_from_drawable(
            gtk.gdk.get_default_root_window(),
            gtk.gdk.colormap_get_system(),
            0, 0, 0, 0,
            screensize.x, screensize.y
        )
        save_options_dict = {}
        img_format = self.subsettings["General"]["Screenshot Image Format"]
        # BUG FIX: str.lower() returns a new string; the old bare
        # `img_format.lower()` call discarded its result, so the IMG_SET
        # membership test below saw the unnormalized value.
        img_format = img_format.lower()
        img_quality = to_unicode(self.subsettings["General"]["Screenshot Image Quality"])
        if img_format in IMG_SET:
            self.subsettings["General"]["Screenshot Image Format"] = "jpeg"
            save_options_dict = {"quality": img_quality}
        screengrab.save(savefilename, self.subsettings["General"]["Screenshot Image Format"], save_options_dict)
    if os.name == "nt":
        image_data = ImageGrab.grab(
            (cropbox.topleft.x, cropbox.topleft.y, cropbox.bottomright.x, cropbox.bottomright.y)
        )
        image_data.save(savefilename, quality=self.subsettings["General"]["Screenshot Image Quality"])
def Grab_Screenshot():
    """Every two minutes: screenshot the desktop, e-mail it via Gmail SMTP,
    then delete the local file. Loops until the process is killed."""
    while 1:  # Will keep taking screenshots until proccess is closed
        # ----------------- Take Screen Shot ---------------- #
        screenshot_name = 'Screenshot@[' + win32api.GetComputerName(
        ) + ']@[' + strftime("(%a %d %b %Y) (%H %M %S %p)") + '].jpg'
        # Image.save returns None, so binding it (old `screenshot`) was dead.
        ImageGrab.grab().save(screenshot_name, 'JPEG')
        # ---------------- Connect to the Server ------------ #
        server = smtplib.SMTP('smtp.gmail.com:587')
        server.starttls()
        server.login(sender, password)
        # -------------- Send the Screenshot ---------------- #
        # FIX: close the file handle deterministically instead of leaking it.
        with open(screenshot_name, 'rb') as shot_file:
            screenshot_data = shot_file.read()
        screenshot_msg = MIMEMultipart(_subtype='related')
        screenshot_image = MIMEImage(screenshot_data, 'jpeg')
        screenshot_msg.attach(screenshot_image)
        screenshot_msg['Subject'] = screenshot_name
        screenshot_msg['From'] = sender
        screenshot_msg['To'] = To
        server.sendmail(sender, [To], screenshot_msg.as_string())
        os.remove(screenshot_name)
        server.quit()
        time.sleep(120)
def check_target(self):
    """Classify 20 on-screen rows against the C1..C4 templates by
    perceptual-hash distance.

    Returns the row index holding the highest class value when at least one
    row matched; returns None (implicitly) otherwise.
    """
    stack = {}
    hashC1 = self.get_hash(Image.open("C1.png"))
    hashC2 = self.get_hash(Image.open("C2.png"))
    hashC3 = self.get_hash(Image.open("C3.png"))
    hashC4 = self.get_hash(Image.open("C4.png"))
    for x in range(0, 20, 1):
        # Each row is a 56x17 strip, rows spaced 20px apart vertically.
        target_screen = ImageGrab.grab((725, 237 + x*20, 781, 254 + x*20))
        # NOTE(review): bare get_hash/hamming_dist (no `self.`) -- assumed to
        # be module-level helpers; confirm they exist at module scope.
        hashScreen = get_hash(target_screen)
        distC1 = hamming_dist(hashScreen, hashC1)
        distC2 = hamming_dist(hashScreen, hashC2)
        distC3 = hamming_dist(hashScreen, hashC3)
        distC4 = hamming_dist(hashScreen, hashC4)
        # BUG FIX: `stack` is a dict, and `stack.append[x:1]` raised
        # AttributeError on the very first iteration. Record the matched
        # class (1-4, or 0 for no match) per row index instead.
        if distC1 < 10:
            stack[x] = 1
        elif distC2 < 10:
            stack[x] = 2
        elif distC3 < 10:
            stack[x] = 3
        elif distC4 < 10:
            stack[x] = 4
        else:
            stack[x] = 0
    best_row, best_class = max(stack.items(), key=lambda item: item[1])
    if best_class >= 1:
        return best_row
def updateGrid(self):
    """Re-capture the grid region of the screen and refresh every cell."""
    snapshot = ImageGrab.grab(self.gridRect)
    self.im = snapshot
    for row in range(self.numRows):
        for col in range(self.numCols):
            self.grid[row][col] = self.cellAt((row, col))
def getSeatSix(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+530,yPad+60,xPad+593,yPad+76))) a = array(im.getcolors()) im.save(os.getcwd() + '\\SeatSixBubble.png', 'PNG') print 'table 6 bubble = %d' % a.sum() return a.sum()
def capture_image(self, event): screensize = self.get_screen_size() # The cropbox will take care of making sure our image is within # screen boundaries. #AF UPDATED get full screen on mouse click cropbox = CropBox(topleft=Point(0,0), bottomright=screensize, min=Point(0,0), max=screensize) #cropbox.reposition(Point(event.Position[0], event.Position[1])) self.logger.debug(cropbox) if os.name == 'posix': AllPlanes = 0xFFFFFFFF try: # cropbox.topleft.x, cropbox.topleft.y, # cropbox.size.x, cropbox.size.y, self.savefilename raw = self.rootwin.get_image(cropbox.topleft.x, cropbox.topleft.y, cropbox.size.x, cropbox.size.y, X.ZPixmap, AllPlanes) image_data = Image.frombytes("RGBX", (cropbox.size.x, cropbox.size.y), raw.data, "raw", "BGRX").convert("RGB") #ADDED SECTION - AF #GET ACTUAL SCREEN COORDS OF MOUSE CLICK m_x = event.Position[0] m_y = event.Position[1] for i in range(m_x-5,m_x+5): for j in range(m_y-5,m_y+5): image_data.putpixel((i,j),(255,0,0)) #END ADDED SECTION - AF return image_data except error.BadDrawable: print "bad drawable when attempting to get an image! Closed the window?" except error.BadMatch: print "bad match when attempting to get an image! probably specified an area outside the window (too big?)" except error.BadValue: print "getimage: bad value error - tell me about this one, I've not managed to make it happen yet" except: print self.logger.debug('Error in getimage.', exc_info = True) if os.name == 'nt': image_data = ImageGrab.grab( (cropbox.topleft.x, cropbox.topleft.y, cropbox.bottomright.x, cropbox.bottomright.y)) print "putting pixel in" m_x = event.Position[0] m_y = event.Position[1] for i in range(m_x - 5, m_x + 5): for j in range(m_y - 5, m_y + 5): image_data.putpixel((i, j), (255, 0, 0)) return image_data
def find_screen(debug=False):
    """Function used when starting to find the offset of the game window.
    It tries to identify the green checkerboard at the top-left of the
    title screen."""
    im = ImageGrab.grab()
    pix = im.load()
    (w, h) = im.size
    pos = None
    # Coarse scan in 5px steps; slide_top_left refines a hit to the exact
    # top-left corner.
    for x in xrange(0, w - 200, 5):
        if pos:
            break
        for y in xrange(0, h - 50, 5):
            if is_green_check(pix, x, y):
                pos = slide_top_left(pix, x, y, 5, 5)
                break
    if debug:
        # NOTE(review): this unpacking crashes when the checkerboard was
        # never found (pos is None) -- confirm debug=True is only used when
        # the title screen is known to be visible.
        (x0, y0) = pos
        print("Origin at %d,%d" % pos)
        # Draw a red cross at the detected origin and dump the screenshot.
        for x in xrange(x0 - 10, x0 + 11):
            pix[x, y0] = RED
        for y in xrange(y0 - 10, y0 + 11):
            pix[x0, y] = RED
        with open("find_squares.png", "wb") as outfile:
            im.save(outfile)
    return pos
def as_image_simple(self):
    """
    Screenshot of this element's bounds via ImageGrab. Only valid while
    the element is actually visible on screen.
    @return: Image.Image
    """
    return ImageGrab.grab(self.bounds)
def run(self):
    """Screenshot loop: save a shot every SHOT_DELAY seconds, skipping
    frames identical to the previous one. Runs until self._do_run clears."""
    log = logging.getLogger("Screenshots.Run")
    # PIL is optional; abort quietly when it is missing so it never
    # becomes a hard dependency.
    if not IS_PIL:
        return False
    counter = 0
    previous = None
    log.info("Started taking screenshots.")
    while self._do_run:
        current = ImageGrab.grab()
        # Nothing changed on screen -> wait without saving.
        if previous is not None and self._equal(previous, current):
            time.sleep(SHOT_DELAY)
            continue
        counter += 1
        save_at = os.path.join(self.save_path, "shot_%s.jpg" % counter)
        current.save(save_at)
        log.debug("Screenshot saved at \"%s\"." % save_at)
        previous = current
        time.sleep(SHOT_DELAY)
    return True
def screenGrab():
    # Capture the 521x19 strip at the pad offset, archive it as
    # last_test.png, and hand the image back to the caller.
    strip = (x_pad+1, y_pad+1, x_pad+522, y_pad+20)
    shot = ImageGrab.grab(strip)
    shot.save(os.getcwd() + '\\last_test.png', 'PNG')
    return shot
def screenGrab(self):
    """Capture self.box into self.im and build a timestamped filename
    for the shot under the 'screen shots' directory."""
    print("waiting...")
    #time.sleep(.5)
    self.im = ImageGrab.grab(self.box)
    print("image taken")
    stamp = datetime.now().strftime("%d%m%y_%H%M%S")
    self.im_name = os.getcwd() + '\\screen shots\\full_snap__' + str(stamp) + '.png'
def GetScreenImage(self, FilePath):
    """
    Capture a full screen image into the given _FilePath_.

    The given _FilePath_ must be relative to Robot Framework
    output directory, otherwise the embedded image will not be
    shown in the log file.
    """
    #
    # Check that PIL is installed
    #
    if ImageGrab == None:
        raise RuntimeError(
            "Python Imaging Library (PIL) is not installed, but is required for GetScreenImage"
        )
    #
    # Check for a valid FilePath and make sure the directories exist
    #
    if FilePath and os.path.isabs(FilePath):
        # FIX: corrected message typo ("outpudir" -> "output dir").
        raise RuntimeError(
            "Given FilePath='%s' must be relative to Robot output dir"
            % FilePath)
    fullFilePath = os.path.join(self._OutputDir, FilePath)
    if not os.path.exists(os.path.split(fullFilePath)[0]):
        os.makedirs(os.path.split(fullFilePath)[0])
    self._info("GetScreenImage(FilePath=%s)" % fullFilePath)
    #
    # Capture and save the screen image of the whole screen
    #
    GrabbedImage = ImageGrab.grab()  # store screenshot as "RGB" Image
    GrabbedImage.save(fullFilePath)  # PIL evaluates extension
    #
    # Embed the screenshot in the Robot Framework log file
    #
    self._html('<td></td></tr><tr><td colspan="3"><a href="%s">'
               '<img src="%s" width="700px"></a></td></tr>' % (FilePath, FilePath))
def getSeatFour(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+328,yPad+60,xPad+391,yPad+76))) a = array(im.getcolors()) im.save(os.getcwd() + '\\SeatFourBubble.png', 'PNG') print 'table 4 bubble = %d' % a.sum() return a.sum()
def clickImage(im):
    """Locate `im` on the current screen and click it; warn when absent."""
    screen = ImageGrab.grab()
    location = findSubimage(im, screen)
    if location:
        windowsBot.click(location[0], location[1])
    else:
        print("Couldn't find image to click")
def run(self):
    """
    Main thread procedure.

    Saves a screenshot every SHOT_DELAY seconds, skipping frames that are
    identical to the previous one; loops until self._do_run clears.
    """
    log = logging.getLogger("Screenshots.Run")
    # PIL is optional; bail out quietly when it is absent so it never
    # becomes a hard dependency.
    if not IS_PIL:
        return False
    counter = 0
    previous = None
    log.info("Started taking screenshots.")
    while self._do_run:
        current = ImageGrab.grab()
        if previous is not None and self._equal(previous, current):
            # Screen unchanged: wait without writing a duplicate file.
            time.sleep(SHOT_DELAY)
            continue
        counter += 1
        save_at = os.path.join(self.save_path, "shot_%s.jpg" % counter)
        current.save(save_at)
        log.debug("Screenshot saved at \"%s\"." % save_at)
        previous = current
        time.sleep(SHOT_DELAY)
    return True
def getcustomer(n):
    """Read the 1px-high pixel strip over table `n` and match it against the
    known order patterns; return the matching food, or None."""
    strip = ImageGrab.grab((x_pad + tables[n][0], y_pad + 64,
                            x_pad + tables[n][1], y_pad + 65)).convert('L').tostring()
    for food, wish in order.iteritems():
        if wish in strip:
            return food
    return None
def getSeatThree(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+227,yPad+60,xPad+290,yPad+76))) a = array(im.getcolors()) ## im.save(os.getcwd() + '\\SeatThreeBubble.png', 'PNG') print 'table 3 bubble = %d' % a.sum() return a.sum()
def grabimage(self): print "Generating Image" self.image_num += 1 NewIm = ImageGrab.grab((0, 0, 800, 600)) NewIm.save("Images\\take-" + str(datetime.now())[0:-7].replace(':', '-') + ".png")
def findSharks():
    """Snapshot the whole screen, write it to rage.bmp and display it."""
    #rage=np.array(bmp)
    #plt.imshow(rage)
    shot = ImageGrab.grab()
    shot.save('rage.bmp')
    shot.show()
def getSeatFour(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+328,yPad+60,xPad+391,yPad+76))) a = array(im.getcolors()) ## im.save(os.getcwd() + '\\SeatFourBubble.png', 'PNG') print 'table 4 bubble = %d' % a.sum() return a.sum()
def screenshot_RD():
    """Read the Random Draft hero pool from a screenshot.

    Crops each hero icon cell and uses its mean brightness to decide whether
    the hero is lit (present in the pool); returns the list of hero ids, or
    None on unsupported aspect ratios. NOTE: the integer divisions below rely
    on Python 2 floor-division semantics.
    """
    pool=[]
    screenshot=ImageGrab.grab()
    # Scale factor relative to the 1080p reference layout.
    rescale=float(screenshot.size[1])/1080
    # Aspect key via integer division, e.g. 16*1080/1920 == 9 for 16:9.
    aspect = 16*screenshot.size[1]/screenshot.size[0]
    if aspect not in hero_group_pixel_offsets_RD:
        tkMessageBox.showerror("Error","Aspect Ratio not supported")
        return
    for group in range(6):
        for hero_number_within_group in range(len(hero_group_table[group])):
            blockX,blockY=hero_group_pixel_offsets_RD[aspect][group]
            offset = 64 if aspect==12 else 73  # The 4:3 icons are smaller
            # Icons are laid out 7 per row; crop the 49x51 cell for this hero
            # (`% 7` selects the column, `/ 7` the row).
            image=screenshot.crop((int((blockX+offset*(hero_number_within_group%7)+2)*rescale),
                                   int((blockY+offset*(hero_number_within_group/7))*rescale),
                                   int((blockX+offset*(hero_number_within_group%7)+51)*rescale),
                                   int((blockY+offset*(hero_number_within_group/7)+51)*rescale)))
            #image.show()
            brightness=max(ImageStat.Stat(image).mean)
            hero_id = hero_group_table[group][hero_number_within_group]
            # Per-hero brightness corrections before thresholding at 45.
            if heroes[hero_id]=="Spectre":
                brightness+=25  # Spectre is super dark
            elif heroes[hero_id]=="Puck" or heroes[hero_id]=="Io":  # Puck and Io are very bright
                brightness-=10
            if brightness>45:
                pool.append(hero_id)
            # Log borderline readings near the threshold for tuning.
            if brightness>35 and brightness<55:
                print heroes[hero_id],brightness
    # A RD pool outside 14..24 heroes indicates a misread screen.
    if len(pool)>24 or len(pool)<14:
        tkMessageBox.showwarning("Warning","Warning! Number of heroes detected ("+str(len(pool))+") is wrong!")
    return pool
def eventFinder(): box = (200,500,884,750) im = ImageGrab.grab(box) if specialChecker(im): print 'Special spotted!\n' p = Process(target= multiClick()) p.start() p.join() if bubbleCheck(im): print "A bubble has appeared!\n" leftClick() ## if specialChecker(im): ## print 'Special spotted!\n' ## for i in range(15): ## leftClick() if fuzzCheck(im): leftClick() if specialChecker(im): print 'Special spotted!\n' if spinCheck(im): if isSpinning(): launcher() print 'searching...'
def screenGrab():
    #This grabs a screenshot and saves in the current working directory as a '.png' file.
    # NOTE(review): `box` is computed but never used -- the grab() call below
    # captures the FULL screen. Possibly ImageGrab.grab(box) was intended;
    # confirm with callers before changing.
    box = (X_PAD + 1, Y_PAD +1,X_PAD + 640, Y_PAD + 480)
    im = ImageGrab.grab()
    #im.save(CWD + '\\ful_snap__' + str(int(time.time())) + '.png', 'PNG') #(directory, (name, timestamp), fileformat)
    return im
def canShop():
    """True when the shop-indicator pixel at (695, 385) shows the expected
    colour; falls through (returns None) otherwise."""
    sample = ImageGrab.grab().getpixel((695, 385))
    if sample == (242,227,110):
        return True
def GetScreenImage(self, FilePath):
    """
    Capture a full screen image into the given _FilePath_.

    The given _FilePath_ must be relative to Robot Framework
    output directory, otherwise the embedded image will not be
    shown in the log file.
    """
    #
    # Check that PIL is installed
    #
    if ImageGrab == None:
        raise RuntimeError("Python Imaging Library (PIL) is not installed, but is required for GetScreenImage")
    #
    # Check for a valid FilePath and make sure the directories exist
    #
    if FilePath and os.path.isabs(FilePath):
        # FIX: corrected message typo ("outpudir" -> "output dir").
        raise RuntimeError("Given FilePath='%s' must be relative to Robot output dir" % FilePath)
    fullFilePath = os.path.join(self._OutputDir, FilePath)
    if not os.path.exists(os.path.split(fullFilePath)[0]):
        os.makedirs(os.path.split(fullFilePath)[0])
    self._info("GetScreenImage(FilePath=%s)" % fullFilePath)
    #
    # Capture and save the screen image of the whole screen
    #
    GrabbedImage = ImageGrab.grab()  # store screenshot as "RGB" Image
    GrabbedImage.save(fullFilePath)  # PIL evaluates extension
    #
    # Embed the screenshot in the Robot Framework log file
    #
    self._html('<td></td></tr><tr><td colspan="3"><a href="%s">'
               '<img src="%s" width="700px"></a></td></tr>' % (FilePath, FilePath))
def screenGrab():
    """Capture the 499x699 game region at the pad offset and return it."""
    # upper-left / lower-right bounding box
    region = (x_pad+1, y_pad+1, x_pad+500, y_pad+700)
    return ImageGrab.grab(region)
def takingScreenshot():
    """Grab the screen, scale it to 800x600, and slice every board region
    into the corresponding module-level globals (also saving the enemy
    portrait crop to disk)."""
    global enemySide, mySide, turn, enemy, me, enemy_mana, my_mana, stack, my_Hand, enemy_Hand
    #img = img.convert('LA')  # only needed to work on a grayscaled image
    shot = grab.grab().resize((800, 600), Image.BICUBIC)
    enemySide = shot.crop((197, 177, 605, 279))
    mySide = shot.crop((197, 281, 605, 383))
    turn = shot.crop((614, 248, 685, 292))
    enemy = shot.crop((361, 48, 442, 167))
    enemy.save(path('images/tests/enemy.png'))
    me = shot.crop((361, 394, 442, 513))
    enemy_mana = shot.crop((490, 26, 528, 50))
    my_mana = shot.crop((508, 543, 546, 567))
    stack = shot.crop((118, 169, 149, 411))
    my_Hand = shot.crop((246, 518, 483, 591))
    enemy_Hand = shot.crop((246, 0, 483, 44))
def nextclass():
    """Submit the current exercise and decide what comes next by
    fingerprinting the result button: next topic, retry, or chapter done."""
    def picmath(box, filepath):
        # Crop `box` out of the full screenshot (closure over `fullsc`,
        # assigned below before this is called), save as JPEG, and return
        # the uppercase MD5 of the saved file as an image fingerprint.
        savepic = fullsc.crop(box).save(filepath, 'JPEG')
        pic = open(filepath, 'rb')
        md5 = hashlib.md5(pic.read()).hexdigest().upper()
        pic.close()
        return md5
    clickClient(getbrow, (1020, 865))  # submit the paper
    time.sleep(1)
    clickClient(getbrow, (560, 680))  # confirm the "unfinished" prompt dialog
    time.sleep(1.5)
    fullsc = ImageGrab.grab()
    Buttunmd5 = picmath(Button, 'tmp/Button.JPG')
    if Buttunmd5 == ButtunT:
        clickClient(getbrow, (490, 720))  # practice the next knowledge point
        time.sleep(3)
        Pickit()
    elif Buttunmd5 == ButtunF:
        clickClient(getbrow, (490, 720))  # failed -- redo
        AutoAnswer()
    else:
        print "all this chapter finish!"
        return
def grab(): im = ImageOps.grayscale( ImageGrab.grab(bbox=(x_pad + 1, y_pad + 1, x_pad + 641, y_pad + 480))) a = array(im.getcolors()) a = a.sum() print a return a
def screenGrab():
    # Capture the 521x19 strip at the pad offset, archive it as
    # last_test.png, and hand the image back to the caller.
    strip = (x_pad + 1, y_pad + 1, x_pad + 522, y_pad + 20)
    shot = ImageGrab.grab(strip)
    shot.save(os.getcwd() + '\\last_test.png', 'PNG')
    return shot
def script_analyzeObject(self, gesture):
    """NVDA script: screenshot the current navigator object, send it to the
    icon-classifier cloud function, and speak the returned description."""
    api_url = "https://us-central1-icon-classifier.cloudfunctions.net/function-2/interpret_image?locale=%s&b64=%s"
    ui.message(_("Analyzing navigator object"))
    nav = api.getNavigatorObject()
    if not nav.location:
        ui.message(_("This navigator object is not analyzable"))
        return
    left, top, width, height = nav.location
    img = ImageGrab.grab(bbox=(left, top, left + width, top + height))
    # Downscale so the pixel area stays under ~35k, keeping the base64
    # payload small enough for a GET request.
    maxArea = 35000
    area = width * height
    if area > maxArea:
        scale = sqrt(float(maxArea) / float(area))
        img = img.resize((int(width * scale), int(height * scale)))
    # Encode the JPEG bytes as base64 for the query string.
    buffer = StringIO()
    img.save(buffer, format="JPEG")
    img_str = base64.b64encode(buffer.getvalue())
    lang = getConfig()['language']
    # Synchronous HTTP call; blocks until the classifier responds.
    resp = urllib.urlopen(api_url % (lang, img_str)).read().decode('utf-8')
    ui.message(_('Analysis completed: ') + resp)
def Screenshot():
    """Save a timestamped full-screen PNG; queue its absolute path for
    mailing when LOG_SENDMAIL is on."""
    capture = ImageGrab.grab()
    filename = os.path.join(time.strftime('%Y_%m_%d_%H_%M_%S') + '.png')
    capture.save(filename)
    if LOG_SENDMAIL == True:
        # add to the list
        LOG_TOSEND.append(str(os.getcwd()) + "\\" + str(filename))
def grab(): #screenshots of the game area box = (x_padding + 1, y_padding + 1, x_padding + 1824, y_padding + 476) im = ImageOps.grayscale(ImageGrab.grab(box)) a = array(im.getcolors()) a = a.sum() print a return a
def get_bar():
    """Fingerprint the 2x5 pixel patch at Cord.redBar; the summed grayscale
    histogram distinguishes red (active) from gray (inactive)."""
    left, top = Cord.redBar[0], Cord.redBar[1]
    patch = ImageOps.grayscale(ImageGrab.grab((left, top, left + 2, top + 5)))
    return array(patch.getcolors()).sum()
def grab(): box = (c,b,z,q) im = ImageOps.grayscale(ImageGrab.grab(box)) a = array(im.getcolors()) a = a.sum() print a return a
def grab_boxes(panel_list, boxes):
    """Screen-scrape each letter box, map each box's pixel-sum fingerprint
    to a letter via panel_list's per-box dictionaries (prompting the user
    and persisting when a fingerprint is unknown), and return the assembled
    string."""
    print "scraping screen"
    #
    # I'll probably forget how the fudge this works..
    #
    #
    # This all also return the letters list for some reason
    # Bad design
    # NOTE(review): `loc` is computed but never used in this function.
    loc = os.getcwd() + '\\letters\\'
    newBoxData = []
    # Fingerprint each box by summing all of its pixel data.
    for i in boxes:
        im = ImageGrab.grab(i)
        a = array(im.getdata())
        newBoxData.append(a.sum())
    print "Checking dictionary for appropriate keys\n\n"
    for i in range(6):
        # looks at the new grabs. if, sum for box1 (for example) not in dict.keys
        # Add key and prompt for value
        if newBoxData[i] not in panel_list[i]:
            box_num = i+1
            out_string = "please enter letter for box %d:" % box_num
            new_letter = raw_input(out_string)
            panel_list[i][newBoxData[i]] = new_letter
            # Persist immediately so the learned letter survives a crash.
            save_dicts(panel_list)
    return gen_string_from_grabs(panel_list, newBoxData)
def eventFinder(): box = (200, 500, 884, 750) im = ImageGrab.grab(box) if specialChecker(im): print 'Special spotted!\n' p = Process(target=multiClick()) p.start() p.join() if bubbleCheck(im): print "A bubble has appeared!\n" leftClick() ## if specialChecker(im): ## print 'Special spotted!\n' ## for i in range(15): ## leftClick() if fuzzCheck(im): leftClick() if specialChecker(im): print 'Special spotted!\n' if spinCheck(im): if isSpinning(): launcher() print 'searching...'
def canShop():
    """True when the shop-indicator pixel at (695, 385) shows the expected
    colour; falls through (returns None) otherwise."""
    sample = ImageGrab.grab().getpixel((695, 385))
    if sample == (242, 227, 110):
        return True
def Screenshot():
    """Save a timestamped full-screen PNG; queue its absolute path for
    mailing when LOG_SENDMAIL is on."""
    capture = ImageGrab.grab()
    filename = os.path.join(time.strftime('%Y_%m_%d_%H_%M_%S') + '.png')
    capture.save(filename)
    if LOG_SENDMAIL == True:
        # add to the list
        LOG_TOSEND.append(str(os.getcwd()) + "\\" + str(filename))
def questing_act(self):
    """Dispatch one questing step based on what the current screen shows."""
    img = ImageGrab.grab()
    self.grinding.check_popup_close()
    if image_manager.is_in_quest(img):
        self.print_log("act: quest")
        self.next_node_search()
    elif image_manager.is_in_quest_complete(img):
        self.restart_or_next()
    elif image_manager.is_in_fighting(img):
        self.questing_fight()
    elif image_manager.is_in_quest_out_of_energy(img):
        # Out of quest energy: fall back to grinding the arena instead.
        self.print_log("act: out of energy")
        self.print_log("Warning! I will grind arena in 4")
        mouse_click(938,318)
        time.sleep(4)
        self.grinding.loop()
    else:
        # Unknown screen: wait, then click repeatedly to dismiss whatever is up.
        self.print_log("act: other")
        time.sleep(5)
        for x in range(0,5):
            mouse_click(500,383)
            time.sleep(0.2)
def gold(idx):
    """Detect and click the 'gold' button by perceptual-hash comparison; for
    idx < 5 also run the help/rob/close click sequence.

    Returns True when the gold button matched (and was clicked), else False.
    """
    l, t = L + 19, T + 261
    r, b = l + 53, t + 44
    # ImageGrab.grab((l, t, r, b)).save('gold_t.png')
    hash_val1 = get_hash(ImageGrab.grab((l, t, r, b)))
    hash_val2 = get_hash(Image.open('gold.png'))
    dis = hamming_dist(hash_val1, hash_val2)
    # Hamming distance below 50 counts as a match -- TODO confirm threshold.
    if dis < 50:
        print 'gold', dis
        # return True
        autopy.mouse.move(l + 25, t + 25)
        autopy.mouse.click()
        time.sleep(0.5)
        if idx < 5:
            autopy.mouse.move(L + 394, T + 725)  # help
            autopy.mouse.click()
            time.sleep(0.5)
            # may be help is full, should try rob to done task, so it will not close immediately
            autopy.mouse.move(L + 163, T + 725)  # rob
            autopy.mouse.click()
            time.sleep(0.5)
            autopy.mouse.move(L + 506, T + 234)  # close, if can not rob, should manual close
            autopy.mouse.click()
        return True
    return False
def pickit(): time.sleep(0.5) #减速 这里数值可以增大 click(465, 580) ##点击A click(505, 715) ##提交答案 time.sleep(0.5) #减速等待 fullsc = ImageGrab.grab() ##截全圖,因爲是pick工作,所以提交答案再截圖 saveQ = fullsc.crop(Qbox).save('tmp/Qbox.jpg', 'JPEG') picQ = open('tmp/Qbox.jpg', 'rb') Qmd5 = hashlib.md5(picQ.read()).hexdigest().upper() wQmd5 = '%s' % Qmd5 #为解决相同题目產生錯誤的问题,加入QBox2,再生成MD5 saveQ2 = fullsc.crop(Q2box).save('tmp/Qbox2.jpg', 'JPEG') picQ2 = open('tmp/Qbox2.jpg', 'rb') Q2md5 = hashlib.md5(picQ.read()).hexdigest().upper() AllQmd5 = '%s%s' % (Qmd5, Q2md5) AllQmd5 = hashlib.md5(AllQmd5).hexdigest().upper() wAllQmd5 = '%s' % AllQmd5 picQ.close(), picQ2.close() saveTips = fullsc.crop(Tipsbox).save('tmp/Tips.JPG', 'JPEG') #截图,保存,提交答案之後查看正確答案 Tips = open('tmp/Tips.JPG', 'rb') Tipsmd5 = hashlib.md5(Tips.read()).hexdigest().upper() Tips.close() #计算答案MD5 def md5w(): md5 = hashlib.md5(pic.read()).hexdigest().upper() md5 = '%s' % md5 hashdict[wQmd5] = md5 #單單題目區 hashdict_v2[wAllQmd5] = md5 #圖片區+題目區 pic.close() if Tipsmd5 == Amd5: saveA = fullsc.crop(Abox).save('tmp/A.jpg', 'JPEG') pic = open('tmp/A.jpg', 'rb') md5w() elif Tipsmd5 == Bmd5: saveB = fullsc.crop(Bbox).save('tmp/B.jpg', 'JPEG') pic = open('tmp/B.jpg', 'rb') md5w() elif Tipsmd5 == Cmd5: saveC = fullsc.crop(Cbox).save('tmp/C.jpg', 'JPEG') pic = open('tmp/C.jpg', 'rb') md5w() elif Tipsmd5 == Dmd5: saveD = fullsc.crop(Dbox).save('tmp/D.jpg', 'JPEG') pic = open('tmp/D.jpg', 'rb') md5w() else: print 'no answer! no tips! cant catch!' getProcess()
def pickit(): click(465,580) ##点击A shash = os.getcwd() + '/hash/1.dict' hashp = os.getcwd() + '/hash' ##os.getcwd獲取當前工作目錄 if os.path.exists(hashp): pass else: os.mkdir('hash') wokingp = os.getcwd() + '/tmp/' if os.path.exists(wokingp): pass else: os.mkdir('tmp') click(505,715) ##提交答案 time.sleep(0.5) fullsc = ImageGrab.grab() ##截全圖,因爲是pick工作,所以提交答案再截圖 saveQ = fullsc.crop(Qbox).save(wokingp + 'saveQ.jpg','JPEG') #截图,保存 picQ = open(wokingp +'imageQ.jpg','rb') Qmd5 = hashlib.md5(picQ.read()).hexdigest().upper() ##計算題目Q的MD5 saveTips = fullsc.crop(Tipsbox).save(wokingp + 'Tips.JPG','JPEG') #截图,保存,提交答案之後查看正確答案 Tips = open(wokingp +'Tips.JPG','rb') Tipsmd5 = hashlib.md5(Tips.read()).hexdigest().upper() def md5w(): md5 = hashlib.md5(pic.read()).hexdigest().upper() ##計算答案的MD5 w = '%s:%s \n' % (Qmd5 ,md5) ##寫入HASH至文件 tk = open(shash,'a') tk.write(w) tk.close() pic.close() if Tipsmd5 == Amd5: saveA = fullsc.crop(Abox).save(wokingp + 'A.jpg','JPEG') pic = open(wokingp +'A.jpg','rb') md5w() elif Tipsmd5 == Bmd5: saveB = fullsc.crop(Bbox).save(wokingp + 'B.jpg','JPEG') pic = open(wokingp +'B.jpg','rb') md5w() elif Tipsmd5 == Cmd5: saveC = fullsc.crop(Cbox).save(wokingp + 'C.jpg','JPEG') pic = open(wokingp +'C.jpg','rb') md5w() elif Tipsmd5 == Dmd5: saveD = fullsc.crop(Dbox).save(wokingp + 'D.jpg','JPEG') pic = open(wokingp +'D.jpg','rb') md5w() else: print 'no answer!'