def chkimg(imgname, check=5, hwnd=None):
    """Capture the screen (or one window) and search it for an image file.

    imgname -- path of the template image (loaded as grayscale)
    check   -- match threshold forwarded to compareImg
    hwnd    -- optional window handle; when given, only that window's
               client area is captured

    On a match the coordinates are stored in the globals g_img_x/g_img_y
    and True is returned; otherwise False.
    """
    global g_img_x, g_img_y
    # Load the reference image as grayscale.
    comimg = cv2.imread(imgname, 0)
    # Capture either the given window's client area or the whole screen.
    if hwnd is not None:  # identity check instead of != None
        rect = winxpgui.GetWindowRect(hwnd)
        size = winxpgui.GetClientRect(hwnd)
        cap = [rect[0], rect[1], size[2] + rect[0], size[3] + rect[1]]
        baseimg = ImageGrab.grab(cap)
    else:
        baseimg = ImageGrab.grab()
    # Convert the capture to grayscale for template matching.
    # NOTE(review): PIL grabs are RGB; COLOR_BGR2GRAY swaps the R/B
    # weights — acceptable for matching, but confirm if precision matters.
    npimg = np.asarray(baseimg)
    capimg = cv2.cvtColor(npimg, cv2.COLOR_BGR2GRAY)
    # compareImg returns the match coordinates, or None when not found.
    retXY = compareImg(comimg, capimg, check)
    if retXY is not None:
        g_img_x = retXY[0]
        g_img_y = retXY[1]
        return True
    return False
def hit():
    """Look for the 'hit' button on screen and click through the
    hit / can-hit / rob-gold flow.

    Returns 2 when the full can-hit sequence ran, 1 when only the hit
    button was clicked, 0 when nothing matched.  Relies on module
    globals L, T (window origin) and the get_hash / hamming_dist
    helpers; all other numbers are hard-coded screen offsets.
    """
    l, t = L + 21, T + 335
    r, b = l + 50, t + 50
    # ImageGrab.grab((l, t, r, b)).save('hit_t.png')
    hash_val1 = get_hash(ImageGrab.grab((l, t, r, b)))
    hash_val2 = get_hash(Image.open('hit.png'))
    dis = hamming_dist(hash_val1, hash_val2)
    if dis < 50:  # perceptual-hash distance threshold -- TODO confirm tuning
        print 'hit', dis
        # return True
        autopy.mouse.move(l + 25, t + 25)
        autopy.mouse.click()
        time.sleep(0.5)
        # Second region: the "can hit" indicator.
        l1, t1 = L + 342, T + 343
        r1, b1 = l1 + 53, t1 + 43
        hash_val11 = get_hash(ImageGrab.grab((l1, t1, r1, b1)))
        hash_val21 = get_hash(Image.open('can_hit.png'))
        dis1 = hamming_dist(hash_val11, hash_val21)
        # ImageGrab.grab((l1, t1, r1, b1)).save('can_hit_t.png')
        print 'can_hit', dis1
        if dis1 < 50:  # judge if can hit
            autopy.mouse.move(L + 280, T + 650)  # click pet
            for i in xrange(13):
                autopy.mouse.click()
                time.sleep(1.0)
            autopy.mouse.move(L + 280, T + 504)  # click gold
            autopy.mouse.click()
            time.sleep(0.2)
            return 2
        return 1
    return 0
def producer(out_q): shots=200 time.sleep(3) #info('producer line') with open("tmp.bin","wb") as w: print "start gather" global original original=(640,359) printscreen_pil=ImageGrab.grab().thumbnail(original, Image.ANTIALIAS) last=time.time(); for i in xrange(frames): difference=(time.time()-last) while difference<0.08: difference =(time.time()-last) time.sleep(0.01) last=time.time() print difference printscreen_pil=ImageGrab.grab() printscreen_pil.thumbnail(original, Image.LINEAR) image_data = bytearray([z for l in printscreen_pil.getdata() for z in l ]) w.write(image_data) shots-=1 #print shots print "done gather"
def OnLeftUp(self, evt):
    """Finish a rubber-band selection: capture the dragged rectangle and
    offer a save-file dialog for the grabbed image."""
    self.endX = evt.X
    self.endY = evt.Y
    if self.dlg:
        self.dlg.Destroy()
        self.dlg = None
    if self.beginX != self.endX and self.beginY != self.endY:
        # Normalise the drag corners so the bbox is always
        # (left, top, right, bottom), whatever direction was dragged.
        left, right = sorted((self.beginX, self.endX))
        top, bottom = sorted((self.beginY, self.endY))
        im = ImageGrab.grab((left, top, right, bottom))
        self.dlg = SafeSaveFileDialog(self)
        if self.dlg.ShowModal() == wx.ID_OK:
            self.dlg.Save(im)
        else:
            self.dlg.Destroy()
            self.dlg = None
def questing_fight(self):
    """Spam attack keys until the fight screen is left.

    Loops while the captured screen is neither back on the quest map nor
    showing a closable popup: presses "0" every pass, "K" every 4th
    pass, and SPACE once the counter passes 15 (wrapping at 20).  Also
    breaks out early on quest-complete or popup detection.
    """
    self.print_log("start fighting")
    sequence = 0
    img = ImageGrab.grab()
    while not (image_manager.is_in_quest(img) or self.grinding.check_popup_close(img)):
        keyboard_send("0")
        #~ time.sleep(0.001)
        if sequence % 4 == 0:
            keyboard_send("K")
        sequence += 1
        if (sequence > 15):
            keyboard_send(KEYCODE_SPACE)
        if (sequence > 20):
            sequence = 0
        if not image_manager.is_in_fighting(img):
            if image_manager.is_in_quest_complete(img):
                self.print_log("quest complete")
                break
            if self.grinding.check_popup_close(img):
                self.print_log("popup woi!")
                break
        # Re-grab for the next iteration's checks.
        img = ImageGrab.grab()
    self.print_log("done_fighting")
    time.sleep(3)
def wide_screengrab():
    """Scan the current preset's wide-screen pixel boxes for a match.

    Grabs each configured box, stringifies its pixel data and compares
    it against the preset's known-pixel table.  On the first match the
    box is stored in config.test and config.wide_scan is set.

    BUG FIX: the acacia branch previously compared no-match pixels
    against config.aspen (the wrong preset's table), which also made its
    "error" fallback reachable for pixels present only in aspen.
    """
    config.wide_scan = False
    if config.preset == "aspen":
        boxes, known = config.aspen_wide_box, config.aspen
    elif config.preset == "acacia":
        boxes, known = config.acacia_wide_box_down, config.acacia
    else:
        return
    for stuff in boxes:
        im = ImageGrab.grab(stuff)
        config.current_pixel = str(np.asarray(im))
        if config.current_pixel in known:
            config.test = stuff
            print(config.test)
            config.wide_scan = True
            print('It matches!')
            break
        else:
            print("It doesnt match")
def inspect(self): pointer = self.cursor_location bbox = (pointer.x, pointer.y, pointer.x+1, pointer.y+1) ss = ImageGrab.grab(bbox) pix = ss.load() print "At %s, color is %r" % (pointer, pix[0,0]) if self.main_bbox: # Find the relative point of this. relative = self.to_relative(pointer) reversed = self.to_absolute(relative) print " Relative: %s Reversed: %s" % (relative, reversed) prev = self.prev_inspect if prev: pss = ImageGrab.grab((prev.x, prev.y, prev.x+1, prev.y+1)) ppix = pss.load() print "Previous spot %s is now %r" % (self.prev_inspect, ppix[0,0]) if self.main_bbox: # Find the relative point of this. relative = self.to_relative(prev) reversed = self.to_absolute(relative) print " Relative: %s Reversed: %s" % (relative, reversed) self.prev_inspect = pointer
def playGame(): global game_over # Click on play game on start screen mousePos(play_game) leftClick() # Grab the first block # Little tricky because right after screen transition time.sleep(1.2) im = ImageGrab.grab(box_cur_block) color = im.getpixel(cur_block) cnt = 0 tot = 0 while True: if game_over: break time.sleep(.3) # required, for keeping the game in sync prev_color = color if color in colors: piece = colors[color] tot += 1 print "Saw a " + piece else: print "Unknown color!" break # Grab the next block im = ImageGrab.grab(box_next_block) color = im.getpixel(next_block) # HACK - checking for game over # if the next block has not changed for five times straight # the game is already over if color == prev_color: cnt += 1 if cnt > 5: game_over = True else: cnt = 0 si,sc,lr = pos(piece) print si,sc,lr guideBlock(piece, si, sc) updateGrid(piece,si,sc,lr)
def takingScreenshot():
    """Grab the screen, scale it to 800x600 and slice it into the named
    board regions held in module globals for the rest of the bot."""
    global enemySide
    global mySide
    global turn
    global enemy
    global me
    global enemy_mana
    global my_mana
    global stack
    global my_Hand
    global enemy_Hand
    shot = grab.grab().resize((800, 600), Image.BICUBIC)
    #shot = shot.convert('LA')  # only needed for a grayscale pipeline
    enemySide = shot.crop((197, 177, 605, 279))
    mySide = shot.crop((197, 281, 605, 383))
    turn = shot.crop((614, 248, 685, 292))
    enemy = shot.crop((361, 48, 442, 167))
    enemy.save(path('images/tests/enemy.png'))
    me = shot.crop((361, 394, 442, 513))
    enemy_mana = shot.crop((490, 26, 528, 50))
    my_mana = shot.crop((508, 543, 546, 567))
    stack = shot.crop((118, 169, 149, 411))
    my_Hand = shot.crop((246, 518, 483, 591))
    enemy_Hand = shot.crop((246, 0, 483, 44))
def as_image_simple(self):
    """
    Screenshot this element's bounding box via ImageGrab.  Only works
    while the element is actually visible on screen.
    @return: Image.Image
    """
    bounds = self.bounds
    return ImageGrab.grab(bounds)
def capture_image(self, savefilename):
    """Save a full-screen screenshot to *savefilename*.

    POSIX uses a GTK root-window grab honouring the configured image
    format/quality; Windows uses PIL ImageGrab clamped by the CropBox.
    """
    screensize = self.get_screen_size()
    # The cropbox will take care of making sure our image is within
    # screen boundaries.
    cropbox = CropBox(topleft=Point(0, 0), bottomright=screensize,
                      min=Point(0, 0), max=screensize)
    self.logger.debug(cropbox)
    if os.name == "posix":
        screengrab = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                                    screensize.x, screensize.y)
        screengrab.get_from_drawable(
            gtk.gdk.get_default_root_window(),
            gtk.gdk.colormap_get_system(),
            0, 0, 0, 0, screensize.x, screensize.y
        )
        save_options_dict = {}
        # BUG FIX: str.lower() returns a new string; the old code called
        # it as a bare statement and discarded the result, so uppercase
        # "JPG"/"JPEG" settings never matched IMG_SET.
        img_format = self.subsettings["General"]["Screenshot Image Format"].lower()
        img_quality = to_unicode(self.subsettings["General"]["Screenshot Image Quality"])
        if img_format in IMG_SET:
            self.subsettings["General"]["Screenshot Image Format"] = "jpeg"
            save_options_dict = {"quality": img_quality}
        screengrab.save(savefilename,
                        self.subsettings["General"]["Screenshot Image Format"],
                        save_options_dict)
    if os.name == "nt":
        image_data = ImageGrab.grab(
            (cropbox.topleft.x, cropbox.topleft.y,
             cropbox.bottomright.x, cropbox.bottomright.y)
        )
        image_data.save(savefilename,
                        quality=self.subsettings["General"]["Screenshot Image Quality"])
def GetScreenImage(self, FilePath):
    """ Capture a full screen image into the given _FilePath_.

    The given _FilePath_ must be relative to Robot Framework output
    directory, otherwise the embedded image will not be shown in the
    log file.
    """
    #
    # Check that PIL is installed
    #
    # Identity check: `is None` is the correct test for the
    # module-missing sentinel (== invokes the comparison protocol).
    if ImageGrab is None:
        raise RuntimeError("Python Imaging Library (PIL) is not installed, but is required for GetScreenImage")
    #
    # Check for a valid FilePath and make sure the directories exist
    #
    if FilePath and os.path.isabs(FilePath):
        raise RuntimeError("Given FilePath='%s' must be relative to Robot outpudir" % FilePath)
    fullFilePath = os.path.join(self._OutputDir, FilePath)
    if not os.path.exists(os.path.split(fullFilePath)[0]):
        os.makedirs(os.path.split(fullFilePath)[0])
    self._info("GetScreenImage(FilePath=%s)" % fullFilePath)
    #
    # Capture and save the screen image of the whole screen
    #
    GrabbedImage = ImageGrab.grab()  # store screenshot as "RGB" Image
    GrabbedImage.save(fullFilePath)  # PIL evaluates extension
    #
    # Embed the screenshot in the Robot Framework log file
    #
    self._html('<td></td></tr><tr><td colspan="3"><a href="%s">'
               '<img src="%s" width="700px"></a></td></tr>' % (FilePath, FilePath))
def check_target(self):
    """Identify which of the 20 on-screen target rows matches one of the
    reference images C1-C4.

    Each row is hashed and classified 1-4 by hamming distance (< 10) to
    the references, 0 when nothing matched.  Returns the row index with
    the highest class, or None (implicitly) when every row scored 0.
    """
    # BUG FIX: this was `stack = {}` followed by `stack.append[x:1]`,
    # which raises AttributeError (dicts have no append) -- and the
    # bracket/slice syntax was wrong anyway.  Use plain dict assignment.
    votes = {}
    hashC1 = self.get_hash(Image.open("C1.png"))
    hashC2 = self.get_hash(Image.open("C2.png"))
    hashC3 = self.get_hash(Image.open("C3.png"))
    hashC4 = self.get_hash(Image.open("C4.png"))
    for x in range(0, 20, 1):
        # Rows are 20 px apart; each crop is a 56x17 strip.
        target_screen = ImageGrab.grab((725, 237 + x * 20, 781, 254 + x * 20))
        # NOTE(review): bare get_hash here vs self.get_hash above --
        # kept as-is; confirm whether a module-level helper is intended.
        hashScreen = get_hash(target_screen)
        distC1 = hamming_dist(hashScreen, hashC1)
        distC2 = hamming_dist(hashScreen, hashC2)
        distC3 = hamming_dist(hashScreen, hashC3)
        distC4 = hamming_dist(hashScreen, hashC4)
        if distC1 < 10:
            votes[x] = 1
        elif distC2 < 10:
            votes[x] = 2
        elif distC3 < 10:
            votes[x] = 3
        elif distC4 < 10:
            votes[x] = 4
        else:
            votes[x] = 0
    if max(votes.items(), key=lambda kv: kv[1])[1] >= 1:
        return max(votes.items(), key=lambda kv: kv[1])[0]
def getcustomer(n):
    """Read the 1-px strip above table *n*'s counter and return the food
    whose raw-pixel signature appears in it, or None when nothing does."""
    left, right = tables[n][0], tables[n][1]
    strip = ImageGrab.grab((x_pad + left, y_pad + 64,
                            x_pad + right, y_pad + 65))
    counter = strip.convert('L').tostring()
    for food, wish in order.iteritems():
        if wish in counter:
            return food
    return None
def findSharks():
    """Take a full-screen capture, save it as rage.bmp and open a viewer."""
    capture = ImageGrab.grab()
    #rage = np.array(capture)
    #plt.imshow(rage)
    capture.save('rage.bmp')
    capture.show()
def getSeatFour(): im = ImageOps.grayscale(ImageGrab.grab ((xPad+328,yPad+60,xPad+391,yPad+76))) a = array(im.getcolors()) ## im.save(os.getcwd() + '\\SeatFourBubble.png', 'PNG') print 'table 4 bubble = %d' % a.sum() return a.sum()
def grab(): box = (x_pad + 1,y_pad+1,x_pad+640,y_pad+480) im = ImageOps.grayscale(ImageGrab.grab(box)) a = array(im.getcolors()) a = a.sum() print a return a
def questing_act(self):
    """Dispatch one questing step based on what the screen shows:
    quest map, quest complete, a fight, out of energy, or unknown.

    NOTE(review): check_popup_close() is called here without the freshly
    grabbed image, unlike other call sites -- confirm its signature.
    """
    img = ImageGrab.grab()
    self.grinding.check_popup_close()
    if image_manager.is_in_quest(img):
        self.print_log("act: quest")
        self.next_node_search()
    elif image_manager.is_in_quest_complete(img):
        self.restart_or_next()
    elif image_manager.is_in_fighting(img):
        self.questing_fight()
    elif image_manager.is_in_quest_out_of_energy(img):
        self.print_log("act: out of energy")
        self.print_log("Warning! I will grind arena in 4")
        mouse_click(938, 318)
        time.sleep(4)
        self.grinding.loop()
    else:
        # Unknown screen: wait, then click repeatedly to clear dialogs.
        self.print_log("act: other")
        time.sleep(5)
        for x in range(0, 5):
            mouse_click(500, 383)
            time.sleep(0.2)
def gold(idx):
    """Look for the 'gold' button; when found, click it and (for
    idx < 5) run the help/rob sequence.  Returns True when the button
    matched, False otherwise.

    Relies on module globals L, T (window origin) and the get_hash /
    hamming_dist helpers; numbers are hard-coded screen offsets.
    """
    l, t = L + 19, T + 261
    r, b = l + 53, t + 44
    # ImageGrab.grab((l, t, r, b)).save('gold_t.png')
    hash_val1 = get_hash(ImageGrab.grab((l, t, r, b)))
    hash_val2 = get_hash(Image.open('gold.png'))
    dis = hamming_dist(hash_val1, hash_val2)
    if dis < 50:  # perceptual-hash distance threshold -- TODO confirm tuning
        print 'gold', dis
        # return True
        autopy.mouse.move(l + 25, t + 25)
        autopy.mouse.click()
        time.sleep(0.5)
        if idx < 5:
            autopy.mouse.move(L + 394, T + 725)  # help
            autopy.mouse.click()
            time.sleep(0.5)
            # may be help is full, should try rob to done task, so it will not close immediately
            autopy.mouse.move(L + 163, T + 725)  # rob
            autopy.mouse.click()
            time.sleep(0.5)
            autopy.mouse.move(L + 506, T + 234)  # close, if can not rob, should manual close
            autopy.mouse.click()
        return True
    return False
def Screenshot():
    """Save a timestamped PNG screenshot in the working directory and,
    when mail logging is enabled, queue its absolute path for sending."""
    img = ImageGrab.grab()
    # os.path.join with a single argument was a no-op; the name alone
    # is the relative save target.
    saveas = time.strftime('%Y_%m_%d_%H_%M_%S') + '.png'
    img.save(saveas)
    if LOG_SENDMAIL:  # truthiness instead of == True
        # Build the absolute path portably instead of concatenating "\\".
        addFile = os.path.join(os.getcwd(), saveas)
        LOG_TOSEND.append(addFile)  # add to the list
def grab(): box = (c,b,z,q) im = ImageOps.grayscale(ImageGrab.grab(box)) a = array(im.getcolors()) a = a.sum() print a return a
def grab_boxes(panel_list, boxes):
    """Screen-scrape the six letter boxes and build the letter string.

    Each box is grabbed and reduced to the sum of its raw pixel data,
    which serves as a fingerprint keyed into panel_list[i].  Unknown
    fingerprints prompt the user for the letter, which is persisted via
    save_dicts.  Returns gen_string_from_grabs(panel_list, sums).
    """
    print "scraping screen"
    # NOTE(review): `loc` is computed but never used in this function.
    loc = os.getcwd() + '\\letters\\'
    newBoxData = []
    for i in boxes:
        im = ImageGrab.grab(i)
        a = array(im.getdata())
        newBoxData.append(a.sum())
    print "Checking dictionary for appropriate keys\n\n"
    for i in range(6):
        # If this box's pixel-sum fingerprint is new, ask the user which
        # letter it represents and persist the mapping immediately.
        if newBoxData[i] not in panel_list[i]:
            box_num = i + 1
            out_string = "please enter letter for box %d:" % box_num
            new_letter = raw_input(out_string)
            panel_list[i][newBoxData[i]] = new_letter
            save_dicts(panel_list)
    return gen_string_from_grabs(panel_list, newBoxData)
def run_osra(osra):
    """Grab an image from the clipboard, run the OSRA structure
    recogniser on it and return the resulting SDF text, or " " on any
    failure (no clipboard image, save error, OSRA launch error)."""
    sdf = " "
    filedes, filename = tempfile.mkstemp(suffix='.png')
    # BUG FIX: mkstemp returns an OPEN OS-level descriptor; the old code
    # never closed it, leaking one fd per call (and on Windows keeping
    # the file locked against the image.save below).
    os.close(filedes)
    if os.name == "posix":
        import pygtk
        pygtk.require('2.0')
        import gtk, gobject
        clipboard = gtk.clipboard_get()
        image = clipboard.wait_for_image()
        if not image:
            return sdf
        try:
            image.save(filename, "png")
        except:
            return sdf
    else:
        import ImageGrab
        image = ImageGrab.grabclipboard()
        if not image:
            return sdf
        try:
            image.save(filename)
        except:
            return sdf
    try:
        stdout, stdin, stderr = popen2.popen3('"%s" -f sdf %s' % (osra, filename))
    except:
        os.remove(filename)
        return sdf
    sdf = stdout.read()
    #os.remove(filename)
    return sdf
def eventFinder(): box = (200,500,884,750) im = ImageGrab.grab(box) if specialChecker(im): print 'Special spotted!\n' p = Process(target= multiClick()) p.start() p.join() if bubbleCheck(im): print "A bubble has appeared!\n" leftClick() ## if specialChecker(im): ## print 'Special spotted!\n' ## for i in range(15): ## leftClick() if fuzzCheck(im): leftClick() if specialChecker(im): print 'Special spotted!\n' if spinCheck(im): if isSpinning(): launcher() print 'searching...'
def screenGrab():
    """Capture the status strip, save it as last_test.png and return it."""
    strip_box = (x_pad + 1, y_pad + 1, x_pad + 522, y_pad + 20)
    strip = ImageGrab.grab(strip_box)
    strip.save(os.getcwd() + '\\last_test.png', 'PNG')
    return strip
def canShop():
    """Return True when the shop-available indicator pixel at (695, 385)
    shows the expected yellow (242, 227, 110), else False.

    Previously the no-match case fell off the end and returned None;
    still falsy, but an explicit boolean is clearer for callers.
    """
    expected = (242, 227, 110)
    actual = ImageGrab.grab().getpixel((695, 385))
    return actual == expected
def screenshot_RD():
    """Read the Random Draft hero pool from a screenshot.

    Scales pixel offsets for the detected aspect ratio, crops each hero
    icon slot and uses the crop's mean brightness to decide whether the
    icon is lit (present in the pool).  Returns the list of hero ids, or
    None when the aspect ratio is unsupported.  Warns when the detected
    pool size falls outside the plausible 14-24 range.
    """
    pool = []
    screenshot = ImageGrab.grab()
    # Vertical scale factor relative to the 1080p reference layout.
    rescale = float(screenshot.size[1]) / 1080
    # Integer aspect key: 16*h/w (Python 2 floor division), e.g. 9 for
    # 16:9 and 12 for 4:3.
    aspect = 16 * screenshot.size[1] / screenshot.size[0]
    if aspect not in hero_group_pixel_offsets_RD:
        tkMessageBox.showerror("Error", "Aspect Ratio not supported")
        return
    for group in range(6):
        for hero_number_within_group in range(len(hero_group_table[group])):
            blockX, blockY = hero_group_pixel_offsets_RD[aspect][group]
            offset = 64 if aspect == 12 else 73  # The 4:3 icons are smaller
            # Crop the 51x51 icon slot; 7 icons per row within a group.
            image = screenshot.crop((int((blockX + offset * (hero_number_within_group % 7) + 2) * rescale),
                                     int((blockY + offset * (hero_number_within_group / 7)) * rescale),
                                     int((blockX + offset * (hero_number_within_group % 7) + 51) * rescale),
                                     int((blockY + offset * (hero_number_within_group / 7) + 51) * rescale)))
            #image.show()
            brightness = max(ImageStat.Stat(image).mean)
            hero_id = hero_group_table[group][hero_number_within_group]
            if heroes[hero_id] == "Spectre":
                brightness += 25  # Spectre is super dark
            elif heroes[hero_id] == "Puck" or heroes[hero_id] == "Io":  # Puck and Io are very bright
                brightness -= 10
            if brightness > 45:
                pool.append(hero_id)
            if brightness > 35 and brightness < 55:
                # Log borderline readings to help tune the 45 threshold.
                print heroes[hero_id], brightness
    if len(pool) > 24 or len(pool) < 14:
        tkMessageBox.showwarning("Warning", "Warning! Number of heroes detected (" + str(len(pool)) + ") is wrong!")
    return pool
def _GrabWindowImage(self, WindowTitle, Text, fullFilePath):
    """Capture the given AutoIt window and save it to *fullFilePath*.

    AutoIt reports position and size; ImageGrab.grab() wants an absolute
    (left, top, right, bottom) bounding box.
    """
    pos_x = self._AutoIt.WinGetPosX(WindowTitle, Text)
    pos_y = self._AutoIt.WinGetPosY(WindowTitle, Text)
    width = self._AutoIt.WinGetPosWidth(WindowTitle, Text)
    height = self._AutoIt.WinGetPosHeight(WindowTitle, Text)
    # BUG FIX: grab() takes (left, top, right, bottom); the old code
    # passed raw width/height as the right/bottom coordinates, cropping
    # the wrong region for any window not at the screen origin.
    GrabbedImage = ImageGrab.grab((pos_x, pos_y, pos_x + width, pos_y + height))  # "RGB" Image
    GrabbedImage.save(fullFilePath)  # PIL evaluates extension
def screenGrab():
    """Return a capture of the 500x700 play area offset by x_pad/y_pad."""
    # Upper-left and lower-right corners of the bounding box.
    play_area = (x_pad + 1, y_pad + 1, x_pad + 500, y_pad + 700)
    #image.save(os.getcwd() + '\\full_snap__' + str(int(time.time())) + '.png', 'PNG')
    return ImageGrab.grab(play_area)
def run(self):
    """
    Main thread procedure: periodically screenshot until stopped,
    skipping frames that are identical to the previous capture.
    """
    log = logging.getLogger("Screenshots.Run")
    # PIL is an optional dependency; bail out quietly when missing so it
    # is not a hard requirement.
    if not IS_PIL:
        return False
    counter = 0
    previous = None
    log.info("Started taking screenshots.")
    while self._do_run:
        current = ImageGrab.grab()
        # Unchanged screen: wait and retry without saving.
        if previous and self._equal(previous, current):
            time.sleep(SHOT_DELAY)
            continue
        counter += 1
        save_at = os.path.join(self.save_path, "shot_%s.jpg" % counter)
        current.save(save_at)
        log.debug("Screenshot saved at \"%s\"." % save_at)
        previous = current
        time.sleep(SHOT_DELAY)
    return True
def getProcess():
    """Check the progress indicator: keep collecting answers while the
    quiz is unfinished, otherwise persist the answer dictionary."""
    fullsc = ImageGrab.grab()
    saveProcess = fullsc.crop(Pcbox).save('tmp/Process.jpg', 'JPEG')  # crop and save the progress area
    picPc = open('tmp/Process.jpg', 'rb')
    Pmd5 = hashlib.md5(picPc.read()).hexdigest().upper()
    picPc.close()
    if Pmd5 != Pcmd5:
        click(655, 770)  # click "next question"
        pickit()  # continue the answer-collection loop
    else:
        wdict()
        print 'finish'
def Pickit():
    """Answer collector: submit choice A, screenshot the result and map
    the question hash(es) to the choice the tip box marks as correct.

    Maintains two dictionaries: hashdict keyed by the question crop's
    MD5, and hashdict_v2 keyed by the combined MD5 of two question crops
    (disambiguates visually identical questions).  Recurses via the
    progress check until the chapter is finished, then persists both.
    """
    def picmath(box, filepath):
        # Crop `box` from the current full screenshot, save it, and
        # return the upper-cased MD5 of the saved file.
        savepic = fullsc.crop(box).save(filepath, 'JPEG')
        pic = open(filepath, 'rb')
        md5 = hashlib.md5(pic.read()).hexdigest().upper()
        pic.close()
        return md5
    time.sleep(1.5)  # slow down; this delay may be increased
    clickClient(getbrow, (465, 580))  # click choice A
    clickClient(getbrow, (505, 715))  # submit the answer
    time.sleep(1.5)  # wait for the result to render
    fullsc = ImageGrab.grab()
    Qmd5 = picmath(Qbox, 'tmp/Qbox.jpg')
    # A second question region (Q2box) so identical-looking questions
    # still get distinct combined keys.
    Q2md5 = picmath(Q2box, 'tmp/Qbox2.jpg')
    AllQmd5 = hashlib.md5('%s%s' % (Qmd5, Q2md5)).hexdigest().upper()
    # After submitting, the tip box shows the correct answer.
    Tipsmd5 = picmath(Tipsbox, 'tmp/Tips.JPG')
    if Tipsmd5 == Amd5:
        hashdict[Qmd5] = picmath(Abox, 'tmp/A.jpg')
        hashdict_v2[AllQmd5] = picmath(Abox, 'tmp/A.jpg')
    elif Tipsmd5 == Bmd5:
        hashdict[Qmd5] = picmath(Bbox, 'tmp/B.jpg')
        hashdict_v2[AllQmd5] = picmath(Bbox, 'tmp/B.jpg')
    elif Tipsmd5 == Cmd5:
        hashdict[Qmd5] = picmath(Cbox, 'tmp/C.jpg')
        hashdict_v2[AllQmd5] = picmath(Cbox, 'tmp/C.jpg')
    elif Tipsmd5 == Dmd5:
        hashdict[Qmd5] = picmath(Dbox, 'tmp/D.jpg')
        hashdict_v2[AllQmd5] = picmath(Dbox, 'tmp/D.jpg')
    else:
        print "no answer! no tips! cant catch!"
    # Progress check: recurse while the quiz is not finished.
    Pmd5 = picmath(Pcbox, 'tmp/Process.jpg')
    if Pmd5 != Pcmd5:
        clickClient(getbrow, (655, 770))  # click "next question"
        Pickit()
    else:
        wdict('hashdata_v1.pkl', hashdict)
        wdict('hashdata_v2.pkl', hashdict_v2)
        print "finish, next chapter!"
        nextclass()
def Pickit():
    """Answer collector (v2): submit choice A, screenshot the result and
    map both the question hash (dict1) and the secondary-region hash
    (dict2_fpto) to the correct choice's hash.

    Recurses via the progress check until the chapter is finished, then
    drops the known all-blank-image key and persists both dicts.
    """
    def picmath(box, filepath):
        # Crop `box` from the current screenshot, save it, and return
        # the upper-cased MD5 of the saved file.
        savepic = fullsc.crop(box).save(filepath, 'JPEG')
        pic = open(filepath, 'rb')
        md5 = hashlib.md5(pic.read()).hexdigest().upper()
        pic.close()
        return md5
    time.sleep(0.5)  # slow down; this delay may be increased
    clickClient(getbrow, (465, 580))  # click choice A
    clickClient(getbrow, (505, 715))  # submit the answer
    time.sleep(0.5)  # wait for the result to render
    fullsc = ImageGrab.grab()
    Qmd5 = picmath(Qbox, 'tmp/Qbox.jpg')
    # Secondary question region disambiguates identical-looking questions.
    Q2md5 = picmath(Q2box, 'tmp/Qbox2.jpg')
    # After submitting, the tip box shows the correct answer.
    Tipsmd5 = picmath(Tipsbox, 'tmp/Tips.JPG')
    if Tipsmd5 == Amd5:
        dict1[Qmd5] = picmath(Abox, 'tmp/A.jpg')
        dict2_fpto[Q2md5] = picmath(Abox, 'tmp/A.jpg')
    elif Tipsmd5 == Bmd5:
        dict1[Qmd5] = picmath(Bbox, 'tmp/B.jpg')
        dict2_fpto[Q2md5] = picmath(Bbox, 'tmp/B.jpg')
    elif Tipsmd5 == Cmd5:
        dict1[Qmd5] = picmath(Cbox, 'tmp/C.jpg')
        dict2_fpto[Q2md5] = picmath(Cbox, 'tmp/C.jpg')
    elif Tipsmd5 == Dmd5:
        dict1[Qmd5] = picmath(Dbox, 'tmp/D.jpg')
        dict2_fpto[Q2md5] = picmath(Dbox, 'tmp/D.jpg')
    else:
        print 'no answer! no tips! cant catch!'
    # Progress check: recurse while the quiz is not finished.
    Pmd5 = picmath(Pcbox, 'tmp/Process.jpg')
    if Pmd5 != Pcmd5:
        clickClient(getbrow, (655, 770))  # click "next question"
        Pickit()
    else:
        if '782D92C2D53C89E7E10955A6F0349567' in dict2_fpto:
            # Drop the key produced by an all-blank secondary region.
            del dict2_fpto['782D92C2D53C89E7E10955A6F0349567']
        wdict('dict1.pkl', dict1)
        wdict('dict2_fpto.pkl', dict2_fpto)
        print 'finish'
def getProcessAns(): Pcmd5 = '54F84561323AE11EE1AA1C28290D7A17' fullsc = ImageGrab.grab() saveProcess = fullsc.crop(Pcbox).save('tmp/tmpProcess.jpg','JPEG') #截图,保存 picPc = open('tmp/tmpProcess.jpg','rb') Pmd5 = hashlib.md5(picPc.read()).hexdigest().upper() picPc.close() if Pmd5 != Pcmd5 : click(655,770) #点击下一题 AutoAnswer() #查题循环 else: print 'AutoAnswer finish' nextchapter()
def screenGrab():
    """Capture the screen, save it with a timestamped name, then
    template-match the top-left corner marker and print the result of
    cv2.minMaxLoc (min/max score and their locations).

    NOTE(review): `box` is computed but never passed to
    ImageGrab.grab(), so the FULL screen is captured; confirm whether
    grab(box) was intended.
    """
    box = (1936, 46, 2833, 529)
    im = ImageGrab.grab()
    filename = os.getcwd() + '\\full_snap__' + str(int(time.time())) + '.png'
    im.save(filename, 'PNG')
    large_image = cv2.imread(filename)
    small_image = cv2.imread(os.getcwd() + '\\templates\\top-left.png')
    method = cv.CV_TM_SQDIFF_NORMED
    result = cv2.matchTemplate(large_image, small_image, method)
    print cv2.minMaxLoc(result)
def pickit(): time.sleep(0.5) #减速等待 click(465, 580) ##点击A click(505, 715) ##提交答案 time.sleep(0.5) #减速等待 fullsc = ImageGrab.grab() ##截全圖,因爲是pick工作,所以提交答案再截圖 saveQ = fullsc.crop(Qbox).save('tmp/Qbox.jpg', 'JPEG') picQ = open('tmp/Qbox.jpg', 'rb') Qmd5 = hashlib.md5(picQ.read()).hexdigest().upper() picQ.close() saveQ2 = fullsc.crop(Q2box).save('tmp/Qbox2.jpg', 'JPEG') picQ2 = open('tmp/Qbox2.jpg', 'rb') Q2md5 = hashlib.md5(picQ.read()).hexdigest().upper() picQ2.close() #为解决相同题目问题,加入QBox2,再生成MD5 AllQmd5 = '%s%s' % (Qmd5, Q2md5) AllQmd5 = hashlib.md5(AllQmd5).hexdigest().upper() wAllQmd5 = '%s' % AllQmd5 saveTips = fullsc.crop(Tipsbox).save('tmp/Tips.JPG', 'JPEG') #截图,保存,提交答案之後查看正確答案 Tips = open('tmp/Tips.JPG', 'rb') Tipsmd5 = hashlib.md5(Tips.read()).hexdigest().upper() if Tipsmd5 == Amd5: saveA = fullsc.crop(Abox).save('tmp/A.jpg', 'JPEG') pic = open('tmp/A.jpg', 'rb') md5w() elif Tipsmd5 == Bmd5: saveB = fullsc.crop(Bbox).save('tmp/B.jpg', 'JPEG') pic = open('tmp/B.jpg', 'rb') md5w() elif Tipsmd5 == Cmd5: saveC = fullsc.crop(Cbox).save('tmp/C.jpg', 'JPEG') pic = open('tmp/C.jpg', 'rb') md5w() elif Tipsmd5 == Dmd5: saveD = fullsc.crop(Dbox).save('tmp/D.jpg', 'JPEG') pic = open('tmp/D.jpg', 'rb') md5w() else: print 'no answer!' getProcess()
def AutoAnswer(): time.sleep(0.15) #截图前等待 fullsc = ImageGrab.grab() ##截全圖,因爲是pick工作,所以提交答案再截圖 saveQ = fullsc.crop(Qbox).save('tmp/tmpQ.jpg', 'JPEG') #截图,保存問題 picQ = open('tmp/tmpQ.jpg', 'rb') Qmd5 = hashlib.md5(picQ.read()).hexdigest().upper() ##計算題目的MD5 wQmd5 = '%s' % Qmd5 getanswer = hashdata.get(wQmd5) #计算所有答案的md5 saveA = fullsc.crop(Abox).save('tmp/tmpA.jpg', 'JPEG') saveB = fullsc.crop(Bbox).save('tmp/tmpB.jpg', 'JPEG') saveC = fullsc.crop(Cbox).save('tmp/tmpC.jpg', 'JPEG') saveD = fullsc.crop(Dbox).save('tmp/tmpD.jpg', 'JPEG') picA = open('tmp/tmpA.jpg', 'rb') Amd5 = hashlib.md5(picA.read()).hexdigest().upper() picA.close() picB = open('tmp/tmpB.jpg', 'rb') Bmd5 = hashlib.md5(picB.read()).hexdigest().upper() picB.close() picC = open('tmp/tmpC.jpg', 'rb') Cmd5 = hashlib.md5(picC.read()).hexdigest().upper() picC.close() picD = open('tmp/tmpD.jpg', 'rb') Dmd5 = hashlib.md5(picD.read()).hexdigest().upper() picD.close() if wQmd5 in hashdata: #check题目是否存在 if getanswer == Amd5: click(465, 580) #A click(505, 715) #post elif getanswer == Bmd5: click(465, 605) click(505, 715) #post elif getanswer == Cmd5: click(465, 630) click(505, 715) #post elif getanswer == Dmd5: click(465, 655) click(505, 715) #post else: print 'no answer!' else: print 'no this question in hashdict!' click(655, 770) #next time.sleep(0.16) getProcess()
def screenGrab():
    """Capture the play area and save a timestamped PNG in the cwd.

    All coordinates assume a screen resolution of 1280x1024, and Chrome
    maximized with the Bookmarks Toolbar enabled.  Down key has been hit
    4 times to center the play area in the browser.
    x_pad = 156
    y_pad = 345
    Play area = x_pad+1, y_pad+1, 796, 825
    """
    play_area = (x_pad + 1, y_pad + 1, 796, 825)
    out_name = '{}\\full_snap__{}.png'.format(os.getcwd(), int(time.time()))
    ImageGrab.grab(play_area).save(out_name, 'PNG')
def pickit(): time.sleep(0.2) #减速等待 click(465, 580) ##点击A time.sleep(0.2) #减速等待 click(505, 715) ##提交答案 time.sleep(0.2) #减速等待 fullsc = ImageGrab.grab() ##截全圖,因爲是pick工作,所以提交答案再截圖 saveQ = fullsc.crop(Qbox).save('tmp/saveQ.jpg', 'JPEG') #截图,保存問題 picQ = open('tmp/saveQ.jpg', 'rb') Qmd5 = hashlib.md5(picQ.read()).hexdigest().upper() ##計算題目的MD5 wQmd5 = '%s' % Qmd5 saveTips = fullsc.crop(Tipsbox).save('tmp/Tips.JPG', 'JPEG') #截图,保存,提交答案之後查看正確答案 Tips = open('tmp/Tips.JPG', 'rb') Tipsmd5 = hashlib.md5(Tips.read()).hexdigest().upper() def md5w(): md5 = hashlib.md5(pic.read()).hexdigest().upper() ##計算答案的MD5 wmd5 = '%s' % md5 hashdict[wQmd5] = wmd5 pic.close() if Tipsmd5 == Amd5: saveA = fullsc.crop(Abox).save('tmp/A.jpg', 'JPEG') pic = open('tmp/A.jpg', 'rb') md5w() elif Tipsmd5 == Bmd5: saveB = fullsc.crop(Bbox).save('tmp/B.jpg', 'JPEG') pic = open('tmp/B.jpg', 'rb') md5w() elif Tipsmd5 == Cmd5: saveC = fullsc.crop(Cbox).save('tmp/C.jpg', 'JPEG') pic = open('tmp/C.jpg', 'rb') md5w() elif Tipsmd5 == Dmd5: saveD = fullsc.crop(Dbox).save('tmp/D.jpg', 'JPEG') pic = open('tmp/D.jpg', 'rb') md5w() else: print 'no answer!' getProcess()
def save_graph():
    """Screenshot the turtle canvas and save it to a user-chosen PNG."""
    import ImageGrab  # windows only, for now
    from tkFileDialog import asksaveasfilename as save
    canvas = turtle.getscreen().getcanvas()
    turtle.update()
    turtle.listen()
    canvas.update()
    # Canvas bounds in absolute screen coordinates.
    x0 = canvas.winfo_rootx()
    y0 = canvas.winfo_rooty()
    x1 = x0 + canvas.winfo_width()
    y1 = y0 + canvas.winfo_height()
    image = ImageGrab.grab((x0, y0, x1, y1))
    filename = save(defaultextension='.png')
    # BUG FIX: the dialog returns '' when cancelled; the old code then
    # crashed in image.save.  (Also dropped a duplicate turtle.listen().)
    if not filename:
        return
    image.save(filename, "PNG")
    showinfo("File Saved", ("File successfully saved as %s" % filename))
def getProcess():
    """Check the progress box: keep collecting answers while the quiz is
    unfinished, then merge with the old data and pickle the dict."""
    fullsc = ImageGrab.grab()
    saveProcess = fullsc.crop(Pcbox).save(wokingp + 'Process.jpg', 'JPEG')  # crop and save the progress area
    picPc = open(wokingp + 'Process.jpg', 'rb')
    Pmd5 = hashlib.md5(picPc.read()).hexdigest().upper()
    picPc.close()
    if Pmd5 != Pcmd5:
        click(655, 770)  # click "next question"
        pickit()  # continue the answer-collection loop
    else:
        hashdict.update(oldata)
        output = open('hashdict.pkl', 'wb')  # all questions recorded; persist
        pickle.dump(hashdict, output)  # write the hash dict to file
        print 'finish'
def snapshoot(self):
    """Grab the canvas area, stamp a caption onto it and save it as a
    timestamped bitmap under snapshot\\; the path is kept in
    self.filename.  (Removed the unused `white` local and the dead
    commented-out Image.new code.)"""
    # Canvas bounds in absolute screen coordinates.
    x0 = self.cv.winfo_rootx()
    y0 = self.cv.winfo_rooty()
    x1 = x0 + self.cv.winfo_width()
    y1 = y0 + self.cv.winfo_height()
    im = ImageGrab.grab((x0, y0, x1, y1))
    draw = ImageDraw.Draw(im)
    draw.text((50, 480), "flowGlance|A traffic monitor for Opendaylight", (0, 0, 0))
    times = strftime('%Y-%m-%d_%H_%M_%S')
    self.filename = 'snapshot\\flowGlance' + times + '.bmp'
    im.save(self.filename)
def click(self, x, y, won):
    #Returns True if you are still alive. False if not...
    """Click board cell (x, y), then probe the screen for the mine-hit
    indicator; raises clickedMine when it is detected.

    Assumes a 52-px cell pitch with the board origin near (546, 127) and
    a 1920-px-wide screen for the flat pixel index -- TODO confirm.
    NOTE(review): despite the comment above, the only `return False` is
    unreachable (after the raise) and the alive path returns None.
    """
    mousemacro.move(546 + (52 * x) + 26, 127 + (52 * y) + 26)
    mousemacro.click()
    if not won:
        self.sleep(0.1)
        # Park the cursor away from the board before sampling pixels.
        mousemacro.move(50, 50)
        mousemacro.click()
        screen = ImageGrab.grab()
        imdata = screen.getdata()
        # An RGB value of (240, 240, 240) at this flat index is treated
        # as the mine-hit indicator.
        if imdata[925 + (480 * 1920)][0] == 240 and imdata[925 + (
                480 * 1920)][1] == 240 and imdata[925 + (480 * 1920)][2] == 240:
            raise clickedMine(
                "Tried to click a mine!")  #Make a specific error maybe?
            return False  # NOTE(review): unreachable after raise
def pickit(): click(465,580) ##点击A click(505,715) ##提交答案 time.sleep(0.2) fullsc = ImageGrab.grab() ##截全圖,因爲是pick工作,所以提交答案再截圖 saveQ = fullsc.crop(Qbox).save(wokingp + 'saveQ.jpg','JPEG') #截图,保存 picQ = open(wokingp +'saveQ.jpg','rb') Qmd5 = hashlib.md5(picQ.read()).hexdigest().upper() ##計算題目Q的MD5 saveTips = fullsc.crop(Tipsbox).save(wokingp + 'Tips.JPG','JPEG') #截图,保存,提交答案之後查看正確答案 Tips = open(wokingp +'Tips.JPG','rb') Tipsmd5 = hashlib.md5(Tips.read()).hexdigest().upper() def md5w(): md5 = hashlib.md5(pic.read()).hexdigest().upper() ##計算答案的MD5 w = '%s:%s\n' % (Qmd5 ,md5) ##寫入HASH至文件 tk = open(shash,'a') tk.write(w) tk.close() pic.close() if Tipsmd5 == Amd5: saveA = fullsc.crop(Abox).save(wokingp + 'A.jpg','JPEG') pic = open(wokingp +'A.jpg','rb') md5w() elif Tipsmd5 == Bmd5: saveB = fullsc.crop(Bbox).save(wokingp + 'B.jpg','JPEG') pic = open(wokingp +'B.jpg','rb') md5w() elif Tipsmd5 == Cmd5: saveC = fullsc.crop(Cbox).save(wokingp + 'C.jpg','JPEG') pic = open(wokingp +'C.jpg','rb') md5w() elif Tipsmd5 == Dmd5: saveD = fullsc.crop(Dbox).save(wokingp + 'D.jpg','JPEG') pic = open(wokingp +'D.jpg','rb') md5w() else: print 'no answer!' getProcess()
def launcher(): expectedVal = (250, 152, 135) im = ImageGrab.grab((455, 435, 501, 467)) inVal = im.getpixel((41, 24)) ## print inVal ## im.save(os.getcwd() + '\\' + 'launcher.png', "PNG") ## im.putpixel((32,28), (0,0,0)) ## print inVal if inVal != expectedVal: leftClick() im.save(os.getcwd() + '\\' + 'launcher.png', "PNG") print 'Launch!' return 1 else: print 'missed' launcher()
def subfind(findimg, insideimg=None):
    """Locate *findimg* inside *insideimg* by brute-force pixel scan.

    findimg may be an image, a filename, a list/tuple of candidates
    (first hit wins) or a dict mapping candidates to (dx, dy) offsets
    added to the hit.  insideimg defaults to a fresh screen grab and may
    also be a filename.  Returns the top-left (x, y) of the first
    position where every channel differs by at most 8, or None.

    Replaced the `type(x) in (type(()), ...)` comparisons with
    isinstance, the idiomatic (and subclass-friendly) type check.
    """
    if isinstance(findimg, (tuple, list)):
        # Sequence of candidates: return the first that matches.
        for candidate in findimg:
            r = subfind(candidate, insideimg=insideimg)
            if r is not None:
                return r
        return None
    if isinstance(findimg, dict):
        # Dict of candidates: shift the hit by the candidate's offset.
        for candidate in findimg.keys():
            r = subfind(candidate, insideimg=insideimg)
            if r is not None:
                return (r[0] + findimg[candidate][0], r[1] + findimg[candidate][1])
        return None
    # Normalise both images to RGB, loading filenames as needed.
    if isinstance(findimg, basestring):
        findimg = Image.open(findimg).convert('RGB')
    else:
        findimg = findimg.convert('RGB')
    if insideimg is None:
        insideimg = ImageGrab.grab().convert('RGB')
    elif isinstance(insideimg, basestring):
        insideimg = Image.open(insideimg).convert('RGB')
    else:
        insideimg = insideimg.convert('RGB')
    findload = findimg.load()
    insideload = insideimg.load()
    #insideimg.save('C:\\tmptest'+str(random.random())+'.png','PNG')
    point = None
    for x in range(insideimg.size[0] - findimg.size[0]):
        for y in range(insideimg.size[1] - findimg.size[1]):
            sofarsogood = True
            for x2 in range(findimg.size[0]):
                for y2 in range(findimg.size[1]):
                    p1 = findload[x2, y2]
                    p2 = insideload[x + x2, y + y2]
                    # Per-channel tolerance of 8 absorbs rendering noise.
                    if abs(p1[0] - p2[0]) > 8 or abs(p1[1] - p2[1]) > 8 or abs(
                            p1[2] - p2[2]) > 8:
                        sofarsogood = False
                        break
                if not sofarsogood:
                    break
            if sofarsogood:
                say('Found point at', (x, y))
                point = (x, y)
        if point is not None:
            break
    return point
def isSpinning(): mousePos((475, 620)) expectedVal = (255, 225, 13) box = (473, 377, 530, 432) im = ImageGrab.grab() inVal = im.getpixel((500, 390)) ##print inVal ##print expectedVal im.save(os.getcwd() + '\\' + 'Spinning.png', "PNG") if inVal == expectedVal: print 'Spinning = True' return True else: print 'Not spinning' return False
def findGrid(self):
    """Scan the screen (in 7-px steps) for the grid anchor colour, then
    confirm the hit by probing a 4x4 lattice of cell sample points; the
    first fully-confirmed anchor's (x, y) is returned.

    NOTE(review): the bounds check uses a 121-px cell pitch while the
    probe itself uses 120 -- confirm which pitch is correct.
    """
    image = ImageGrab.grab()
    a, b = MOUSE.screen_size()
    for x in xrange(0, a, 7):
        for y in xrange(0, b, 7):
            if self.smoothColor(image.getpixel((x, y))) == GRIDCOLOR:
                for a1 in xrange(4):
                    for b1 in xrange(4):
                        if x + 121 * a1 + 60 >= a or y + 121 * b1 + 17 >= b:
                            break
                        if self.smoothColor(image.getpixel((x + 120 * a1 + 60, y + 120 * b1 + 17))) not in COLORS:
                            break
                    # for/else: only reached when every b1 probe passed.
                    else:
                        continue
                    break
                # for/else: only reached when every a1 row passed.
                else:
                    return x, y
def init():
    """Locate the board's top-left marker on screen, store it in the
    global offset and initialise the colour tables.

    Returns True on success, False when the board cannot be found.
    """
    screen = ImageGrab.grab()
    coords = findSubimage(wwfTopLeft, screen)
    global offset
    offset = coords
    print(offset, )
    if coords:
        print("Found board!")
        setColors()
        print("Initialized!")
        return True
    print("Can't find the board!")
    return False
def oneScreenshots():
    # screenshot function
    # originally by: Technic Dynamic, http://www.technicdynamic.com/
    """Take a screenshot, upload it to the configured URL and announce
    both steps on the IRC channel.

    IOError (no write permission in the cwd) is reported to the channel
    instead of being raised.
    """
    try:
        global saveas, urlFromUpload, urlFromUpShow
        img = ImageGrab.grab()
        saveas = os.path.join(time.strftime('%Y_%m_%d_%H_%M_%S') + '.png')
        img.save(saveas)
        sendMsg(
            ircChanne, "..::Screenshot salvo::.. " + str(saveas) +
            " ..:: Aguarde imagem sendo urpada::..")
        #url = 'http://www.site.com.br/upload.php'
        # POST the saved file to the upload endpoint.
        files = {'file': open(saveas, 'rb')}
        r = requests.post(urlFromUpload, files=files)  #import requests
        sendMsg(
            ircChanne, "..::Imagem urpado com sucesso para::.. http:" +
            urlFromUpShow + saveas)
    except IOError:
        sendMsg(ircChanne, "Voce nao tem privilegio para gravar no host")
def capture_image(self, savefilename): screensize = self.get_screen_size() # The cropbox will take care of making sure our image is within # screen boundaries. cropbox = CropBox(topleft=Point(0, 0), bottomright=screensize, min=Point(0, 0), max=screensize) self.logger.debug(cropbox) if os.name == 'posix': screengrab = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, screensize.x, screensize.y) screengrab.get_from_drawable(gtk.gdk.get_default_root_window(), gtk.gdk.colormap_get_system(), 0, 0, 0, 0, screensize.x, screensize.y) save_options_dict = {} if self.subsettings['General']['Screenshot Image Format'].lower( ) in ['jpg', 'jpeg']: self.subsettings['General']['Screenshot Image Format'] = 'jpeg' save_options_dict = { 'quality': to_unicode(self.subsettings['General'] ['Screenshot Image Quality']) } screengrab.save( savefilename, self.subsettings['General']['Screenshot Image Format'], save_options_dict) if os.name == 'nt': image_data = ImageGrab.grab( (cropbox.topleft.x, cropbox.topleft.y, cropbox.bottomright.x, cropbox.bottomright.y)) image_data.save(savefilename, quality=self.subsettings['General'] ['Screenshot Image Quality'])
def GetActiveWindowImage(self, FilePath):
    """ Capture an image of the active window into the given _FilePath_.

    The given _FilePath_ must be relative to Robot Framework output
    directory, otherwise the embedded image will not be shown in the
    log file.
    """
    #
    # Check that PIL is installed
    #
    if ImageGrab is None:  # identity test is the idiomatic None check
        raise RuntimeError(
            "Python Imaging Library (PIL) is not installed, but is required for GetActiveWindowImage"
        )
    #
    # Check for a valid FilePath and make sure the directories exist
    #
    if FilePath and os.path.isabs(FilePath):
        raise RuntimeError(
            "Given FilePath='%s' must be relative to Robot outputdir" %
            FilePath)
    fullFilePath = os.path.join(self._OutputDir, FilePath)
    # Split once instead of twice; create missing parent directories.
    targetDir = os.path.split(fullFilePath)[0]
    if not os.path.exists(targetDir):
        os.makedirs(targetDir)
    self._info("GetActiveWindowImage(FilePath=%s)" % fullFilePath)
    #
    # Get the bounding box for the Active Window
    #
    x = self._AutoIt.WinGetPosX("")
    y = self._AutoIt.WinGetPosY("")
    width = self._AutoIt.WinGetPosWidth("")
    height = self._AutoIt.WinGetPosHeight("")
    bbox = [x, y, x + width - 1, y + height - 1]
    # Park the mouse just outside the window so the pointer does not
    # end up in the screenshot.
    self.MouseMove(x + width + 10, y + height + 10)
    #
    # Capture and save the screen image of the window
    #
    GrabbedImage = ImageGrab.grab(bbox)  # store screenshot as "RGB" Image
    GrabbedImage.save(fullFilePath)  # PIL evaluates extension
    #
    # Embed the screenshot in the Robot Framework log file
    #
    self._html('<td></td></tr><tr><td colspan="3"><a href="%s">'
               '<img src="%s" width="700px"></a></td></tr>' %
               (FilePath, FilePath))
def capture_soundrecorder_image(imgname):
    """Launch the Windows SoundRecorder, screenshot its window into
    *imgname* (JPEG), then terminate the application."""
    logger.debug("Launch SoundRecorder")
    recorder = application.Application.start(
        os.path.join("c:\\windows\\sysnative", "SoundRecorder.exe"))
    time.sleep(3)
    logger.debug("Capture SoundRecorder picture")
    bounds = RECT()
    hwnd = win32gui.GetForegroundWindow()  # handle of the frontmost window
    # Fill `bounds` with the window's on-screen coordinates.
    ctypes.windll.user32.GetWindowRect(hwnd, ctypes.byref(bounds))
    # Shrink the box by 2 px on every side to trim the window border.
    bbox = (bounds.left + 2, bounds.top + 2,
            bounds.right - 2, bounds.bottom - 2)
    ImageGrab.grab(bbox).save(imgname, 'JPEG')
    logger.debug("Exit SoundRecorder")
    recorder.kill_()
def screenshot(s):
    """Capture the screen to a temporary PNG and stream it over socket s.

    The first chunk sent is '<path>+/-<first 1024 bytes of the file>';
    raw 1 KiB chunks follow until EOF.  The write side of the socket is
    then shut down and the temporary file removed.
    """
    fname = tempdir + '/screenshot' + str(random.randint(0, 1000000)) + '.png'
    if os.name == 'posix':    # unix-like
        pyscreenshot.grab().save(fname)
    elif os.name == 'nt':     # windows
        ImageGrab.grab().save(fname)
    with open(fname, 'rb') as fh:
        chunk = fname + '+/-' + fh.read(1024)   # header rides on chunk #1
        while chunk:
            s.send(chunk)
            chunk = fh.read(1024)
    print('sent')
    s.shutdown(socket.SHUT_WR)
    os.remove(fname)
def script_ocrNavigatorObject(self, gesture):
    """OCR the current navigator object: screenshot its bounds, run
    Tesseract on the image, and expose the recognised text for review.

    Temporary .bmp/.html files are always cleaned up afterwards.
    """
    nav = api.getNavigatorObject()
    left, top, width, height = nav.location
    img = ImageGrab.grab(bbox=(left, top, left + width, top + height))
    # Tesseract copes better if we convert to black and white...
    img = img.convert(mode='L')
    # and increase the size.
    img = img.resize(
        (width * IMAGE_RESIZE_FACTOR, height * IMAGE_RESIZE_FACTOR),
        Image.BICUBIC)
    baseFile = os.path.join(tempfile.gettempdir(), "nvda_ocr")
    try:
        imgFile = baseFile + ".bmp"
        img.save(imgFile)
        ui.message(_("Running OCR"))
        lang = getConfig()['language']
        # Hide the Tesseract window.
        si = subprocess.STARTUPINFO()
        si.dwFlags = subprocess.STARTF_USESHOWWINDOW
        si.wShowWindow = subprocess.SW_HIDE
        subprocess.check_call(
            (TESSERACT_EXE, imgFile, baseFile, "-l", lang, "hocr"),
            startupinfo=si)
    finally:
        try:
            os.remove(imgFile)
        except OSError:
            pass
    try:
        hocrFile = baseFile + ".html"
        # Close the hOCR file promptly; the original leaked the handle
        # via file(hocrFile).read().
        with open(hocrFile) as f:
            parser = HocrParser(f.read(), left, top)
    finally:
        try:
            os.remove(hocrFile)
        except OSError:
            pass
    # Let the user review the OCR output.
    nav.makeTextInfo = lambda position: OcrTextInfo(nav, position, parser)
    api.setReviewPosition(nav.makeTextInfo(textInfos.POSITION_FIRST))
    ui.message(_("Done"))
def get_code_str_from_image_qq(image_name):
    """Stub OCR for a QQ captcha image.

    Currently ignores *image_name*: grabs the screen, shows it for
    manual inspection, and returns the placeholder string 'aabb'.
    A large block of dead, commented-out PIL filter/enhance experiments
    was removed; real recognition (image_to_string) is still TODO.
    """
    im = ImageGrab.grab()
    im.show("tt.bmp")  # the argument is only the viewer window title
    return 'aabb'
def updateBoard(self,confirm = False,ensure = False):
    """Re-read the 4x4 board from the screen into self.B.

    With confirm=True, return False as soon as any on-screen tile
    differs from self.B (the board is updated only up to that cell).
    With ensure=True, return False unless at least one cell changed.
    Otherwise returns True and refreshes self.c (non-empty cell count).
    """
    # NOTE(review): nc0 is computed but never used afterwards.
    nc0 = sum(1 if (not self.B[a][b] is None) else 0 for a in xrange(self.n) for b in xrange(self.n))
    change = False
    c=0
    # Keep grabbing the screen until all 16 sampled tile colours are
    # known COLORMAP keys (i.e. no animation/transition mid-frame).
    while True:
        image = ImageGrab.grab()
        for x in xrange(4):
            for y in xrange(4):
                # Sample one pixel inside tile (x, y); 121 px tile pitch
                # with a (60, 20) offset from the board origin -- TODO
                # confirm against the game's actual layout.
                pix = self.smoothColor(image.getpixel((self.pos[0]+60+x*121, self.pos[1]+20+y*121)))
                if pix not in COLORMAP:
                    break      # unknown colour: abandon this grab
            else:continue      # inner loop clean: try the next column
            break              # propagate the inner break
        else:break             # every tile recognised: keep this image
        c+=1
        # After 10 failed grabs, assume the window moved and re-locate it.
        if c==10:
            self.updatePosition()
            c = 0
    # Second pass over the SAME accepted image: decode colours into
    # board values and apply differences.
    for x in xrange(4):
        for y in xrange(4):
            pix = self.smoothColor(image.getpixel((self.pos[0]+60+x*121, self.pos[1]+20+y*121)))
            if pix not in COLORMAP:
                # Should be unreachable (the grab loop validated every
                # tile); the next line would raise KeyError anyway.
                print pix
            b = COLORMAP[pix]
            if b!=self.B[y][x]:
                if confirm:
                    return False   # mismatch found: report and stop
                if ensure:
                    change = True
                self.set(y,x,b)
    if not change and ensure:
        return False               # ensure=True requires >=1 change
    nc = sum(1 if (not self.B[a][b] is None) else 0 for a in xrange(self.n) for b in xrange(self.n))
    # An all-empty read is treated as suspicious: retry once.
    if nc==0:
        self.updateBoard()
        return True
    self.c = nc
    return True
def capture_image(self, event): screensize = self.get_screen_size() # The cropbox will take care of making sure our image is within # screen boundaries. cropbox = CropBox(topleft=Point(0, 0), bottomright=self.imagedimensions, min=Point(0, 0), max=screensize) cropbox.reposition(Point(event.Position[0], event.Position[1])) self.logger.debug(cropbox) if os.name == 'posix': AllPlanes = ~0 try: # cropbox.topleft.x, cropbox.topleft.y, # cropbox.size.x, cropbox.size.y, self.savefilename raw = self.rootwin.get_image(cropbox.topleft.x, cropbox.topleft.y, cropbox.size.x, cropbox.size.y, X.ZPixmap, AllPlanes) image_data = Image.fromstring("RGBX", (cropbox.size.x, cropbox.size.y), raw.data, "raw", "BGRX").convert("RGB") return image_data except error.BadDrawable: print "bad drawable when attempting to get an image! Closed the window?" except error.BadMatch: print "bad match when attempting to get an image! probably specified an area outside the window (too big?)" except error.BadValue: print "getimage: bad value error - tell me about this one, I've not managed to make it happen yet" except: print self.logger.debug('Error in getimage.', exc_info=True) if os.name == 'nt': image_data = ImageGrab.grab( (cropbox.topleft.x, cropbox.topleft.y, cropbox.bottomright.x, cropbox.bottomright.y)) return image_data
def getInputArr(self):
    """Sample the screen every 5 px over x in [30, 420) and y in
    [30, 490) and build a 2D list of inputs for the neural network.

    Cell values: 0 for background (pure black), -1 for the character's
    pink colours, 1 for anything else (obstacle).  Prints the current
    processor time before returning.
    """
    frame = ImageGrab.grab()
    grid = []
    for sx in range(30, 420, 5):
        column = []
        for sy in range(30, 490, 5):
            rgb = frame.getpixel((sx, sy))
            if rgb == (0, 0, 0):
                column.append(0)       # background
            elif rgb in ((255, 34, 221), (255, 51, 221)):
                column.append(-1)      # character
            else:
                column.append(1)       # obstacle
        grid.append(column)
    print(time.clock())
    return grid
def nextclass(): click(1020,865) #交卷 time.sleep(0.3) click(560,680) #未完成提示框的確認 time.sleep(0.5) fullsc = ImageGrab.grab() savebutton = fullsc.crop(button).save('tmp/button.JPG','JPEG') #截图,保存,提交答案之後查看正確答案 button = open('tmp/button.JPG','rb') buttonmd5 = hashlib.md5(button.read()).hexdigest().upper() button.close() if buttonmd5 == buttonT: click(490,720) #練習下一個知識點 pickit() elif buttonmd5 == buttonF: click(490,720) #失敗重做 AutoAnswer() else: print 'this chapter all finish!'