def e():
    log("Waiting for client...")
    time.sleep(30)
    wincap = WindowCapture('League of Legends')
    while True:
        try:
            screenshot = wincap.get_screenshot()
        except:
            break
        if locate("playButton", screenshot) != None:
            log("Restarting.")
            break
        if locate("honor", screenshot) != None:
            log("Honoring teammate...")
            click(client.honorButton)
            time.sleep(1)
        if locate("dailyPlay", screenshot, 0.8) != None:
            log("Choosing reward...")
            click(client.dailyPlayButton)
            time.sleep(1)
        if locate("okButton", screenshot, 0.8) != None:
            log("Choosing reward...")
            click(locate("okButton", screenshot, 0.8))
            time.sleep(1)
        if locate("xButton", screenshot, 0.8) != None:
            log("Exiting lobby...")
            click(locate("xButton", screenshot, 0.8))
            time.sleep(1)
def Attack():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    wincap = WindowCapture('Legends of Runeterra')
    # vision_IP = Vision('img/minalegal.jpg')
    # haystack_img = cv.imread('img/este1.jpg', cv.IMREAD_UNCHANGED)
    needle_img = cv.imread('img/mobBeSelected.jpg', cv.IMREAD_UNCHANGED)
    result = cv.matchTemplate(wincap.get_screenshot(), needle_img, cv.TM_CCOEFF_NORMED)
    # cv.imshow('Result', result)
    # cv.waitKey()
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
    print('Best match top left position: %s' % str(max_loc))
    print('Best match confidence: %s' % max_val)
    print('ScreenPosition: %s' % str(wincap.get_screen_position(max_loc)))
    pos = str(wincap.get_screen_position(max_loc))
    arrayPos = pos.split(', ')
    PosX = arrayPos[0].replace("(", '')
    PosY = arrayPos[1].replace(")", '')
    print(PosX)
    print(PosY)
    pyautogui.moveTo(int(PosX), int(PosY), 0.1)
    pyautogui.dragTo(2851, 652, 0.25)
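
# A minimal sketch (not part of the original script): since get_screen_position(max_loc)
# appears to return an (x, y) tuple (its str() form is parsed with split/replace above),
# the same drag can be done by unpacking the tuple directly. The function name and the
# drag target default are illustrative only.
def attack_drag_example(drag_to=(2851, 652)):
    wincap = WindowCapture('Legends of Runeterra')
    needle_img = cv.imread('img/mobBeSelected.jpg', cv.IMREAD_UNCHANGED)
    result = cv.matchTemplate(wincap.get_screenshot(), needle_img, cv.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv.minMaxLoc(result)
    # assumed: get_screen_position() returns an (x, y) tuple of screen coordinates
    pos_x, pos_y = wincap.get_screen_position(max_loc)
    pyautogui.moveTo(pos_x, pos_y, 0.1)
    pyautogui.dragTo(drag_to[0], drag_to[1], 0.25)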
def streamScreen(window_name):
    print("Starting streaming game window")
    # initialize the WindowCapture class
    wincap = WindowCapture(window_name)
    loop_time = time()
    frames = 0
    while True:
        # get an updated image of the game
        screenshot = wincap.get_screenshot()
        cv.imshow('Computer Vision', screenshot)
        frames = frames + 1
        # debug the loop rate
        if frames == 200:
            print('FPS {}'.format(int(200 / (time() - loop_time))))
            loop_time = time()
            frames = 0
        # press 'q' with the output window focused to exit.
        # waits 1 ms every loop to process key presses
        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
            break
    print('Done.')
def __init__(self, target, haystack_wnd='Toontown Rewritten'):
    # Window Capture has default to TTR, else we choose from main.
    self.wincap = WindowCapture(window_name=haystack_wnd)
    # Previously, we had to use the object to call this function.
    # Now that it is static, we can call the class directly.
    # wincap.list_window_names()
    # WindowCapture.list_window_names()
    # check foreground window title
    current = self.wincap.title()
    """
    The Encryption Method I used:
        click.write_key()
        key = click.load_key()
        message1 = user.encode()
        print(message1)  # bytes now
        message2 = pw.encode()
        print(message2)
        f = Fernet(key)
        encrypted1 = f.encrypt(message1)
        encrypted2 = f.encrypt(message2)
        print(encrypted1)
        print(encrypted2)
    """
    # Decrypt our user name and pw. If you want to use it,
    # remove the encryption method, or generate your own encrypted values.
    # Add in your own user/pw instead.
    self.key = self.load_key()
    f = Fernet(self.key)
    # Target is selectable from main file now.
    if current == "Toontown Rewritten Launcher":
        # Make TextBox bigger
        self.vision_target = Vision('TextBox.png')
        # empty bits on bitmap, idk how they made the launcher
        # self.locator()
        # Just send input
        pdi.press(['tab'])
        time.sleep(0.05)
        pdi.typewrite(f.decrypt(self.user).decode())
        time.sleep(0.05)
        pdi.press(['tab'])
        time.sleep(0.05)
        pdi.typewrite(f.decrypt(self.pw).decode())
        time.sleep(0.05)
        pdi.press(['enter'])
        time.sleep(0.05)
    else:
        self.vision_target = Vision(target)
    # Only find best match
    self.locator(multi=False)
def SelectAlly(Ally):
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    wincap = WindowCapture('Legends of Runeterra')
    # ------------------------------ #
    # fogolivre = ["img\\oomcrewrookie.jpg", "img\\crimsondisciple.jpg", "img\\decimate.jpg", "img\\getexcited.jpg", "img\\imperialdemoli.jpg", "img\\legiongran.jpg", "img\\legionrear.jpg", "img\\legionsaboteur.jpg", "img\\mysticshot.jpg", "img\\oxianfervor.jpg", "img\\preciouspet.jpg", "img\\statikkshock.jpg", "img\\sformation.jpg", "img\\usedcasksalesman.jpg"]
    if Ally == "Legion Rearguard":
        vToFind = cv.imread('img/legionrear.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Legion Saboteur":
        vToFind = cv.imread('img/legionsaboteur.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Precious Pet":
        vToFind = cv.imread('img/preciouspet1.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Boomcrew Rookie":
        vToFind = cv.imread('img/oomcrewrookie.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Crimson Disciple":
        vToFind = cv.imread('img/crimsondisciple.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Imperial Demolitionist":
        vToFind = cv.imread('img/imperialdemoli.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Legion Granadier":
        vToFind = cv.imread('img/legiongran.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Mystic Shot":
        vToFind = cv.imread('img/mysticshot.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Transfusion":
        vToFind = cv.imread('img/sformation.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Get Excited":
        vToFind = cv.imread('img/getexcited.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Noxian Fervor":
        vToFind = cv.imread('img/oxianfervor.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Used Cask Salesman":
        vToFind = cv.imread('img/casksalesman.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Statikk Shock":
        vToFind = cv.imread('img/statikkshock.jpg', cv.IMREAD_UNCHANGED)
    elif Ally == "Decimate":
        vToFind = cv.imread('img/decimate.jpg', cv.IMREAD_UNCHANGED)
    # ------------------------------ #
    result = cv.matchTemplate(wincap.get_screenshot(), vToFind, cv.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
    print('Best match top left position: %s' % str(max_loc))
    print('Best match confidence: %s' % max_val)
    print('ScreenPosition: %s' % str(wincap.get_screen_position(max_loc)))
    pos = str(wincap.get_screen_position(max_loc))
    arrayPos = pos.split(', ')
    PosX = arrayPos[0].replace("(", '')
    PosY = arrayPos[1].replace(")", '')
    print(PosX)
    print(PosY)
    pyautogui.click(int(PosX), int(PosY), clicks=1, interval=1)
def c():
    log("Game window found.")
    wincap = WindowCapture('League of Legends (TM) Client')
    while True:
        try:
            screenshot = wincap.get_screenshot()
        except:
            break
        if locate("minimap", screenshot) != None:
            log("Game started.")
            break
        else:
            log("Waiting for game to start...")
            click((computerWidth / 2, computerHeight / 2))
def b():
    wincap = WindowCapture('League of Legends')
    while True:
        try:
            screenshot = wincap.get_screenshot()
        except:
            break
        if locate("grayLockInButton", screenshot) == None:
            if locate("findMatchButton", screenshot) != None:
                log("Finding a match...")
                click(client.findMatchButton)
            if locate("acceptButton", screenshot) != None:
                log("Waiting for champ select...")
                click(client.acceptButton)
            if locate("chooseChampion", screenshot) != None:
                chooseChamp()
                log("Waiting for game window...")
                time.sleep(10)
def make_cropped_ss(
    LOAD_IMAGE_=LOAD_IMAGE,
    cropping_x=450,
    cropping_y=970,
    cropping_width=1000,
    cropping_height=30,
    IMAGE_DEBUG_MODE_=IMAGE_DEBUG_MODE,
):
    """
    Parameters
    ----------
    LOAD_IMAGE_ : If you want to run without the game, change to 1. The default is 0.
    window : Window to be captured; set to None if running without the game. The default is wincap.

    The defaults crop the screenshot from the first to the fifth (1-5) champion card name.
    cropping_x : The default is 450.
    cropping_y : The default is 970.
    cropping_width : The default is 1000.
    cropping_height : The default is 30.

    Returns
    -------
    crop_img : Cropped screenshot.
    screenshot : Full screenshot.
    """
    logging.debug("Function make_cropped_ss() called")
    if LOAD_IMAGE_:
        screenshot = cv.imread("examples/ss3.jpg", cv.IMREAD_UNCHANGED)
    else:
        wincap = WindowCapture("League of Legends (TM) Client")
        screenshot = wincap.get_screenshot()
    crop_img = screenshot[cropping_y:cropping_y + cropping_height,
                          cropping_x:cropping_x + cropping_width]
    if IMAGE_DEBUG_MODE_:
        cv.imshow("make_cropped_ss()", crop_img)
    logging.debug("Function make_cropped_ss() end")
    return crop_img, screenshot
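
# Example usage (a sketch, not from the original file): crop the champion-name strip from the
# bundled example screenshot and OCR it. Assumes pytesseract and the Tesseract binary are
# installed; the function name and the grayscale pre-step are illustrative only.
def example_ocr_champion_names():
    import pytesseract
    crop_img, _ = make_cropped_ss(LOAD_IMAGE_=1)
    gray = cv.cvtColor(crop_img, cv.COLOR_BGR2GRAY)
    text = pytesseract.image_to_string(gray)
    logging.debug("OCR result: %s", text)
    return text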
def Block(Spot):
    # -------------------- Found My Card ----------------------------- #
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    wincap = WindowCapture('Legends of Runeterra')
    needle_img = cv.imread('img/mobBeSelected.jpg', cv.IMREAD_UNCHANGED)
    result = cv.matchTemplate(wincap.get_screenshot(), needle_img, cv.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
    pos = str(wincap.get_screen_position(max_loc))
    arrayPos = pos.split(', ')
    PosX = arrayPos[0].replace("(", '')
    PosY = arrayPos[1].replace(")", '')
    pyautogui.moveTo(int(PosX), int(PosY), 0.1)
    sleep(1)
    spot = ["2379", "711", "2576", "713", "2751", "710",
            "3008", "711", "3180", "712", "3370", "706"]
    if Spot == "1":
        cardToBlockX = spot[0]
        cardToBlockY = spot[1]
        pyautogui.dragTo(int(cardToBlockX), int(cardToBlockY), 0.25)
    if Spot == "2":
        cardToBlockX = spot[2]
        cardToBlockY = spot[3]
        pyautogui.dragTo(int(cardToBlockX), int(cardToBlockY), 0.25)
    if Spot == "3":
        cardToBlockX = spot[4]
        cardToBlockY = spot[5]
        pyautogui.dragTo(int(cardToBlockX), int(cardToBlockY), 0.25)
    if Spot == "4":
        cardToBlockX = spot[6]
        cardToBlockY = spot[7]
        pyautogui.dragTo(int(cardToBlockX), int(cardToBlockY), 0.25)
    if Spot == "5":
        cardToBlockX = spot[8]
        cardToBlockY = spot[9]
        pyautogui.dragTo(int(cardToBlockX), int(cardToBlockY), 0.25)
    if Spot == "6":
        cardToBlockX = spot[10]
        cardToBlockY = spot[11]
        pyautogui.dragTo(int(cardToBlockX), int(cardToBlockY), 0.25)
    # -------------------- Found My Enemy ----------------------------- #
    '''wincap2 = WindowCaptureEnemy('Legends of Runeterra')
def IsMyRound():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # initialize the WindowCapture class
    wincap = WindowCapture('Legends of Runeterra')
    # finding myRound.jpg
    # vision_IP = Vision('img/minalegal.jpg')
    vToFind = Vision('img/isMyRound.jpg')
    loop_time = time()
    while True:
        screen = wincap.get_screenshot()
        points = vToFind.find(screen, 0.99, 'rectangles')
        # print('FPS {}'.format(1 / (time() - loop_time)))
        if points:
            print('Is my turn.')
        else:
            print('Not my turn')
        loop_time = time()
        # return False
        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
            break
def set_to_begin(self, values):
    if values['-ENDTIMEP-']:
        self.end_time_enable = True
        try:
            self.end_time = int(values['-ENDTIME-']) * 60
        except:
            self.end_time = 0
    self.bait_time = values['-BAITTIME-']
    self.throw_time = values['-THROWTIME-']
    self.game_time = values['-STARTGAME-']
    self.wincap = WindowCapture(constants.GAME_NAME)
    self.state = 0
    self.initial_time = time()
    self.timer_action = time()
    mouse_x = int(self.FISH_WINDOW_POSITION[0] + self.wincap.offset_x + 200)
    mouse_y = int(self.FISH_WINDOW_POSITION[1] + self.wincap.offset_y + 200)
    pydirectinput.click(x=mouse_x, y=mouse_y, button='right')
class FindLoc():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    wincap = WindowCapture('Legends of Runeterra')
    haystack_img = cv.imread('img/este1.jpg', cv.IMREAD_UNCHANGED)
    needle_img = cv.imread('img/crimsondisciple.jpg', cv.IMREAD_UNCHANGED)
    result = cv.matchTemplate(wincap.get_screenshot(), needle_img, cv.TM_CCOEFF_NORMED)
    cv.imshow('Result', result)
    cv.waitKey()
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
    print('Best match top left position: %s' % str(max_loc))
    print('Best match confidence: %s' % max_val)
    print('ScreenPosition: %s' % str(wincap.get_screen_position(max_loc)))
    threshold = 0.5
    if max_val >= threshold:
        print('Found button.')
        # get the dimensions of the needle image
        needle_w = needle_img.shape[1]
        needle_h = needle_img.shape[0]
        top_left = max_loc
        bottom_right = (top_left[0] + needle_w, top_left[1] + needle_h)
        # cv.rectangle(needle_img, top_left, bottom_right, color=(0, 255, 0), thickness=2, lineType=cv.LINE_4)
        # cv.drawMarker(needle_img, max_loc, (255, 255, 0), markerType=None, markerSize=None, thickness=None, line_type=None)
        # cv.circle(needle_img, max_loc, 5, color=(255, 255, 0), thickness=2, lineType=cv.LINE_4, shift=None)
        # cv.imshow('Result', haystack_img)
        # cv.waitKey()
        # This is where the program captures the image and acts on it
        # pyautogui.click(top_left, clicks=1, interval=1, duration=0.1)
        # Start()
        # Replace("4 2 3 1")
    else:
        print('Image not found.')
import cv2 as cv
import numpy as np
from random import shuffle
import time
from windowcapture import WindowCapture
from vision import Vision
from movement import *
from hsvfilter import HsvFilter
import concurrent.futures
import threading
from multiprocessing import Process, RLock
from multiprocessing.sharedctypes import Array
from ctypes import Structure, c_int
import pyautogui as pog

wincap = WindowCapture('Runelite - USERNAME')
item_1 = Vision('whirlpool.png')
item_2 = Vision('inventory.png')


class Point(Structure):
    _fields_ = [('x', c_int), ('y', c_int)]


lock = RLock()
A = Array(Point, 10, lock=lock)  # Global shared Array


def printListOfPoints(A):
    return str([(a.x, a.y) for a in A])
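
# A minimal sketch (assumption, not part of the original script) of how a worker process
# might publish detected click points into the shared Array `A` defined above. It assumes
# Vision.find(screenshot, threshold, debug_mode) returns a list of (x, y) points, as its
# use elsewhere in these snippets suggests; the function name and threshold are illustrative.
def find_points_worker(shared_points, shared_lock, screenshot):
    points = item_1.find(screenshot, 0.6, 'points')
    with shared_lock:
        # copy at most len(shared_points) detections into shared memory
        for i, (x, y) in enumerate(points[:len(shared_points)]):
            shared_points[i] = Point(x, y)

# Example usage (sketch):
#     frame = wincap.get_screenshot()
#     worker = Process(target=find_points_worker, args=(A, lock, frame))
#     worker.start()
#     worker.join()
#     print(printListOfPoints(A))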
class PuzzleBot:
    # properties
    botting = False
    PUZZLE_WINDOW_SIZE = (260, 170)
    PUZZLE_WINDOW_POSITION = (270, 227)
    PUZZLE_GET_NEW_PIECE = (230, 85)
    PUZZLE_COMFIRM = (100, 90)
    PUZZLE_GET_NEW_PIECE_COLOR = (110, 150)
    wincap = None
    tetris = Tetris()
    timer_action = time()
    get_piece_time = 2
    new_piece = None
    state = 0
    end = False
    dictdump = None

    def set_to_begin(self, values):
        self.wincap = WindowCapture(constants.GAME_NAME)
        self.state = 0
        with open('pieces_second.json') as handle:
            self.dictdump = json.loads(handle.read())

    def set_puzzle_state(self, crop_img):
        paint_c = 32
        board = [[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0]]
        for i in range(0, 4):
            for j in range(0, 6):
                if (crop_img[15 + paint_c*i, 15 + paint_c*j, 0] < 50
                        and crop_img[15 + paint_c*i, 15 + paint_c*j, 1] < 50
                        and crop_img[15 + paint_c*i, 15 + paint_c*j, 2] < 50):
                    board[i][j] = 0
                else:
                    board[i][j] = 1
                cv.rectangle(crop_img, (15 + paint_c*j, 15 + paint_c*i),
                             (15 + paint_c*j, 15 + paint_c*i),
                             color=(0, 255, 255), thickness=4, lineType=cv.LINE_4)
        self.tetris.board = board
        if self.tetris.count_zeros == 0:
            self.tetris.first = 0
            self.tetris.second = 0
        else:
            self.tetris.first = 1
            self.tetris.second = 1

    def get_image(self):
        screenshot = self.wincap.get_screenshot()
        crop_img = screenshot[
            self.PUZZLE_WINDOW_POSITION[1]:self.PUZZLE_WINDOW_POSITION[1] + self.PUZZLE_WINDOW_SIZE[1],
            self.PUZZLE_WINDOW_POSITION[0]:self.PUZZLE_WINDOW_POSITION[0] + self.PUZZLE_WINDOW_SIZE[0]]
        return crop_img

    def press_comfirm(self):
        mouse_x = int(self.PUZZLE_COMFIRM[0] + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x)
        mouse_y = int(self.PUZZLE_COMFIRM[1] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y)
        pydirectinput.click(x=mouse_x, y=mouse_y, button='left')

    def press_comfirm_cake(self):
        mouse_x = int(self.PUZZLE_COMFIRM[0] + 20 + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x)
        mouse_y = int(self.PUZZLE_COMFIRM[1] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y)
        pydirectinput.click(x=mouse_x, y=mouse_y, button='left')

    def throw_pice(self):
        mouse_x = int(self.PUZZLE_COMFIRM[0] + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x)
        mouse_y = int(self.PUZZLE_COMFIRM[1] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y)
        pydirectinput.click(x=mouse_x, y=mouse_y, button='right')

    def get_new_piece_color(self, crop_image):
        x = int(self.PUZZLE_GET_NEW_PIECE_COLOR[0])
        y = int(self.PUZZLE_GET_NEW_PIECE_COLOR[1])
        if (35 < crop_image[y, x, 0] < 40 and 60 < crop_image[y, x, 1] < 70
                and 240 < crop_image[y, x, 2] < 260):
            return 4
        elif (20 < crop_image[y, x, 0] < 30 and 150 < crop_image[y, x, 1] < 170
                and 240 < crop_image[y, x, 2] < 260):
            return 1
        elif (35 < crop_image[y, x, 0] < 50 and 240 < crop_image[y, x, 1] < 260
                and 35 < crop_image[y, x, 2] < 50):
            return 5
        elif (240 < crop_image[y, x, 0] < 260 and 240 < crop_image[y, x, 1] < 260
                and 20 < crop_image[y, x, 2] < 30):
            return 3
        elif (240 < crop_image[y, x, 0] < 260 and 100 < crop_image[y, x, 1] < 115
                and -10 < crop_image[y, x, 2] < 10):
            return 2
        elif (50 < crop_image[y, x, 0] < 60 and 235 < crop_image[y, x, 1] < 255
                and 250 < crop_image[y, x, 2] < 260):
            return 6
    def detect_end_game(self, crop_img):
        x = int(self.PUZZLE_GET_NEW_PIECE[0])
        y = int(self.PUZZLE_GET_NEW_PIECE[1])
        if crop_img[y, x, 0] > 100 and crop_img[y, x, 1] > 150 and crop_img[y, x, 2] > 150:
            return False
        else:
            return True

    def play_game(self):
        piece = Piece(self.new_piece)
        decision, pos = self.tetris.find_first(piece, self.dictdump)
        paint_c = 32
        if decision == 1:
            self.tetris.insert_piece(pos[0], pos[1], piece)
            if self.tetris.verify_end():
                self.end = True
            mouse_x = 15 + paint_c*pos[1] + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x
            mouse_y = 15 + paint_c*pos[0] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y
            pydirectinput.click(mouse_x, mouse_y)
            return None
        if decision == 2:
            return None
        possibilites = self.tetris.find_possibles(piece)
        pices_count = 0
        for i in range(1, 7):
            if i != piece.piece_type:
                possis = self.tetris.find_possibles(Piece(i))
                if len(possis):
                    pices_count += 1
        if piece.piece_type == 1 and pices_count != 0:
            possibilites = [i for i in possibilites if self.tetris.verify_isolated(i[0], i[1])]
        if len(possibilites):
            a = self.tetris.choose_better(piece, possibilites)
            self.tetris.insert_piece(a[0], a[1], piece)
            if self.tetris.verify_end():
                self.end = True
            mouse_x = 15 + paint_c*a[1] + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x
            mouse_y = 15 + paint_c*a[0] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y
            pydirectinput.click(mouse_x, mouse_y)
            return True
        return None

    def runHack(self):
        crop_image = self.get_image()
        timep = 0.2
        if self.state == 0:
            mouse_x = int(self.PUZZLE_GET_NEW_PIECE[0] + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x)
            mouse_y = int(self.PUZZLE_GET_NEW_PIECE[1] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y)
            if time() - self.timer_action > timep:
                if self.detect_end_game(crop_image):
                    self.botting = False
                    return None
                pydirectinput.click(x=mouse_x, y=mouse_y, button='left')
                self.state = 1
                self.timer_action = time()
        if self.state == 1:
            if time() - self.timer_action > timep:
                self.press_comfirm()
                self.state = 2
                self.timer_action = time()
        if self.state == 2:
            mouse_x = int(self.PUZZLE_GET_NEW_PIECE_COLOR[0] + self.PUZZLE_WINDOW_POSITION[0] + self.wincap.offset_x)
            mouse_y = int(self.PUZZLE_GET_NEW_PIECE_COLOR[1] + self.PUZZLE_WINDOW_POSITION[1] + self.wincap.offset_y)
            if time() - self.timer_action > timep:
                self.state = 4
                self.timer_action = time()
                pydirectinput.moveTo(mouse_x, mouse_y)
        if self.state == 4:
            if time() - self.timer_action > timep:
                self.state = 5
                self.timer_action = time()
                self.new_piece = self.get_new_piece_color(crop_image)
        if self.state == 5:
            if time() - self.timer_action > timep:
                self.timer_action = time()
                self.set_puzzle_state(crop_image)
                if self.play_game():
                    self.state = 6
                else:
                    self.state = 7
        if self.state == 6:
            if time() - self.timer_action > timep:
                self.press_comfirm()
                self.timer_action = time()
                if self.end:
                    self.state = 9
                else:
                    self.state = 0
        if self.state == 7:
            if time() - self.timer_action > timep:
                self.throw_pice()
                self.timer_action = time()
                self.state = 8
        if self.state == 8:
            if time() - self.timer_action > timep:
                self.press_comfirm()
                self.timer_action = time()
                self.state = 0
        if self.state == 9:
            if time() - self.timer_action > 2:
                self.end = False
                self.press_comfirm_cake()
                self.timer_action = time()
                self.state = 0
        return None
import cv2 as cv
import numpy as np
import os
from time import time
# from PIL import ImageGrab
from windowcapture import WindowCapture
# from vision import findClickPositions
from vision import Vision

# Change the working directory to the folder this script is in.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

wincap = WindowCapture('Toontown Rewritten')
# Previously, we had to use the object to call this function.
# Now that it is static, we can call the class directly.
# wincap.list_window_names()
WindowCapture.list_window_names()

# Initialize the Vision class
vision_speedchat = Vision('speedchat_bubble.png')

loop_time = time()
points = []
firstRun = True
while True:
    # screenshot = None
    # screenshot = pyautogui.screenshot()
    # cv.imshow is not compatible with the pyautogui screenshot format,
    # so we use numpy.
    # screenshot = ImageGrab.grab()
    for text in OCRResultsSorted:
        for champ in championListForOCR:
            if champ in text:
                sortedChampionsToBuy.append(champ)
                print("found {}".format(champ))
    print("List of sorted champions to buy: ", sortedChampionsToBuy)
    return sortedChampionsToBuy


# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# 'League of Legends (TM) Client'
# initialize the WindowCapture class
wincap = WindowCapture('League of Legends (TM) Client')

# initialize the Vision class
# First champion card to buy on screen
xFirstChampionCard = 505
wChampionCard = 175
yFirstChampionCard = 865
hChampionCard = 135
PADDINGBETWEENCHAMPIONCARDS = 10

# drawing rectangles
line_color = (255, 0, 255)
line_type = cv.LINE_4
marker_color = (255, 0, 255)
import numpy as np
import cv2 as cv
import pyautogui
import os
from time import time
from windowcapture import WindowCapture
import pytesseract as tess
tess.pytesseract.tesseract_cmd = r'C:\Users\---USER---\AppData\Local\Tesseract-OCR\tesseract.exe'
from PIL import Image

# window must not be minimized, else the capture is all black
wincap = WindowCapture('REALPOSEIDON · DLive - Brave')  # name found via windowcapture.py

loop_time = time()

# find the full name of the window you want to capture, without the hex number
# uncomment this to list the names of the windows
# wincap.list_window_names()
# exit()

cycle = 0
grab = 0
while True:
    # this is how you quit the window
    if cv.waitKey(1) == ord('q'):
        cv.destroyAllWindows()
        break
class Movement:
    # Properties
    DEBUG = True
    wincap = None
    vision = None
    bot = None
    haystack_wnd = None
    targetList = []
    tooltipList = []
    loginList = []
    state = 0
    isGood = False
    loggingIn = False
    points = []
    current = ''
    key = None
    user = b'gAAAAABf470AGsKOJ65Ee9ZxZasRjABVUbdimwfivMloakcKoa20R_guknxp0K7xqYAbLD5IfZ9dUMJP77lKTM6oWRpYl17GHw=='
    pw = b'gAAAAABf470AeGuSrJmZEZrBzs8rJEQqiUDUoArQPNSkMJnlaKyxEknOUXvtvpWlLbTqBkq0SnEnYvjadV7gFI1sd7jtJJbImQ=='

    # Constructor
    def __init__(self, target='doodle.png', tooltip='doodle.png', haystack_wnd='Toontown Rewritten'):
        self.haystack_wnd = haystack_wnd
        # Our list of commands to execute in sequence
        self.targetList = [
            "targets/speedchat_bubble.png",
            "targets/Pets.png",
            "targets/good.png",
            "targets/Tricks.png",
            "targets/Play_dead.png",
            "targets/Scratch.png",
            "targets/Feed.png",
            "targets/Tired.png",
            "targets/Excited.png"
        ]
        self.tooltipList = [
            "tooltips/tooltip.png",
            "tooltips/Pets_tt.png",
            "tooltips/good_tt.png",
            "tooltips/Tricks_tt.png",
            "tooltips/Play_dead_tt.png",
            "tooltips/Scratch_tt.png",
            "tooltips/Feed_tt.png",
            "tooltips/Tired_tt.png",
            "tooltips/Excited_tt.png"
        ]
        # Window Capture has default to TTR, else we choose from main.
        self.wincap = WindowCapture(window_name=haystack_wnd)
        # WindowCapture.list_window_names()
        # check foreground window title
        current = self.wincap.title()
        # Only two modes. Does not work from character select.
        if current == "Toontown Rewritten Launcher":
            self.login()
        else:
            self.vision = Vision(target)
            self.bot = TTRBot((self.wincap.offset_x, self.wincap.offset_y),
                              (self.wincap.w, self.wincap.h))
        # When giving our property objects new parameters
        # we must stop and start again, otherwise the "stopped"
        # property gets reset to True.
        self.wincap.start()
        self.vision.start()
        self.bot.start()
        self.locator()

    def command_chain(self, command, tooltip):
        self.wincap.stop()
        self.vision.stop()
        self.bot.stop()
        self.wincap = WindowCapture(window_name=self.haystack_wnd)
        self.vision = Vision(command)
        self.bot = TTRBot((self.wincap.offset_x, self.wincap.offset_y),
                          (self.wincap.w, self.wincap.h), tooltip)
        self.wincap.start()
        self.vision.start()
        self.bot.start()

    """
    Cryptology:
        click.write_key()
        key = click.load_key()
        message1 = user.encode()
        print(message1)  # bytes now
        message2 = pw.encode()
        print(message2)
        f = Fernet(key)
        encrypted1 = f.encrypt(message1)
        encrypted2 = f.encrypt(message2)
        print(encrypted1)
        print(encrypted2)
    """

    def login(self):
        # empty bits on bitmap, idk how they made the launcher
        # self.locator()
        # Just send input
        # Decrypt our user name and pw. If you want to continue,
        # generate a new key for your own credentials, or remove the encryption altogether.
        self.key = self.load_key()
        f = Fernet(self.key)
        pdi.press(['tab'])
        sleep(0.05)
        pdi.typewrite(f.decrypt(self.user).decode())
        sleep(0.05)
        pdi.press(['tab'])
        sleep(0.05)
        pdi.typewrite(f.decrypt(self.pw).decode())
        sleep(0.05)
        pdi.press(['enter'])
        sleep(0.05)
        # Wait for TTR
        self.wincap.wait_hwnd()
        sleep(10.5)
        pdi.press(['up'])
        sleep(4.5)
        self.wincap = WindowCapture(self.haystack_wnd)
        self.vision = Vision("targets/bear.png")
        self.bot = TTRBot((self.wincap.offset_x, self.wincap.offset_y),
                          (self.wincap.w, self.wincap.h), 'tooltips/tooltip_bear.png')

    def locator(self):
        # Too late, lazy to change variables from "self"
        loop_time = time()
        firstRun = True
        while True:
            # All the classes run in their own thread that's separate from the main thread
            # so that the code here can continue while the bot performs its actions

            # if we don't have a screenshot yet, don't run the code below this point yet
            if self.wincap.screenshot is None:
                continue
            # give vision the current screenshot to search for objects in
            self.vision.update(self.wincap.screenshot)
            # update the bot with the data it needs right now
            if self.bot.state == BotState.INITIALIZING:
                # while bot is waiting to start, go ahead and start giving it some targets to work
                # on right away when it does start
                targets = self.vision.get_click_points(self.vision.rectangles)
                self.bot.update_targets(targets)
            elif self.bot.state == BotState.SEARCHING:
                # when searching for something to click on next, the bot needs to know what the click
                # points are for the current detection results. it also needs an updated screenshot
                # to verify the hover tooltip once it has moved the mouse to that position
                targets = self.vision.get_click_points(self.vision.rectangles)
                self.bot.update_targets(targets)
                self.bot.update_screenshot(self.wincap.screenshot)
            elif self.bot.state == BotState.MOVING:
                # when moving, we need fresh screenshots to determine when we've stopped moving.
                self.bot.update_screenshot(self.wincap.screenshot)
            elif self.bot.state == BotState.STILL:
                # nothing is needed while we wait for the ui to finish
                """
                class Command:
                    OPTION = 0
                    PETS = 1
                    GOOD = 2
                    TRICKS = 3
                    TRICK = 4
                    TIRED = 5
                    SCRATCH = 6  - To save jellybeans.
                    FEED = 7
                    EXCITED = 8
                """
                # Regular route - On successful click
                if not self.loggingIn:
                    if self.state + 1 < Command.EXCITED:
                        # Always do one round of excited check.
                        # Similar to bot's confirm_tooltip method
                        excited = cv.imread(self.targetList[Command.EXCITED], cv.IMREAD_UNCHANGED)
                        excitement = cv.matchTemplate(self.wincap.screenshot, excited, cv.TM_CCOEFF_NORMED)
                        # get the best match position
                        min_val, bestMatch, min_loc, max_loc = cv.minMaxLoc(excitement)
                        if bestMatch >= 0.60:
                            # No need to feed if excited
                            if self.state == Command.FEED or self.state == Command.SCRATCH:
                                self.state = 0
                                self.isGood = False
                            # If we've already said "Good Boy!"
                            elif self.isGood and Command.GOOD:
                                self.state = Command.TRICKS
                                self.isGood = False
                            # If Good, then naw.
                            elif self.state == Command.GOOD:
                                self.isGood = True
                                self.command_chain(command=self.targetList[self.state],
                                                   tooltip=self.tooltipList[self.state])
                        # If tired, or neutral, not good.
                        elif self.state == Command.FEED:
                            self.isGood = False
                            self.command_chain(command=self.targetList[self.state],
                                               tooltip=self.tooltipList[self.state])
                        else:
                            self.state = Command.SCRATCH
                            self.isGood = False
                            self.command_chain(command=self.targetList[self.state],
                                               tooltip=self.tooltipList[self.state])
                        # Increment after everything is done.
                        if firstRun:
                            firstRun = False
                        else:
                            self.state += 1
                    else:
                        self.isGood = False
                        self.state = Command.OPTION
                        self.command_chain(command=self.targetList[self.state],
                                           tooltip=self.tooltipList[self.state])
                # Use loginList instead
                else:
                    # for now
                    pass

            if self.DEBUG:
                # draw the detection results onto the original image
                detection_image = self.vision.draw_rectangles(
                    self.wincap.screenshot, self.vision.rectangles)
                # display the images
                try:
                    cv.imshow('Matches', detection_image)
                except:
                    pass

            # sleep(0.5)
            # win32gui.SetForegroundWindow(win32gui.FindWindow(None, "Toontown Rewritten"))
            # debug the loop rate - bad atm
            print('FPS {}'.format(1 / (time() - loop_time)))
            print(self.state)
            loop_time = time()

            # Press 'q' with the output window focused to exit.
            # Waits 1 ms every loop to process key presses
            key = cv.waitKey(1)
            if key == ord('q'):
                self.wincap.stop()
                self.vision.stop()
                self.bot.stop()
                cv.destroyAllWindows()
                break

        print('Done performing {} task.'.format(self.targetList[self.state]))

    # Encrypts our user/pw
    def write_key(self):
        # Generates a key and saves it into a file
        self.key = Fernet.generate_key()
        with open("key.key", "wb") as key_file:
            key_file.write(self.key)

    def load_key(self):
        # Loads the key from the current directory, named `key.key`
        return open("key.key", "rb").read()
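
# One-time setup sketch (assumption: run separately, not part of the bot loop) showing how the
# encrypted `user` / `pw` byte strings on the class can be regenerated for your own credentials,
# following the "Cryptology" notes in the class body. The helper name and the use of
# Movement.__new__ to skip __init__ are illustrative only.
def encrypt_credentials_example(user_plain, pw_plain):
    helper = Movement.__new__(Movement)  # skip __init__; we only need the key helpers
    helper.write_key()                   # generates key.key in the working directory
    f = Fernet(helper.load_key())
    encrypted_user = f.encrypt(user_plain.encode())
    encrypted_pw = f.encrypt(pw_plain.encode())
    print(encrypted_user)
    print(encrypted_pw)
    return encrypted_user, encrypted_pw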
import cv2 as cv
import numpy as np
import os
from time import time
from windowcapture import WindowCapture
from vision import Vision
from bot import Bot

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
# os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
wincap = WindowCapture()

# initialize the Vision class
vision_pilar1 = Vision('pilar1.png')
vision_pilar2 = Vision('pilar2.png')
vision_pilar3 = Vision('pilar3.png')
# vision_pilar4 = Vision('pilar4.png')
vision_pilar5 = Vision('pilar5.png')
vision_pilar6 = Vision('pilar6.png')
vision_pilar7 = Vision('pilar7.png')
# vision_pilar62 = Vision('pilar62.png')
vision_pilarPr = Vision('pilarpr.png')

loop_time = time()
while True:
    # get an updated image of the game
    screenshot = wincap.get_screenshot()

    points_1 = []
class FishingBot:
    # properties
    fish_pos_x = None
    fish_pos_y = None
    fish_last_time = None
    detect_text_enable = False
    botting = False
    FISH_RANGE = 74
    FISH_VELO_PREDICT = 30
    BAIT_POSITION = (473, 750)
    FISH_POSITION = (440, 750)
    FILTER_CONFIG = [49, 0, 58, 134, 189, 189, 0, 0, 0, 0]
    FISH_WINDOW_CLOSE = (430, 115)
    # position and size of the fishing window;
    # these values can differ depending on the size of the game window
    FISH_WINDOW_SIZE = (280, 226)
    FISH_WINDOW_POSITION = (163, 125)
    wincap = None
    fishfilter = Filter() if detect_text_enable else None
    # Load the needle images
    needle_img = cv.imread('images/fiss.jpg', cv.IMREAD_UNCHANGED)
    needle_img_clock = cv.imread('images/clock.jpg', cv.IMREAD_UNCHANGED)
    # Some time cooldowns
    detect_text = True
    # Limit time
    initial_time = None
    end_time_enable = False
    end_time = 0
    # for fps
    loop_time = time()
    # The mouse click cooldown
    timer_mouse = time()
    # The timer between the states
    timer_action = time()
    bait_time = 2
    throw_time = 2
    game_time = 2
    # These are the filter parameters; they help to find the right image
    hsv_filter = HsvFilter(*FILTER_CONFIG)
    state = 0

    def detect(self, haystack_img):
        # match the needle image with the haystack image
        result = cv.matchTemplate(haystack_img, self.needle_img, cv.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        # needle image's dimensions
        needle_w = self.needle_img.shape[1]
        needle_h = self.needle_img.shape[0]
        # get the position of the matched image
        top_left = max_loc
        bottom_right = (top_left[0] + needle_w, top_left[1] + needle_h)
        # Draw the circle of the fish limits
        cv.circle(haystack_img,
                  (int(haystack_img.shape[1] / 2), int(haystack_img.shape[0] / 2)),
                  self.FISH_RANGE, color=(0, 0, 255), thickness=1)
        # Only act if the best match value is greater than 0.5
        if max_val > 0.5:
            pos_x = (top_left[0] + bottom_right[0]) / 2
            pos_y = (top_left[1] + bottom_right[1]) / 2
            if self.fish_last_time:
                dist = math.sqrt((pos_x - self.fish_pos_x)**2 + (self.fish_pos_y - pos_y)**2)
                cv.rectangle(haystack_img, top_left, bottom_right,
                             color=(0, 255, 0), thickness=2, lineType=cv.LINE_4)
                # Calculate the fish velocity
                velo = dist / (time() - self.fish_last_time)
                if velo == 0.0:
                    return (pos_x, pos_y, True)
                elif velo >= 150:
                    # At this velocity the fish position will be predicted
                    pro = self.FISH_VELO_PREDICT / dist
                    destiny_x = int(pos_x + (pos_x - self.fish_pos_x) * pro)
                    destiny_y = int(pos_y + (pos_y - self.fish_pos_y) * pro)
                    # Draw the prediction line
                    cv.line(haystack_img, (int(pos_x), int(pos_y)),
                            (destiny_x, destiny_y), (0, 255, 0), thickness=3)
                    return (destiny_x, destiny_y, False)
            # store the fish position and the time
            self.fish_pos_x = pos_x
            self.fish_pos_y = pos_y
            self.fish_last_time = time()
        return None

    def detect_minigame(self, haystack_img):
        result = cv.matchTemplate(haystack_img, self.needle_img_clock, cv.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        if max_val > 0.9:
            return True
        return False

    def set_to_begin(self, values):
        if values['-ENDTIMEP-']:
            self.end_time_enable = True
            try:
                self.end_time = int(values['-ENDTIME-']) * 60
            except:
                self.end_time = 0
        self.bait_time = values['-BAITTIME-']
        self.throw_time = values['-THROWTIME-']
        self.game_time = values['-STARTGAME-']
        self.wincap = WindowCapture('METIN2')
        self.state = 0
        self.initial_time = time()
        self.timer_action = time()

    def runHack(self):
        screenshot = self.wincap.get_screenshot()
        # crop and apply the hsv filter
        crop_img = screenshot[
            self.FISH_WINDOW_POSITION[1]:self.FISH_WINDOW_POSITION[1] + self.FISH_WINDOW_SIZE[1],
            self.FISH_WINDOW_POSITION[0]:self.FISH_WINDOW_POSITION[0] + self.FISH_WINDOW_SIZE[0]]
        detect_end_img = screenshot[
            self.FISH_WINDOW_POSITION[1]:self.FISH_WINDOW_POSITION[1] + self.FISH_WINDOW_SIZE[1],
            self.FISH_WINDOW_POSITION[0]:self.FISH_WINDOW_POSITION[0] + self.FISH_WINDOW_SIZE[0]]
        crop_img = self.hsv_filter.apply_hsv_filter(crop_img)
        cv.putText(crop_img, 'FPS: ' + str(1 / (time() - self.loop_time))[:2],
                   (10, 200), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv.putText(crop_img,
                   'State: ' + str(self.state) + ' ' + str(time() - self.timer_action)[:5],
                   (10, 160), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        self.loop_time = time()

        # Verify total time
        if self.end_time_enable and time() - self.initial_time > self.end_time:
            self.botting = False

        # State to put the bait on the rod
        if self.state == 0:
            mouse_x = int(self.BAIT_POSITION[0] + self.wincap.offset_x)
            mouse_y = int(self.BAIT_POSITION[1] + self.wincap.offset_y)
            if time() - self.timer_action > self.bait_time:
                self.detect_text = True
                pydirectinput.click(x=mouse_x, y=mouse_y, button='right')
                self.state = 1
                self.timer_action = time()

        # State to throw the bait
        if self.state == 1:
            if time() - self.timer_action > self.throw_time:
                mouse_x = int(self.FISH_POSITION[0] + self.wincap.offset_x)
                mouse_y = int(self.FISH_POSITION[1] + self.wincap.offset_y)
                pydirectinput.click(x=mouse_x, y=mouse_y, button='right')
                self.state = 2
                self.timer_action = time()

        # Delay before starting the clicks
        if self.state == 2:
            if time() - self.timer_action > self.game_time:
                self.state = 3
                self.timer_action = time()

        # Countdown to finish the state
        detected_end = self.detect_minigame(detect_end_img)
        if self.state == 3:
            if time() - self.timer_action > 15:
                self.timer_action = time()
                self.state = 0
            if time() - self.timer_action > 5 and detected_end is False:
                self.timer_action = time()
                self.state = 0
            if self.detect_text_enable and time() - self.timer_action > 1.5:
                if self.detect_text:
                    if self.fishfilter.match_with_text(screenshot) is False:
                        mouse_x = int(self.wincap.offset_x + self.FISH_WINDOW_CLOSE[0])
                        mouse_y = int(self.wincap.offset_y + self.FISH_WINDOW_CLOSE[1])
                        pydirectinput.click(x=mouse_x, y=mouse_y, button='left')
                        pydirectinput.click(x=mouse_x, y=mouse_y, button='left')
                    self.detect_text = False

        # make the click
        if (time() - self.timer_mouse) > 0.3 and self.state == 3 and detected_end:
            # Detect the fish
            square_pos = self.detect(crop_img)
            if square_pos:
                # Recalculate the mouse position with the fish position
                pos_x = square_pos[0]
                pos_y = square_pos[1]
                center_x = self.FISH_WINDOW_SIZE[0] / 2
                center_y = self.FISH_WINDOW_SIZE[1] / 2
                mouse_x = int(pos_x)
                mouse_y = int(pos_y)
                # Verify if the fish is in range
                d = self.FISH_RANGE**2 - ((center_x - mouse_x)**2 + (center_y - mouse_y)**2)
                # Make the click
                if d > 0:
                    self.timer_mouse = time()
                    mouse_x = int(pos_x + self.FISH_WINDOW_POSITION[0] + self.wincap.offset_x)
                    mouse_y = int(pos_y + self.FISH_WINDOW_POSITION[1] + self.wincap.offset_y)
                    pydirectinput.click(x=mouse_x, y=mouse_y)
        '''
        cv.imshow('Minha Janela', crop_img)
        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
            return True
        '''
        return crop_img
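
# Driver sketch (assumption: in the real project `values` comes from a GUI; here a hand-built
# dict with the same keys consumed by set_to_begin() stands in for it). It runs runHack() in a
# loop and shows the debug crop it returns; press 'q' in the preview window to stop.
if __name__ == '__main__':
    bot = FishingBot()
    bot.set_to_begin({'-ENDTIMEP-': False, '-ENDTIME-': '0',
                      '-BAITTIME-': 2, '-THROWTIME-': 2, '-STARTGAME-': 2})
    bot.botting = True
    while bot.botting:
        debug_view = bot.runHack()
        cv.imshow('FishingBot debug', debug_view)
        if cv.waitKey(1) == ord('q'):
            break
    cv.destroyAllWindows()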
import cv2 as cv
import numpy as np
import os
from time import time
from windowcapture import WindowCapture
from vision import Vision
from hsvfilter import HsvFilter

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
wincap = WindowCapture('Albion Online Client')
# initialize the Vision class
vision_limestone = Vision('albion_limestone_processed.jpg')
# initialize the trackbar window
vision_limestone.init_control_gui()

# limestone HSV filter
hsv_filter = HsvFilter(0, 180, 129, 15, 229, 243, 143, 0, 67, 0)

loop_time = time()
while True:
    # get an updated image of the game
    screenshot = wincap.get_screenshot()

    # pre-process the image
    processed_image = vision_limestone.apply_hsv_filter(screenshot, hsv_filter)
import os
from time import time
from windowcapture import WindowCapture
from detection import Detection
from vision import Vision
from bot import AlbionBot, BotState

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their
# own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

DEBUG = True

# initialize the WindowCapture class
wincap = WindowCapture('Albion Online Client')
# load the detector
detector = Detection('limestone_model_final.xml')
# load an empty Vision class
vision = Vision()
# initialize the bot
bot = AlbionBot((wincap.offset_x, wincap.offset_y), (wincap.w, wincap.h))

wincap.start()
detector.start()
bot.start()

while True:
    # if we don't have a screenshot yet, don't run the code below this point yet
    if wincap.screenshot is None:
import cv2 as cv
import numpy as np
import os
from time import time
from windowcapture import WindowCapture

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
wincap = WindowCapture('Untitled - Notepad')

loop_time = time()
while True:
    # get an updated image of the game
    screenshot = wincap.get_screenshot()
    cv.imshow('Computer Vision', screenshot)

    # debug the loop rate
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()

    # press 'q' with the output window focused to exit.
    # waits 1 ms every loop to process key presses
    if cv.waitKey(1) == ord('q'):
        cv.destroyAllWindows()
        break
# Detect the trained objects in the camera feed
import cv2 as cv
import numpy as np
import os
from time import time
from windowcapture import WindowCapture
from vision import Vision

os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Initialize the WindowCapture class
wincap = WindowCapture('Camera')
# Load the trained cascade model
cascade_limestone = cv.CascadeClassifier('result.xml')
# Load an empty Vision class
vision_limestone = Vision(None)

loop_time = time()
while True:
    # get an updated image of the game
    imagem = wincap.get_screenshot()

    # Object detection
    rectangles = cascade_limestone.detectMultiScale(imagem)

    # Draw the detection results onto the original image
    detection_image = vision_limestone.draw_rectangles(imagem, rectangles)
def d():
    rightClick((computerWidth / 2, computerHeight / 2))
    time.sleep(1)
    q = False
    w = False
    e = False
    buyItems()
    time.sleep(1)
    log("Locking screen...")
    pressKey(keyboardButton.n0)
    wincap = WindowCapture('League of Legends (TM) Client')
    while True:
        try:
            screenshot = wincap.get_screenshot()
        except:
            break
        if getPixel(game.recallButton) != (64, 85, 95):
            # checking health
            if getPixel(game.healthBar) == (1, 13, 7):
                rightClick(game.base)
                rightClick(game.base)
                pressKey(keyboardButton.D)
                pressKey(keyboardButton.F)
                # pressKey(keyboardButton.n1)
                flag = True
                for i in range(8):
                    flag = getPixel(game.healthBar) == (1, 13, 7)
                    rightClick(game.base)
                    time.sleep(1)
                    if flag == False:
                        break
                if flag:
                    pressKey(keyboardButton.B)
                    flag2 = True
                    for i in range(9):
                        flag2 = getPixel(game.healthBar) == (1, 13, 7)
                        time.sleep(1)
                        if flag2 == False:
                            break
                    if flag2:
                        buyItems()
                        pressKey(keyboardButton.n4)
            else:
                max_loc = locateGame("casterMinion", screenshot, np.array([111, 167, 81]), np.array([121, 215, 130]), 0.5)
                if max_loc != None:
                    setPos(max_loc)
                    pressKey(keyboardButton.Space)
                # finding champ
                maxChamp_loc = locateGame("champHealth", screenshot, np.array([2, 204, 165]), np.array([4, 204, 166]), 0.5)
                # checking tower range
                max_loc = locateGame("tower", screenshot, np.array([49, 0, 135]), np.array([100, 40, 166]), 0.5)
                # checking if there is an enemy champion
                if maxChamp_loc != None:
                    # checking that you are not in tower range
                    if max_loc == None:
                        pressKey(keyboardButton.T)
                        maxChamp_loc = (maxChamp_loc[0] + 50, maxChamp_loc[1] + 75)
                        setPos(maxChamp_loc)
                        pressKey(keyboardButton.E)
                        pressKey(keyboardButton.R)
                        pressKey(keyboardButton.Q)
                        pressKey(keyboardButton.W)
                        pressKey(keyboardButton.Space)
                        pressKey(keyboardButton.T)
                        time.sleep(0.5)
                    else:
                        pressKey(keyboardButton.W)
                else:
                    x = random.randint(-25, 25)
                    rightClick((game.tower[0] + x, game.tower[1] - x))
                # levelling up
                if locate("levelUpButton", screenshot) != None:
                    holdKey(keyboardButton.Lctrl)
                    time.sleep(0.05)
                    pressKey(keyboardButton.R)
                    if q == False:
                        pressKey(keyboardButton.Q)
                        q = True
                    elif w == False:
                        pressKey(keyboardButton.W)
                        w = True
                    elif e == False:
                        pressKey(keyboardButton.E)
                        e = True
                    pressKey(keyboardButton.Q)
                    pressKey(keyboardButton.W)
                    pressKey(keyboardButton.E)
                    releaseKey(keyboardButton.Lctrl)
        else:
            buyItems()
    log("Game has ended.")