import unittest
from ctypes import windll

from pyrobot import Robot


class TestRobot(unittest.TestCase):

    def setUp(self):
        self.robot = Robot()

    def test_take_screenshot_full_screen(self):
        SM_CXVIRTUALSCREEN = 78  # width of the virtual screen
        SM_CYVIRTUALSCREEN = 79  # height of the virtual screen
        virtual_screen_size = (
            windll.user32.GetSystemMetrics(SM_CXVIRTUALSCREEN),
            windll.user32.GetSystemMetrics(SM_CYVIRTUALSCREEN),
        )

        im = self.robot.take_screenshot()  # no args: full virtual screen
        self.assertEqual(im.size, virtual_screen_size)

    def test_take_screenshot_bounds(self):
        bounding_box = (0, 0, 100, 100)
        box_size = (100, 100)

        im = self.robot.take_screenshot(bounding_box)
        self.assertEqual(im.size, box_size)

    def test_get_display_monitors(self):
        # Since the code inside the get_display_monitors func is the same
        # code I'd use here to test it, just assume that if the first entry
        # is successful, the rest will be too.
        SM_CXSCREEN = 0  # width flag for the primary monitor
        SM_CYSCREEN = 1  # height flag for the primary monitor
        primary_screen_size = (
            0,
            0,
            windll.user32.GetSystemMetrics(SM_CXSCREEN),
            windll.user32.GetSystemMetrics(SM_CYSCREEN),
        )

        screen_coords = self.robot.get_display_monitors()
        self.assertEqual(screen_coords[0], primary_screen_size)
import time

import cv2
import keyboard
import numpy as np
import pytesseract
from pyrobot import Robot


def bot():
    # windows, windowRects, index and click() are assumed to be defined
    # elsewhere in the module.
    # get the selected window and its rect
    window = windows[index]
    rect = windowRects[index]

    # separate x, y, w, h of the rect
    x = rect[0]
    y = rect[1]
    w = rect[2]
    h = rect[3]

    # create bounding box: top and height are cut
    bounding_box = (x, y + 100, w, h - 160)

    waittime = 0
    try:
        while True:
            if keyboard.is_pressed("q"):
                break

            robot = Robot()
            try:
                windowimg = robot.take_screenshot(bounding_box)
            except Exception:
                # screenshot failed, skip this frame
                continue

            # convert to mode 'L', i.e. grayscale
            windowimg = windowimg.convert('L')

            # convert the image to a numpy array to work with pixels
            img = np.array(windowimg)

            # apply a threshold, just black and white
            ret, img = cv2.threshold(img, 240, 255, cv2.THRESH_BINARY)

            d = pytesseract.image_to_data(img, output_type='data.frame')

            # cv2.imshow('screen', img)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                cv2.destroyAllWindows()
                break

            # filter the tesseract results for Flyff
            # (these filters aren't perfect yet)
            d = d[d['text'].apply(lambda t: isinstance(t, str))]
            d = d[(d['conf'] != -1) & (d['conf'] > 2) &
                  (d['text'] != "") & (d['text'] != " ") &
                  (d['text'] != "NaN") &
                  (d['text'].apply(lambda t: len(t) > 2))]

            # text was found in the image
            if d.shape[0] > 0:
                # get the row of the first found text
                row = d.iloc[0]
                # + 20 because x + row['left'] is the top-left x corner of the text
                monsterx = x + row['left'] + 20
                # + 5 because y + row['top'] is the top-left y corner of the text
                # + 100 because we cut 100 px off the top of the bounding box earlier
                monstery = y + row['top'] + 100 + 5
                print("Found monster in window: " + str(window) +
                      " at x: " + str(monsterx) + " at y: " + str(monstery))

                # click the monster
                click(monsterx, monstery)

                # cast spells
                keyboard.press_and_release("c")
                keyboard.press_and_release("s")

            # general wait time before the next analysis of the image
            time.sleep(2)
    except KeyboardInterrupt:
        exit()
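The bot() snippet above relies on module-level windows, windowRects and index values and on a click() helper that are not shown. As an illustration only (the names, the pywin32 enumeration and the ctypes-based click are assumptions, not the original implementation), a minimal setup could look like this:

# Hypothetical setup for the globals the bot expects; the original
# definitions of windows, windowRects, index and click() are not shown.
import win32gui
from ctypes import windll

windows = []       # window titles
windowRects = []   # matching (x, y, w, h) rectangles
index = 0          # which entry the game client is running in


def _collect(hwnd, _):
    # keep only visible windows that have a title
    if win32gui.IsWindowVisible(hwnd) and win32gui.GetWindowText(hwnd):
        left, top, right, bottom = win32gui.GetWindowRect(hwnd)
        windows.append(win32gui.GetWindowText(hwnd))
        windowRects.append((left, top, right - left, bottom - top))


win32gui.EnumWindows(_collect, None)

MOUSEEVENTF_LEFTDOWN = 0x0002
MOUSEEVENTF_LEFTUP = 0x0004


def click(x, y):
    # move the cursor to the target and send a left button down/up pair
    windll.user32.SetCursorPos(int(x), int(y))
    windll.user32.mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
    windll.user32.mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)

Note that GetWindowRect returns (left, top, right, bottom), so the sketch converts it to the (x, y, w, h) form the bot expects.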
from pyrobot import Robot

robot = Robot()

# list all monitor rectangles and take a screenshot of the last one
monitors = robot.get_display_monitors()
print(monitors)
print(len(monitors))

im = robot.take_screenshot(monitors[-1])
im.save('screenshotwhatever.png', 'png')

# crop the top-left 1000x1000 pixels and save that as well
imcrop = im.crop((0, 0, 1000, 1000))
imcrop.save("screenshotcropexample.png", 'png')