def takeScreenshot(screenshotType):
    loadConfig()
    if 'default' in screenshotType:
        # Capture the primary monitor immediately
        with mss.mss() as sct:
            file = sct.shot(output=img_filename)
    elif '-d' in screenshotType:
        # Delayed capture: wait five seconds first
        with mss.mss() as sct:
            time.sleep(5)
            file = sct.shot(output=img_filename)
    elif '-i' in screenshotType:
        # Capture to a temporary file, then upload it to Imgur
        with mss.mss() as sct:
            file = sct.shot(output='imgur.png')
            upload()
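# The function above relies on a loadConfig() helper and a module-level
# img_filename that are not shown here. A minimal sketch of what they might
# look like, using configparser; the "screenshot.ini" path, section and option
# names are assumptions, not part of the original code.
import configparser

img_filename = "screenshot.png"

def loadConfig():
    global img_filename
    config = configparser.ConfigParser()
    # Keep the default filename if the config file or option is missing
    if config.read("screenshot.ini"):
        img_filename = config.get("output", "filename", fallback=img_filename)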
def screen_record():
    sct = mss.mss()
    last_time = time.time()
    while True:
        # Grab the capture region and report how long the last loop took
        img = sct.grab(mon)
        print('loop took {} seconds'.format(time.time() - last_time))
        last_time = time.time()
        img = np.array(img)
        processed_image = process_image(img)
        mean = np.mean(processed_image)
        print('mean = ', mean)
        if mean <= 0.11:
            # Frame is dark enough: click and stop recording
            print('SSSSSSSS ')
            pyautogui.click(button='left')
            break
        time.sleep(0.01)
        # Optional quit key; only effective while an OpenCV window is open
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
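# screen_record() depends on a module-level capture region `mon` and a
# process_image() helper that are defined elsewhere. A minimal sketch under
# the assumption that the preprocessing is a grayscale conversion scaled to
# [0, 1] (the real implementation may differ):
mon = {"top": 0, "left": 0, "width": 800, "height": 600}

def process_image(img):
    # mss frames are BGRA; drop the alpha channel while converting to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    return gray / 255.0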
def fish_shrimp():
    mon = tools.find_rs()
    if mon['width'] < 773 or mon['height'] < 534:
        print('rs not in screen or partially blocked!')
        return -1
    inv_items = [
        cv2.imread("net.png"),
        cv2.imread("shrimp.png"),
        cv2.imread("bottle.png")
    ]
    not_fishing = True
    inventory = 0
    cursor_data = display.Display().screen().root.query_pointer()._data
    mouse_pos = (cursor_data["root_x"] - mon['left'],
                 cursor_data["root_y"] - mon['top'])
    current_spot = (mon['left'] + mon['width'] / 1.5,
                    mon['top'] + mon['height'] / 2)
    while True:
        imfile = 'imfile.png'
        shot = mss.mss().grab(mon)
        mss.tools.to_png(shot.rgb, shot.size, output=imfile)
        image = cv2.imread(imfile)
        if not_fishing and inventory < 28:
            # Find the closest fishing spot
            new_spot = find_new_spot(image)
            # Move the mouse there and click
            move_mouse(current_spot, new_spot, mon)
            time.sleep(5)
            shot = mss.mss().grab(mon)
            mss.tools.to_png(shot.rgb, shot.size, output=imfile)
            image = cv2.imread(imfile)
            current_spot = find_new_spot(image)
            not_fishing = False
        else:
            # Check whether the fishing spot has moved
            temp_spot = find_new_spot(image)
            if temp_spot != current_spot:
                not_fishing = True
            # Count items in the inventory
            inventory = inventory_num(image)
            time.sleep(2)
        if inventory >= 28:
            # Inventory is full; banking is not implemented, so stop here
            print('done fishing')
            break
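# fish_shrimp() also assumes helpers such as find_new_spot(), move_mouse(),
# inventory_num() and tools.find_rs(). As an illustration only, find_new_spot()
# could be a template match against the fishing-spot sprite; the "fish.png"
# template and the 0.7 threshold are borrowed from the other snippets here and
# are not guaranteed to match the original helper.
def find_new_spot(image):
    fish = cv2.imread("fish.png")
    res = cv2.matchTemplate(image, fish, cv2.TM_CCOEFF_NORMED)
    spots = [pt for pt in zip(*np.where(res >= 0.7)[::-1])]
    if not spots:
        return None
    # Return the first candidate; the original presumably picks the closest one
    return spots[0]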
def ss():
    op = 1
    with mss.mss() as sct:
        monitor = {"top": 40, "left": 0, "width": 800, "height": 640}
        while "Screen capturing":
            last_time = time.time()
            img = np.array(sct.grab(monitor))
            gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Template match and keep every location above the threshold
            res = cv2.matchTemplate(gray_frame, template, cv2.TM_CCOEFF_NORMED)
            loc = np.where(res >= 0.7)
            op += 1
            print(op)
            for pt in zip(*loc[::-1]):
                cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 3)
                x, y = pt[0], pt[1]
                print(x)
                if 100 < x < 490:
                    # Match is inside the target band: hold the left button for two seconds
                    pyautogui.mouseDown(button='left')
                    time.sleep(2)
                    pyautogui.mouseUp(button='left')
                    break
            if cv2.waitKey(25) & 0xFF == ord("q"):
                cv2.destroyAllWindows()
            if op > 35:
                return
def grabData(menuMonitor):
    sct = mss.mss()
    sct_img = sct.grab(menuMonitor)
    return sct_img
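# Example use of grabData(): the returned mss ScreenShot can be wrapped in a
# NumPy array for OpenCV processing. The region below is a placeholder.
menu_region = {"top": 100, "left": 100, "width": 300, "height": 200}
menu_img = np.array(grabData(menu_region))            # BGRA frame
menu_gray = cv2.cvtColor(menu_img, cv2.COLOR_BGRA2GRAY)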
def find_top_corner():
    with mss.mss() as sct:
        # Screenshot the primary monitor to disk and load it in grayscale
        cmon = cv2.imread(sct.shot(), 0)
        rl = cv2.imread('rl_logo.png', 0)
        res = cv2.matchTemplate(cmon, rl, cv2.TM_CCOEFF)
        # minMaxLoc()[3] is maxLoc: the top-left corner of the best match
        return cv2.minMaxLoc(res)[3]
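# Possible use of find_top_corner(): anchor a fixed-size capture region at the
# detected logo position. The 800x600 size is an assumption.
top_left = find_top_corner()
window_region = {"left": top_left[0], "top": top_left[1],
                 "width": 800, "height": 600}
with mss.mss() as sct:
    frame = np.array(sct.grab(window_region))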
def live_play_to_table(name):
    global click
    click = False
    mon = find_rs()
    cursor = cv2.imread("cursor.png")
    fish = cv2.imread("fish.png")
    inv_items = [
        cv2.imread("net.png"),
        cv2.imread("shrimp.png"),
        cv2.imread("bottle.png")
    ]
    with open(name + ".csv", 'w', newline='') as file:
        fieldnames = [
            'cursor_locx', 'cursor_locy', 'fish_locx', 'fish_locy',
            'inv_num', 'click', 'time_elapsed'
        ]
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        count = 0
        prev_time = time.time()
        with mouse.Listener(on_click=on_click) as listener:
            while 1:
                # Get a screenshot of the game window
                imfile = 'imfile.png'
                shot = mss.mss().grab(mon)
                mss.tools.to_png(shot.rgb, shot.size, output=imfile)
                image = cv2.imread(imfile)
                # Cursor location relative to the game window
                cursor_data = display.Display().screen().root.query_pointer()._data
                cursor_loc = (cursor_data["root_x"] - mon['left'],
                              cursor_data["root_y"] - mon['top'])
                # Fishing-spot template match on the game area (left of the inventory)
                fish_res = cv2.matchTemplate(image[:, :570], fish,
                                             cv2.TM_CCOEFF_NORMED)
                fish_spots = np.where(fish_res >= 0.7)
                # Offset each match from the template corner toward the spot centre
                fish_loc = [(pt[0] + 13, pt[1] + 17)
                            for pt in zip(*fish_spots[::-1])]
                close_fish = tuple(closest_coord(fish_loc))
                # Inventory item count
                item_count = 0
                for item in inv_items:
                    res = cv2.matchTemplate(image[:, 570:], item,
                                            cv2.TM_CCOEFF_NORMED)
                    inv_spots = np.where(res >= 0.8)
                    inv_loc = [pt for pt in zip(*inv_spots[::-1])]
                    item_count += len(inv_loc)
                # Click flag set by the mouse listener callback
                click_loc = 0
                if click:
                    click_loc = 1
                # screen turn (hard, do later :P)
                time_elapsed = time.time() - prev_time
                prev_time = time.time()
                # Write the normalised features to the CSV
                writer.writerow({
                    'cursor_locx': cursor_loc[0] / mon['width'],
                    'cursor_locy': cursor_loc[1] / mon['height'],
                    'fish_locx': close_fish[0] / mon['width'],
                    'fish_locy': close_fish[1] / mon['height'],
                    'inv_num': item_count / 28,
                    'click': click_loc,
                    'time_elapsed': time_elapsed
                })
                count += 1
                click = False
            listener.join()
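# live_play_to_table() registers an on_click callback with pynput's
# mouse.Listener and reads a global `click` flag. A minimal sketch of that
# callback (assumed; the original version is not shown):
def on_click(x, y, button, pressed):
    global click
    if pressed:
        # Flag the press; the recording loop resets the flag after writing a row
        click = True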
def get_monitor():
    with mss.mss() as sct:
        size = pag.size()
        mon = {"top": 0, "left": 0, "width": size.width, "height": size.height}
        return np.array(sct.grab(mon))
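# get_monitor() only uses pyautogui (aliased here as `pag`) to read the screen
# size. mss exposes the same region directly as sct.monitors[0] (the combined
# virtual screen), so an equivalent sketch without pyautogui is:
def get_monitor_mss_only():
    with mss.mss() as sct:
        return np.array(sct.grab(sct.monitors[0]))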
import time

import cv2
import mss
import numpy as np
import pyautogui as pg
from PIL import Image

template = cv2.imread("Senkoface.png", cv2.IMREAD_GRAYSCALE)
w, h = template.shape[::-1]

with mss.mss() as sct:
    monitor = {"top": 40, "left": 0, "width": 800, "height": 640}
    while "Screen capturing":
        last_time = time.time()
        img = np.array(sct.grab(monitor))
        cv2.imshow("OpenCV/Numpy normal", img)
        print("fps: {}".format(1 / (time.time() - last_time)))
        gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        res = cv2.matchTemplate(gray_frame, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= 0.7)
        # Draw a box around every match above the threshold
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 3)
        cv2.imshow("Frame", img)
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break