Example #1
    def start_macro(self):
        if not self.macro_process:
            self.toggle_macro_process()
        keymap = self.get_keymap()
        if not keymap:
            showerror(APP_TITLE, "Failed to read key settings. Please set the keys again.")
        else:
            if not self.platform_file_dir.get():
                showwarning(APP_TITLE, "Please select a terrain file.")
            else:
                if not MapleScreenCapturer().ms_get_screen_hwnd():
                    showwarning(APP_TITLE, "Maple window was not found. Please run Maple.")
                else:
                    cap = MapleScreenCapturer()
                    hwnd = cap.ms_get_screen_hwnd()
                    rect = cap.ms_get_screen_rect(hwnd)
                    self.log("MS hwnd", hwnd)
                    self.log("MS rect", rect)
                    self.log("Out Queue put:", self.platform_file_dir.get())
                    if rect[0] < 0 or rect[1] < 0:
                        showwarning(
                            APP_TITLE,
                            "Failed to get the position of the Maple window.\nMove the Maple window so that its top-left corner is within the screen."
                        )
                    else:
                        cap.capture()
                        self.macro_process_out_queue.put(
                            ("start", keymap, self.platform_file_dir.get()))
                        self.macro_start_button.configure(state=DISABLED)
                        self.macro_end_button.configure(state=NORMAL)
                        self.platform_file_button.configure(state=DISABLED)
Example #2
    def start_macro(self):
        # print(MapleScreenCapturer().ms_get_screen_hwnd())
        if not self.macro_process:
            self.toggle_macro_process()
        keymap = self.get_keymap()
        if not keymap:
            showerror(APP_TITLE,
                      "Failed to read key settings. Please set the keys again.")
        else:
            if not self.platform_file_dir.get():
                showwarning(APP_TITLE, "Please select a terrain file.")
            else:
                if not MapleScreenCapturer().ms_get_screen_hwnd():
                    showwarning(
                        APP_TITLE,
                        "Maple window was not found. Please run Maple.")
                else:
                    cap = MapleScreenCapturer()
                    hwnd = cap.ms_get_screen_hwnd()
                    rect = cap.ms_get_screen_rect(hwnd)
                    self.log("MS hwnd", hwnd)
                    self.log("MS rect", rect)
                    self.log("Out Queue put:", self.platform_file_dir.get())
                    if rect[0] < 0 or rect[1] < 0:
                        showwarning(
                            APP_TITLE,
                            "Failed to get the position of the Maple window.\nMove the Maple window so that its top-left corner is within the screen."
                        )
                    else:
                        cap.capture()
                        self.macro_process_out_queue.put(
                            ("start", keymap, self.platform_file_dir.get()))
                        self.macro_start_button.configure(state=DISABLED)
                        self.macro_end_button.configure(state=NORMAL)
                        self.platform_file_button.configure(state=DISABLED)
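
Both examples above hand work to a separate macro process by putting a command tuple on macro_process_out_queue. A minimal sketch of the receiving side, assuming a standard multiprocessing.Queue; only the ("start", keymap, platform_file) tuple format is taken from the code above, while run_macro, the sample keymap, the file name, and the "stop" command are illustrative assumptions:

from multiprocessing import Process, Queue

def run_macro(keymap, platform_file):
    # hypothetical worker entry point; the real macro logic lives elsewhere
    print("starting macro with", keymap, platform_file)

def macro_process_main(in_queue):
    while True:
        command = in_queue.get()  # blocks until the GUI sends a command
        if command[0] == "start":
            _, keymap, platform_file = command
            run_macro(keymap, platform_file)
        elif command[0] == "stop":  # assumed counterpart to "start"
            break

if __name__ == "__main__":
    out_queue = Queue()
    worker = Process(target=macro_process_main, args=(out_queue,))
    worker.start()
    out_queue.put(("start", {"jump": "alt"}, "terrain.platform"))  # sample command
    out_queue.put(("stop", None, None))
    worker.join()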
Example #3
# -*- coding:utf-8 -*-
import sys
sys.path.append("../src")
from screen_processor import MapleScreenCapturer
import cv2, time, imutils, math, glob, random
import numpy as np
from win32gui import SetForegroundWindow

cap = MapleScreenCapturer()

x, y, w, h = 450, 180, 500, 130
ds = None
while True:
    img = cap.capture(rect=[0, 0, 1600, 900], set_focus=False)
    img_arr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    final_img = imutils.resize(img_arr, width=200)
    cv2.imshow("s to save image", final_img)
    inp = cv2.waitKey(1)

    if inp == ord("s"):
        SetForegroundWindow(cap.ms_get_screen_hwnd())
        time.sleep(0.3)
        ds = cap.capture(set_focus=False)
        ds = cv2.cvtColor(np.array(ds), cv2.COLOR_RGB2BGR)
        ds = ds[y:y + h, x:x + w]
        print("saved")

    elif inp == ord("q"):
        cv2.destroyAllWindows()
        break
    elif inp == ord("r"):
        imgpath = "C:\\Users\\tttll\\PycharmProjects\\MacroSTory\\rune_trainer\\images\\screenshots\\finished\\*.png"
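
The "s" branch above only crops the capture into ds and prints "saved" without writing anything to disk, and the "r" branch is truncated at the glob pattern. A minimal way to actually persist the crop, reusing the cv2 and time imports from the script; the directory and timestamped filename scheme are assumptions, not from the original:

import os

save_dir = "images/screenshots"
os.makedirs(save_dir, exist_ok=True)
if ds is not None:
    # write the cropped rune region captured by the "s" branch
    cv2.imwrite(os.path.join(save_dir, "rune_%d.png" % int(time.time())), ds)
    print("saved to", save_dir)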
Example #4
# -*- coding:utf-8 -*-
from screen_processor import MapleScreenCapturer, StaticImageProcessor
import cv2, imutils, time
import numpy as np
from win32gui import SetForegroundWindow

cap = MapleScreenCapturer()
ct = StaticImageProcessor(cap)
while True:
    img = cap.capture(set_focus=False)
    img_arr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    img_arr = img_arr[
        ct.default_minimap_scan_area[1]:ct.default_minimap_scan_area[3],
        ct.default_minimap_scan_area[0]:ct.default_minimap_scan_area[2]]
    #grayscale = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
    #blurred = cv2.GaussianBlur(grayscale, (7,7), 5)
    #eroded = cv2.erode(blurred, (7,7))
    #dilated = cv2.dilate(eroded, (7,7))
    #canny = cv2.Canny(eroded, threshold1=210, threshold2=255)
    #canny = imutils.resize(dilated, width = 500)
    final_img = img_arr

    cv2.imshow("", imutils.resize(final_img, width=400))

    inp = cv2.waitKey(1)
    if inp == ord("q"):
        cv2.destroyAllWindows()
        break
    elif inp == ord("c"):
        SetForegroundWindow(cap.ms_get_screen_hwnd())
        time.sleep(1)
        ds = cap.capture(set_focus=False)
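
The commented-out lines inside the loop sketch an edge-detection experiment that was never wired in. A working version of that pipeline, assuming the same img from cap.capture(); note that cv2.erode and cv2.dilate expect a real kernel array, not a bare (7, 7) tuple as in the commented code:

kernel = np.ones((7, 7), np.uint8)
grayscale = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)
blurred = cv2.GaussianBlur(grayscale, (7, 7), 5)
eroded = cv2.erode(blurred, kernel)
canny = cv2.Canny(eroded, threshold1=210, threshold2=255)
cv2.imshow("canny", imutils.resize(canny, width=500))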
Example #5
# Imports needed to make this excerpt self-contained. The win32/TF/keras ones
# are standard; the project-local module paths below are assumptions.
import logging
import time

import cv2
import numpy as np
from tensorflow import device
from keras.models import load_model
from win32api import GetKeyState
from win32con import VK_NUMLOCK

from screen_processor import MapleScreenCapturer
from keystate_manager import KeyboardInputManager  # project-local (path assumed)
from directinput_constants import (DIK_UP, DIK_DOWN, DIK_LEFT, DIK_RIGHT,
                                   DIK_SPACE, DIK_NUMLOCK)  # project-local (path assumed)


class RuneDetector:
    def __init__(self,
                 model_path,
                 labels=None,
                 screen_capturer=None,
                 key_mgr=None):
        """
        Run just Once to initialize
        :param model_path: Path to trained keras model
        :param labels: dictionary with class names as keys, integer as values
        example: {'down': 0, 'left': 1, 'right': 2, 'up': 3}
        """
        self.logger = logging.getLogger("RuneDetector")
        self.logger.setLevel(logging.DEBUG)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        fh = logging.FileHandler("logging.log")
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.labels = labels if labels else {
            'down': 0,
            'left': 1,
            'right': 2,
            'up': 3
        }
        self.model_path = model_path
        with device("/cpu:0"):  # Use cpu for evaluation
            model = load_model(self.model_path)
            #model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
            model.load_weights(self.model_path)

        self.model = model

        self.rune_roi_1366 = [450, 180, 500, 130]  # x, y, w, h
        self.rune_roi_1024 = [295, 180, 500, 133]
        self.rune_roi_800 = [170, 200, 440, 135]
        self.rune_roi = self.rune_roi_800  # set as default rune roi
        self.screen_processor = screen_capturer if screen_capturer else MapleScreenCapturer()
        self.key_mgr = key_mgr if key_mgr else KeyboardInputManager()

    def capture_roi(self):
        screen_rect = self.screen_processor.ms_get_screen_rect(
            self.screen_processor.ms_get_screen_hwnd())
        screen_width = screen_rect[2] - screen_rect[0]

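        # pick the ROI preset that matches the client width (1366 / 1024 / 800)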
        if screen_width > 1300:
            self.rune_roi = self.rune_roi_1366
        elif screen_width > 1000:
            self.rune_roi = self.rune_roi_1024
        elif screen_width > 800:
            self.rune_roi = self.rune_roi_800

        captured_image = self.screen_processor.capture(set_focus=False,
                                                       rect=screen_rect)

        captured_roi = cv2.cvtColor(np.array(captured_image),
                                    cv2.COLOR_RGB2BGR)

        x, y, w, h = self.rune_roi
        captured_roi = captured_roi[y:y + h, x:x + w]

        return captured_roi

    def preprocess(self, img):
        """
        Finds and returns a left-to-right sorted list of 60x60 grayscale images of circles, each centered
        :param img: BGR image of the roi containing the circles
        :return: list of grayscale images, each containing one circle
        """
        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hsv_img[:, :, 1] = 255
        hsv_img[:, :, 2] = 255
        bgr_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
        gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)

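        # Hough transform tuned for the rune arrows: radii of 18-30 px,
        # centers at least image_height / 8 apart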
        circles = cv2.HoughCircles(gray_img,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   gray_img.shape[0] / 8,
                                   param1=100,
                                   param2=30,
                                   minRadius=18,
                                   maxRadius=30)
        temp_list = []
        img_index = 1
        if circles is not None:
            circles = np.round(circles[0, :]).astype("int")
            for (x, y, r) in circles:

                # crop a 60x60 patch centered on the detected circle
                cropped = gray_img[max(0, y - 30):y + 30,
                                   max(0, x - 30):x + 30].astype(np.float32)

                temp_list.append((cropped, (x, y)))
                img_index += 1

        # sort left-to-right by circle center x so arrow order matches the rune
        temp_list = sorted(temp_list, key=lambda entry: entry[1][0])
        return_list = [image for image, _ in temp_list]

        return return_list

    def images2tensor(self, img_list):
        """
        Creates a tf-compliant tensor by stacking the images in img_list.
        :param img_list: list of 60x60 grayscale images
        :return: np.array of shape [N, 60, 60, 1], where N is len(img_list)
        """
        return np.vstack([np.reshape(x, [1, 60, 60, 1]) for x in img_list])

    def classify(self, tensor, batch_size=4):
        """
        Runs the tensor through the model and returns the predicted directions.
        :param tensor: input tensor
        :param batch_size: batch size
        :return: list of strings from "up", "down", "left", "right"
        """
        return_list = []
        result = self.model.predict(tensor, batch_size=batch_size)
        for res in result:
            final_class = np.argmax(res, axis=-1)
            for key, val in self.labels.items():
                if final_class == val:
                    return_list.append(key)

        return return_list

    def solve_auto(self):
        """
        Solves rune if present and sends key presses.
        :return: -1 if rune not detected, number of solved arrows (4) on success
        """
        img = self.capture_roi()
        processed_imgs = self.preprocess(img)
        if len(processed_imgs) != 4:
            self.logger.debug("Failed to extract 4 ROI from processed image")
            return -1
        #cv2.imwrite("roi.png", img)
        tensor = self.images2tensor(processed_imgs)
        result = self.classify(tensor)
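        # Num Lock can interfere with the arrow-key scan codes, so toggle it
        # off before sending the solution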
        if GetKeyState(VK_NUMLOCK):
            self.key_mgr.single_press(DIK_NUMLOCK)
            time.sleep(0.2)
        self.logger.debug("Solved rune with solution %s" % (str(result)))
        for inp in result:
            if inp == "up":
                self.key_mgr.single_press(DIK_UP)
            elif inp == "down":
                self.key_mgr.single_press(DIK_DOWN)
            elif inp == "left":
                self.key_mgr.single_press(DIK_LEFT)
            elif inp == "right":
                self.key_mgr.single_press(DIK_RIGHT)
            time.sleep(0.1)
        return len(processed_imgs)

    def press_space(self):
        self.key_mgr.single_press(DIK_SPACE)

    def solve(self):
        """
        Solves rune if present and just returns solution.
        :return: -1 if rune not detected, result of classify() if successful
        """
        img = self.capture_roi()
        processed_imgs = self.preprocess(img)
        if len(processed_imgs) != 4:
            return -1
        tensor = self.images2tensor(processed_imgs)
        result = self.classify(tensor)

        return result
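
A minimal usage sketch for the class above, assuming a MapleStory window is open; the model path is a placeholder, not the project's actual model file:

if __name__ == "__main__":
    detector = RuneDetector("rune_model.h5")  # hypothetical model path
    solution = detector.solve()
    if solution == -1:
        print("no rune detected in the capture region")
    else:
        print("arrow sequence:", solution)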