Example #1
File: main.py  Project: ssmith21/0srsBOT
def q(debug_type, debugImg, threshold):
    # Debug helper: shows a different view of the bot's vision depending on debug_type.
    # Relies on module-level globals from main.py: wincap (a WindowCapture) and OpenCV imported as cv.
    hsv_filter_vals = HsvFilter(114, 0, 0, 179, 255, 255, 0, 0, 62, 233)
    if (debug_type == 1):
        # When we're not looking for processed images
        while (True):
            if (cv.waitKey(1) == ord('q')):
                cv.destroyAllWindows()
                break
            screenshot = wincap.get_screenshot()
            rectangles = debugImg.find(screenshot, threshold)
            output_img = debugImg.drawRectangles(screenshot, rectangles)
            cv.imshow('Matches', output_img)
    if (debug_type == 2):
        # When we want to test for processed images
        debugImg.init_control_gui()
        while (True):
            if (cv.waitKey(1) == ord('q')):
                cv.destroyAllWindows()
                break
            screenshot = wincap.get_screenshot()
            processed_img = debugImg.apply_hsv_filter(screenshot)
            cv.imshow('Bot vision', processed_img)
    if (debug_type == 3):
        # When we want to see what the bot sees
        while (True):
            if (cv.waitKey(1) == ord('q')):
                cv.destroyAllWindows()
                break
            screenshot = wincap.get_screenshot()
            processed_img = debugImg.apply_hsv_filter(screenshot,
                                                      hsv_filter_vals)
            cv.imshow('Bot vision', processed_img)
    if (debug_type == 4):
        # When we want to detect needles in the processed-image haystack (rectangles)
        while (True):
            if (cv.waitKey(1) == ord('q')):
                cv.destroyAllWindows()
                break
            screenshot = wincap.get_screenshot()
            processed_img = debugImg.apply_hsv_filter(screenshot,
                                                      hsv_filter_vals)
            rectangles_points = debugImg.find(processed_img, threshold)
            output_img_regu = debugImg.drawRectangles(screenshot,
                                                      rectangles_points)
            cv.imshow('Matches', output_img_regu)
    if (debug_type == 5):
        # When we want to detect needles in the processed-image haystack (crosshairs)
        while (True):
            if (cv.waitKey(1) == ord('q')):
                cv.destroyAllWindows()
                break
            screenshot = wincap.get_screenshot()
            processed_img = debugImg.apply_hsv_filter(screenshot,
                                                      hsv_filter_vals)
            rectangles_points = debugImg.find(processed_img, threshold)
            crosshair_points = debugImg.getClickPoints(rectangles_points)
            output_img_proc = debugImg.drawCrosshairs(processed_img,
                                                      crosshair_points)
            cv.imshow('Matches', output_img_proc)
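Nothing in this excerpt shows how q() is invoked. A hypothetical call, assuming debugImg is a Vision-style object like the one constructed in Example #3; the needle-image path and threshold below are illustrative, not taken from the original project:

# Hypothetical usage -- debugImg must provide find(), drawRectangles(),
# apply_hsv_filter(), init_control_gui(), getClickPoints() and drawCrosshairs().
vision = Vision('needle.jpg')                      # illustrative needle image
q(debug_type=4, debugImg=vision, threshold=0.7)    # show matches on the HSV-filtered screenshot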
Example #2
    def get_hsv_filter_from_controls(self):
        # Get the current positions of all trackbars
        hsv_filter = HsvFilter()
        hsv_filter.hMin = cv.getTrackbarPos('HMin', self.TRACKBAR_WINDOW)
        hsv_filter.sMin = cv.getTrackbarPos('SMin', self.TRACKBAR_WINDOW)
        hsv_filter.vMin = cv.getTrackbarPos('VMin', self.TRACKBAR_WINDOW)
        hsv_filter.hMax = cv.getTrackbarPos('HMax', self.TRACKBAR_WINDOW)
        hsv_filter.sMax = cv.getTrackbarPos('SMax', self.TRACKBAR_WINDOW)
        hsv_filter.vMax = cv.getTrackbarPos('VMax', self.TRACKBAR_WINDOW)
        hsv_filter.sAdd = cv.getTrackbarPos('SAdd', self.TRACKBAR_WINDOW)
        hsv_filter.sSub = cv.getTrackbarPos('SSub', self.TRACKBAR_WINDOW)
        hsv_filter.vAdd = cv.getTrackbarPos('VAdd', self.TRACKBAR_WINDOW)
        hsv_filter.vSub = cv.getTrackbarPos('VSub', self.TRACKBAR_WINDOW)
        return hsv_filter
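Examples #1 and #3 build the HsvFilter with ten positional arguments, while the getter above fills an empty one field by field. A minimal sketch of a container consistent with both usages; the real class in these projects may differ (it may be a dataclass, for instance):

class HsvFilter:
    """Plain container for the ten HSV filter parameters used in these examples."""

    def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None,
                 vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None):
        # order matches the positional calls seen above:
        # HsvFilter(hMin, sMin, vMin, hMax, sMax, vMax, sAdd, sSub, vAdd, vSub)
        self.hMin, self.sMin, self.vMin = hMin, sMin, vMin
        self.hMax, self.sMax, self.vMax = hMax, sMax, vMax
        self.sAdd, self.sSub = sAdd, sSub
        self.vAdd, self.vSub = vAdd, vSub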
Example #3
import os
from time import time

# WindowCapture is assumed to live in the project's own windowcapture module
from windowcapture import WindowCapture
from vision import Vision
from hsvfilter import HsvFilter

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
wincap = WindowCapture('Albion Online Client')
# initialize the Vision class
vision_limestone = Vision('albion_limestone_processed.jpg')
# initialize the trackbar window
vision_limestone.init_control_gui()

# limestone HSV filter
hsv_filter = HsvFilter(0, 180, 129, 15, 229, 243, 143, 0, 67, 0)

loop_time = time()
while (True):

    # get an updated image of the game
    screenshot = wincap.get_screenshot()

    # pre-process the image
    processed_image = vision_limestone.apply_hsv_filter(screenshot, hsv_filter)

    # do object detection
    rectangles = vision_limestone.find(processed_image, 0.46)

    # draw the detection results onto the original image
    output_image = vision_limestone.draw_rectangles(screenshot, rectangles)
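    # --- The excerpt above stops mid-loop. A minimal sketch of how such a loop is
    # --- typically finished (display, FPS readout, 'q'-to-quit), mirroring Example #1;
    # --- it assumes OpenCV is imported as cv (import cv2 as cv).
    cv.imshow('Matches', output_image)

    # print FPS and reset the timer started above with loop_time = time()
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()

    # press 'q' with the output window focused to exit
    if cv.waitKey(1) == ord('q'):
        cv.destroyAllWindows()
        break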
Example #4
class FishingBot:

    #properties
    fish_pos_x = None
    fish_pos_y = None
    fish_last_time = None
    detect_text_enable = False
    botting = False

    FISH_RANGE = 74
    FISH_VELO_PREDICT = 30

    BAIT_POSITION = (473, 750)
    FISH_POSITION = (440, 750)

    FILTER_CONFIG = [49, 0, 58, 134, 189, 189, 0, 0, 0, 0]

    FISH_WINDOW_CLOSE = (430, 115)

    # position of the fish window
    # this value can differ depending on the size of the game window

    FISH_WINDOW_SIZE = (280, 226)
    FISH_WINDOW_POSITION = (163, 125)

    wincap = None

    fishfilter = Filter() if detect_text_enable else None

    # Load the needle image

    needle_img = cv.imread('images/fiss.jpg', cv.IMREAD_UNCHANGED)
    needle_img_clock = cv.imread('images/clock.jpg', cv.IMREAD_UNCHANGED)

    # Some time cooldowns

    detect_text = True

    # Limit time

    initial_time = None

    end_time_enable = False

    end_time = 0

    # for fps

    loop_time = time()

    # The mouse click cooldown

    timer_mouse = time()

    # The timer between the states

    timer_action = time()

    bait_time = 2
    throw_time = 2
    game_time = 2

    # These are the HSV filter parameters; they help find the right image
    hsv_filter = HsvFilter(*FILTER_CONFIG)

    state = 0

    def detect(self, haystack_img):

        # match the needle image against the haystack image
        result = cv.matchTemplate(haystack_img, self.needle_img,
                                  cv.TM_CCOEFF_NORMED)

        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)

        # needle_image's dimensions
        needle_w = self.needle_img.shape[1]
        needle_h = self.needle_img.shape[0]

        # get the position of the match image
        top_left = max_loc
        bottom_right = (top_left[0] + needle_w, top_left[1] + needle_h)

        # Draw a circle marking the fish detection range
        cv.circle(
            haystack_img,
            (int(haystack_img.shape[1] / 2), int(haystack_img.shape[0] / 2)),
            self.FISH_RANGE,
            color=(0, 0, 255),
            thickness=1)

        # Only act when the best match score is greater than 0.5
        if max_val > 0.5:
            pos_x = (top_left[0] + bottom_right[0]) / 2
            pos_y = (top_left[1] + bottom_right[1]) / 2

            if self.fish_last_time:
                dist = math.sqrt((pos_x - self.fish_pos_x)**2 +
                                 (self.fish_pos_y - pos_y)**2)
                cv.rectangle(haystack_img,
                             top_left,
                             bottom_right,
                             color=(0, 255, 0),
                             thickness=2,
                             lineType=cv.LINE_4)

                # Calculate the fish velocity
                velo = dist / (time() - self.fish_last_time)

                if velo == 0.0:
                    return (pos_x, pos_y, True)
                elif velo >= 150:

                    # At this velocity, predict where the fish will move next

                    pro = self.FISH_VELO_PREDICT / dist
                    destiny_x = int(pos_x + (pos_x - self.fish_pos_x) * pro)
                    destiny_y = int(pos_y + (pos_y - self.fish_pos_y) * pro)

                    # Draw the predict line

                    cv.line(haystack_img, (int(pos_x), int(pos_y)),
                            (destiny_x, destiny_y), (0, 255, 0),
                            thickness=3)

                    return (destiny_x, destiny_y, False)

            # get the fish position and the time

            self.fish_pos_x = pos_x
            self.fish_pos_y = pos_y
            self.fish_last_time = time()
        return None

    def detect_minigame(self, haystack_img):
        result = cv.matchTemplate(haystack_img, self.needle_img_clock,
                                  cv.TM_CCOEFF_NORMED)

        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        if max_val > 0.9:
            return True

        return False

    def set_to_begin(self, values):

        if values['-ENDTIMEP-']:
            self.end_time_enable = True
            try:
                self.end_time = int(values['-ENDTIME-']) * 60
            except:
                self.end_time = 0

        self.bait_time = values['-BAITTIME-']
        self.throw_time = values['-THROWTIME-']
        self.game_time = values['-STARTGAME-']

        self.wincap = WindowCapture('METIN2')
        self.state = 0
        self.initial_time = time()
        self.timer_action = time()

    def runHack(self):
        screenshot = self.wincap.get_screenshot()

        # crop and apply the HSV filter
        crop_img = screenshot[
            self.FISH_WINDOW_POSITION[1]:self.FISH_WINDOW_POSITION[1] +
            self.FISH_WINDOW_SIZE[1],
            self.FISH_WINDOW_POSITION[0]:self.FISH_WINDOW_POSITION[0] +
            self.FISH_WINDOW_SIZE[0]]
        detect_end_img = screenshot[
            self.FISH_WINDOW_POSITION[1]:self.FISH_WINDOW_POSITION[1] +
            self.FISH_WINDOW_SIZE[1],
            self.FISH_WINDOW_POSITION[0]:self.FISH_WINDOW_POSITION[0] +
            self.FISH_WINDOW_SIZE[0]]
        crop_img = self.hsv_filter.apply_hsv_filter(crop_img)

        cv.putText(crop_img, 'FPS: ' + str(1 / (time() - self.loop_time))[:2],
                   (10, 200), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv.putText(
            crop_img, 'State: ' + str(self.state) + ' ' +
            str(time() - self.timer_action)[:5], (10, 160),
            cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        self.loop_time = time()

        # Verify total time

        if self.end_time_enable and time() - self.initial_time > self.end_time:
            self.botting = False

        # State 0: click to put the bait on the rod

        if self.state == 0:
            mouse_x = int(self.BAIT_POSITION[0] + self.wincap.offset_x)
            mouse_y = int(self.BAIT_POSITION[1] + self.wincap.offset_y)

            if time() - self.timer_action > self.bait_time:
                self.detect_text = True
                pydirectinput.click(x=mouse_x, y=mouse_y, button='right')
                self.state = 1
                self.timer_action = time()

        # State to throw the bait

        if self.state == 1:
            if time() - self.timer_action > self.throw_time:
                mouse_x = int(self.FISH_POSITION[0] + self.wincap.offset_x)
                mouse_y = int(self.FISH_POSITION[1] + self.wincap.offset_y)
                pydirectinput.click(x=mouse_x, y=mouse_y, button='right')
                self.state = 2
                self.timer_action = time()

        # Delay to start the clicks

        if self.state == 2:
            if time() - self.timer_action > self.game_time:
                self.state = 3
                self.timer_action = time()

        # Countdown to finish the state

        detected_end = self.detect_minigame(detect_end_img)

        if self.state == 3:

            if time() - self.timer_action > 15:
                self.timer_action = time()
                self.state = 0
            if time() - self.timer_action > 5 and detected_end is False:
                self.timer_action = time()
                self.state = 0

            if self.detect_text_enable and time() - self.timer_action > 1.5:
                if self.detect_text:
                    if self.fishfilter.match_with_text(screenshot) is False:
                        mouse_x = int(self.wincap.offset_x +
                                      self.FISH_WINDOW_CLOSE[0])
                        mouse_y = int(self.wincap.offset_y +
                                      self.FISH_WINDOW_CLOSE[1])
                        pydirectinput.click(x=mouse_x,
                                            y=mouse_y,
                                            button='left')
                        pydirectinput.click(x=mouse_x,
                                            y=mouse_y,
                                            button='left')

                self.detect_text = False

        # make the click

        if (time() -
                self.timer_mouse) > 0.3 and self.state == 3 and detected_end:

            # Detect the fish

            square_pos = self.detect(crop_img)

            if square_pos:

                # Recalculate the mouse position with the fish position

                pos_x = square_pos[0]
                pos_y = square_pos[1]

                center_x = self.FISH_WINDOW_SIZE[0] / 2
                center_y = self.FISH_WINDOW_SIZE[1] / 2

                mouse_x = int(pos_x)
                mouse_y = int(pos_y)

                # Verify if the fish is in range

                d = self.FISH_RANGE**2 - ((center_x - mouse_x)**2 +
                                          (center_y - mouse_y)**2)

                # Make the click

                if (d > 0):
                    self.timer_mouse = time()

                    mouse_x = int(pos_x + self.FISH_WINDOW_POSITION[0] +
                                  self.wincap.offset_x)
                    mouse_y = int(pos_y + self.FISH_WINDOW_POSITION[1] +
                                  self.wincap.offset_y)

                    pydirectinput.click(x=mouse_x, y=mouse_y)
        '''
        cv.imshow('Minha Janela', crop_img)

        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
            return True
        '''

        return crop_img
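A sketch of how this class might be driven from a GUI event loop. The value keys ('-ENDTIMEP-', '-ENDTIME-', '-BAITTIME-', '-THROWTIME-', '-STARTGAME-') come from set_to_begin() above, but the driver itself is an assumption, not part of the original file:

# Hypothetical driver loop
bot = FishingBot()

# values as they might come from a PySimpleGUI window.read() call
values = {
    '-ENDTIMEP-': True,   # enable the time limit
    '-ENDTIME-': '30',    # stop after 30 minutes
    '-BAITTIME-': 2,
    '-THROWTIME-': 2,
    '-STARTGAME-': 2,
}

bot.set_to_begin(values)
bot.botting = True

while bot.botting:
    debug_img = bot.runHack()   # returns the filtered crop, e.g. for optional display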
Example #5
    def get_hsv_filter_from_controls(self):
        hsv_filter = HsvFilter()
        hsv_filter.Hmin = cv2.getTrackbarPos("Hmin", self.TRACKBAR_WINDOW)
        hsv_filter.Smin = cv2.getTrackbarPos("Smin", self.TRACKBAR_WINDOW)
        hsv_filter.Vmin = cv2.getTrackbarPos("Vmin", self.TRACKBAR_WINDOW)
        hsv_filter.Hmax = cv2.getTrackbarPos("Hmax", self.TRACKBAR_WINDOW)
        hsv_filter.Smax = cv2.getTrackbarPos("Smax", self.TRACKBAR_WINDOW)
        hsv_filter.Vmax = cv2.getTrackbarPos("Vmax", self.TRACKBAR_WINDOW)
        hsv_filter.Sadd = cv2.getTrackbarPos("Sadd", self.TRACKBAR_WINDOW)
        hsv_filter.Ssub = cv2.getTrackbarPos("Ssub", self.TRACKBAR_WINDOW)
        hsv_filter.Vadd = cv2.getTrackbarPos("Vadd", self.TRACKBAR_WINDOW)
        hsv_filter.Vsub = cv2.getTrackbarPos("Vsub", self.TRACKBAR_WINDOW)
        return hsv_filter
Example #6
# Note: Neural Net response images placed after image 5380

mouse = Controller()

CATEGORIES = ['wait', 'shoot']

# img=data_tools().data_img_read_test(filepath='images/just missed/3642.jpg')

model=tf.keras.models.load_model('models/32x4_v3.2-CNN.model')

wincap = WindowCapture('VALORANT  ')
selected_vision = VisionAdjust()
# new_vision = selected_vision.init_control_gui()

hsv_filter = HsvFilter(0,141,0,179,255,255,0,0,0,0)
y = 442
x = 898
width = 104
height = 169

count=10006

loop_time = time()

while(True):
    screenshot = wincap.get_screenshot()
    crop_img = screenshot[y:y+height, x:x+width]
    output_image_og = selected_vision.apply_hsv_filter(crop_img, hsv_filter)
    output_image = output_image_og.reshape(-1, 104, 169, 3)
    prediction=model.predict([output_image])
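    # --- The excerpt ends right after the forward pass. What follows is a minimal
    # --- sketch (an assumption, not the original file) of turning the prediction into
    # --- an action via the CATEGORIES list defined above; it assumes the model outputs
    # --- one score per CATEGORIES entry, numpy imported as np, and that `mouse` is a
    # --- pynput.mouse.Controller (with Button also imported from pynput.mouse).
    action = CATEGORIES[int(np.argmax(prediction[0]))]

    if action == 'shoot':
        mouse.click(Button.left)   # fire once when the model predicts 'shoot'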
Example #7
    cap = cv.VideoCapture('mario.mp4')

    # load the trained models
    neutral_b = cv.CascadeClassifier('cascade_models/cascade_copy.xml')
    jab = cv.CascadeClassifier('cascade_models/jab.xml')
    sheild = cv.CascadeClassifier('cascade_models/sheild.xml')

    count = MoveCounter()
    t = TimeTracker()
    labels = MoveLabels()
    tracker = Tracker()

    # This stores the locations at which images are recognized.
    move_locs = {labels.neutral_b: [], labels.jab: [], labels.shield: []}

    mario_training_filter = HsvFilter(0, 5, 0, 179, 255, 255, 0, 17, 0, 0)

    # initialize vision class
    vision = Vision('')

    while (True):

        # get an updated image of the game
        # USE THIS IF YOU ARE USING SCREENCAPTURE
        # smash_screenshot = wincap.get_screenshot()

        # USE THIS IF YOU ARE READING A VIDEO FILE
        ret, smash_screenshot = cap.read()

        # apply filter to img.
        output_image = vision.apply_hsv_filter(smash_screenshot, mario_training_filter)
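        # --- The excerpt ends before the loaded cascades are used. A minimal sketch
        # --- (an assumption, not the original loop body) of running each cascade on
        # --- the filtered frame and collecting hits into move_locs:
        gray = cv.cvtColor(output_image, cv.COLOR_BGR2GRAY)
        for label, cascade in ((labels.neutral_b, neutral_b),
                               (labels.jab, jab),
                               (labels.shield, sheild)):
            for (x, y, w, h) in cascade.detectMultiScale(gray):
                move_locs[label].append((x, y, w, h))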
Example #8
    def getHsvFilterFromControls(self):
        # Read the current positions of all trackbars
        hsv_filter = HsvFilter()
        hsv_filter.hMin = cv.getTrackbarPos('HMin', self.TRACKBAR_WINDOW)
        hsv_filter.sMin = cv.getTrackbarPos('SMin', self.TRACKBAR_WINDOW)
        hsv_filter.vMin = cv.getTrackbarPos('VMin', self.TRACKBAR_WINDOW)
        hsv_filter.hMax = cv.getTrackbarPos('HMax', self.TRACKBAR_WINDOW)
        hsv_filter.sMax = cv.getTrackbarPos('SMax', self.TRACKBAR_WINDOW)
        hsv_filter.vMax = cv.getTrackbarPos('VMax', self.TRACKBAR_WINDOW)
        hsv_filter.sAdd = cv.getTrackbarPos('SAdd', self.TRACKBAR_WINDOW)
        hsv_filter.sSub = cv.getTrackbarPos('SSub', self.TRACKBAR_WINDOW)
        hsv_filter.vAdd = cv.getTrackbarPos('VAdd', self.TRACKBAR_WINDOW)
        hsv_filter.vSub = cv.getTrackbarPos('VSub', self.TRACKBAR_WINDOW)
        return hsv_filter
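Every example above passes ten values (hMin, sMin, vMin, hMax, sMax, vMax, sAdd, sSub, vAdd, vSub) into HsvFilter, but none of them shows apply_hsv_filter itself. A minimal sketch of the kind of processing those fields imply: shift saturation and value by the add/sub amounts, then threshold with the min/max bounds. This is an assumption for illustration, not the projects' actual implementation:

import cv2 as cv
import numpy as np


def apply_hsv_filter_sketch(bgr_img, f):
    # Illustrative only: f is any object carrying the ten HsvFilter fields listed above.
    hsv = cv.cvtColor(bgr_img, cv.COLOR_BGR2HSV)
    h, s, v = cv.split(hsv)

    # shift saturation and value, clamping to the valid 0..255 range
    s = np.clip(s.astype(np.int16) + f.sAdd - f.sSub, 0, 255).astype(np.uint8)
    v = np.clip(v.astype(np.int16) + f.vAdd - f.vSub, 0, 255).astype(np.uint8)
    hsv = cv.merge([h, s, v])

    # keep only the pixels inside the min/max HSV bounds
    lower = np.array([f.hMin, f.sMin, f.vMin], dtype=np.uint8)
    upper = np.array([f.hMax, f.sMax, f.vMax], dtype=np.uint8)
    mask = cv.inRange(hsv, lower, upper)
    result = cv.bitwise_and(hsv, hsv, mask=mask)

    # convert back to BGR so the result displays like a normal screenshot
    return cv.cvtColor(result, cv.COLOR_HSV2BGR)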