コード例 #1
0
class EndgameSRProcessor(Processor):
    """Detect the endgame "competitive points" screen and OCR the player's SR from it."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # Grayscale template of the "competitive points" marker, matched against the
    # thresholded luma channel of the frame.
    COMPETITIVE_POINTS_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "competitive_points.png"), 0
    )
    # Minimum TM_CCORR_NORMED score for the screen to count as detected.
    COMPETITIVE_POINTS_THRESH = 0.8

    def process(self, frame: Frame) -> bool:
        """Check ``frame`` for the endgame SR screen.

        Records the template match score on ``frame.overwatch.endgame_sr_match`` and,
        when the screen is detected and the SR number parses, sets
        ``frame.overwatch.endgame_sr`` and returns True. Returns False otherwise.
        """
        y = frame.image_yuv[:, :, 0]
        im = self.REGIONS["competitive_points"].extract_one(y)
        _, thresh = cv2.threshold(im, 50, 255, cv2.THRESH_BINARY)
        match = np.max(cv2.matchTemplate(thresh, self.COMPETITIVE_POINTS_TEMPLATE, cv2.TM_CCORR_NORMED))

        frame.overwatch.endgame_sr_match = round(float(match), 5)

        if match > self.COMPETITIVE_POINTS_THRESH:
            sr_image = self.REGIONS["sr"].extract_one(y)
            sr = big_noodle.ocr_int(sr_image)
            if sr is None:
                # Plain string: the original used an f-string with no placeholders.
                logger.warning("Unable to parse SR")
            else:
                frame.overwatch.endgame_sr = EndgameSR(
                    sr, image=lazy_upload("end_sr", self.REGIONS.blank_out(frame.image), frame.timestamp)
                )
                return True

        return False
コード例 #2
0
class MapLoadingProcessor(Processor):
    """Detect the map-loading screen and record which map is being loaded."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # Map-name templates, keyed by each PNG's filename without its extension.
    TEMPLATES = {
        str(os.path.basename(path)).split(".")[0]: cv2.imread(path, 0)
        for path in glob.glob(os.path.join(os.path.dirname(__file__), "data", "*.png"))
    }

    # Minimum template-match score for a map name to be accepted.
    REQUIRED_MATCH = 0.9

    def eager_load(self) -> None:
        self.REGIONS.eager_load()

    def process(self, frame: Frame) -> bool:
        """Return True (and set ``frame.apex.map_loading``) when a map name matches."""
        luma = frame.image_yuv[:, :, 0]
        name_region = self.REGIONS["map_name"].extract_one(luma)
        _, binarised = cv2.threshold(name_region, 200, 255, cv2.THRESH_BINARY)

        match, map_name = imageops.match_templates(
            binarised,
            self.TEMPLATES,
            method=cv2.TM_CCORR_NORMED,
            required_match=0.95,
        )
        if match <= self.REQUIRED_MATCH:
            return False

        frame.apex.map_loading = MapLoading(map_name)
        return True
コード例 #3
0
class HomeScreenProcessor(Processor):
    """Detect the Valorant home screen using the PLAY/search button templates plus OCR."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    PLAY_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "play.png"), 0)
    SEARCH_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "search.png"), 0)

    def process(self, frame: Frame) -> bool:
        """Return True if the home screen is (or was already marked as) visible on ``frame``."""
        if frame.valorant.home_screen:
            return True

        luma = frame.image_yuv[:, :, 0]

        # Both button templates must match before we pay for OCR.
        play_found = imageops.match_thresh_template(
            self.REGIONS["play"].extract_one(luma),
            self.PLAY_TEMPLATE,
            130,
            0.8,
        )
        if not play_found:
            return False

        search_found = imageops.match_thresh_template(
            self.REGIONS["search"].extract_one(luma),
            self.SEARCH_TEMPLATE,
            100,
            0.8,
        )
        if not search_found:
            return False

        # Final confirmation: the play button must actually read (close to) "PLAY".
        play_text = imageops.ocr_region(
            frame,
            self.REGIONS,
            "play",
        )
        if levenshtein.distance(play_text.upper(), "PLAY") > 1:
            return False

        frame.valorant.home_screen = HomeScreen()
        draw_home_screen(frame.debug_image, frame.valorant.home_screen)
        return True

    def ocr_match(self, frame: Frame, region: str, target: str, requirement: float) -> bool:
        """OCR ``region`` and return True if its similarity to ``target`` exceeds ``requirement``."""
        text = self.ocr_region(frame, region)
        match = levenshtein.ratio(text.upper(), target.upper())
        logger.debug(
            f"OCR match {text.upper()!r} ~ {target.upper()!r} => {match:.2f} > {requirement:.2f} => {match > requirement}"
        )
        return match > requirement

    def ocr_region(self, frame: Frame, target_region: str):
        """Extract ``target_region`` from the frame and OCR it with the LSTM engine."""
        extracted = self.REGIONS[target_region].extract_one(frame.image)
        # Darkest channel, normalised and inverted so text is dark-on-light for tesseract.
        inverted = 255 - imageops.normalise(np.min(extracted, axis=2))
        return imageops.tesser_ocr(
            inverted,
            engine=imageops.tesseract_lstm,
        )
コード例 #4
0
class HeroProcessor(Processor):
    """Parse the current hero (via the weapon image), kill-cam/spectate/POTG banner state,
    and the ult charge percentage."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    # Yellow banner texts that indicate a non-gameplay state; OCR output is fuzzy-matched
    # against these in _parse_state.
    STATES = [
        "ELIMINATED BY",
        "YOU ARE NOW DEATH SPECTATING",
        "YOU ARE NOW SPECTATING",
        "PLAY OF THE GAME BY",
        "PLAY OF THE MATCH BY",
    ]
    # Class-level cache of (name, template) pairs, populated once by the first instance.
    WEAPON_TEMPLATES: Optional[List[Tuple[str, np.ndarray]]] = None
    ULT_CONTOUR_TEMPLATE = np.expand_dims(_circle(21, 40), 0)

    # TODO: add dva pilot, ashe
    def __init__(self) -> None:
        # Populate the template cache on the CLASS. The previous code assigned to
        # `self.WEAPON_TEMPLATES`, which only created an instance attribute while the
        # `is None` guard read the class attribute — so every new instance reloaded
        # all templates from disk, defeating the cache.
        if HeroProcessor.WEAPON_TEMPLATES is None:
            HeroProcessor.WEAPON_TEMPLATES = [
                (str(os.path.basename(p)).split(".")[0], _load_template(imageops.imread(p, 0)))
                for p in glob.glob(os.path.join(os.path.dirname(__file__), "data", "weapon_templates", "*.png"))
            ]
            # Hoisted out of the per-hero loop (was rebuilt for every hero).
            template_names = [n[0] for n in HeroProcessor.WEAPON_TEMPLATES]
            for hero in sorted(data.heroes):
                if hero not in template_names:
                    logger.warning(f"Did not get weapon template for {hero}")
                else:
                    weapon_count = len([t for t in HeroProcessor.WEAPON_TEMPLATES if t[0].startswith(hero)])
                    logger.info(f"Got {weapon_count} weapon templates for {hero}")
        logger.info(
            f"Loaded {len(self.WEAPON_TEMPLATES)} weapon templates",
        )

    def process(self, frame: Frame) -> bool:
        """Populate ``frame.overwatch.hero``; return True if a hero or banner state was found."""
        hero_name = self._parse_hero_from_weapon(frame)
        state = self._parse_state(frame)

        # Only attempt the (relatively expensive) ult parse when we know a hero is on screen.
        ult_status = None
        if hero_name:
            ult_status = self._parse_ult_status(frame)

        frame.overwatch.hero = Hero(
            hero=hero_name,
            ult=ult_status,
            potg="PLAY OF THE" in state,
            spectating="SPECTATING" in state,
            killcam="ELIMINATED BY" == state,
        )

        _draw_hero(frame.debug_image, frame.overwatch.hero)

        return bool(frame.overwatch.hero.hero or state)

    def _parse_hero_from_weapon(self, frame: Frame) -> Optional[str]:
        """Identify the hero by template-matching the weapon region; None if no template matches."""
        image = self.REGIONS["weapon"].extract_one(frame.image)
        gray = np.min(image, axis=2)
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 205, -51)
        # Only the two known weapon-region sizes are supported — anything else is a bad extract.
        if thresh.shape != (81, 231) and thresh.shape != (100, 220):
            logger.warning("Ignoring weapon image with dimensions %s", thresh.shape)
            return None
        # Pad so templates can match near the edges, then halve to template scale.
        thresh = cv2.resize(
            cv2.copyMakeBorder(thresh, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=0), (0, 0), fx=0.5, fy=0.5
        )
        assert self.WEAPON_TEMPLATES is not None
        matches = [np.min(cv2.matchTemplate(thresh, t, cv2.TM_SQDIFF_NORMED)) for _, t in self.WEAPON_TEMPLATES]
        match: int = arrayops.argmin(matches)
        if matches[match] < 0.3:
            h = self.WEAPON_TEMPLATES[match][0]
            logger.debug(f"Got hero {h} with match {matches[match]:.2f}")
            return h
        else:
            return None

    def _parse_state(self, frame: Frame) -> str:
        """OCR the yellow banner text and return the best-matching entry of STATES ('' if none)."""
        map_info_image = self.REGIONS["potg_eliminted_deathspec"].extract_one(frame.image)
        yellow_text = cv2.inRange(
            cv2.cvtColor(map_info_image, cv2.COLOR_BGR2HSV_FULL),
            ((35 / 360) * 255, 0.5 * 255, 0.8 * 255),
            ((55 / 360) * 255, 1.0 * 255, 1.0 * 255),
        )
        # Fraction of the region that is yellow: only OCR when it looks like banner text.
        p = np.sum(yellow_text > 0) / np.prod(yellow_text.shape)
        state = ""
        if 0.05 < p < 0.4:
            state_text = big_noodle.ocr(yellow_text, channel=None)
            if state_text and len(state_text) > 5:
                state_text_matches = textops.matches(state_text, self.STATES)
                match_i: int = arrayops.argmin(state_text_matches)
                match = state_text_matches[match_i]
                # Accept up to edit distance 6 against the known state strings.
                if match < 7:
                    state = self.STATES[match_i]
                    logger.info(
                        f"Got state={state_text!r} (text fill: {p*100:.0f}%) -> best match: {state!r} (match={match})"
                    )
                else:
                    logger.warning(
                        f'Got state={state_text!r}, but this was not recognized as a valid state (closest was "{self.STATES[match_i]}", match={match})'
                    )
        return state

    def _parse_ult_status(self, frame: Frame) -> Optional[int]:
        """Return the ult charge 0-99, 100 if the full-charge circle is shown, else None."""
        ult_image = self.REGIONS["ult"].extract_one(frame.image)
        thresh = imageops.unsharp_mask(ult_image, unsharp=2, weight=4, threshold=240)

        # Look for the closed circle drawn at 100% charge by matching contour shapes.
        ult_circle = cv2.resize(thresh, (0, 0), fx=0.5, fy=0.5)
        contours, _ = imageops.findContours(ult_circle, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contour_match = 1.0
        for cnt in contours:
            if 1500 < cv2.contourArea(cnt) < 2500:
                contour_match = min(contour_match, cv2.matchShapes(cnt, self.ULT_CONTOUR_TEMPLATE, 1, 0))

        if contour_match < 0.01:
            logger.debug(f"Got ult contour match {contour_match:1.5f} - ult=100%")
            return 100
        else:
            ult = big_noodle.ocr_int(thresh, channel=None, threshold=None, height=33)
            logger.debug(f"Parsed ult as {ult}%")
            if ult is not None and 0 <= ult <= 99:
                return ult
            else:
                return None
コード例 #5
0
class RoleSelectProcessor(Processor):
    """Detect the role-select screen and OCR placements, SR texts and the account name."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    TANK_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "tank.png"), 0)
    TANK_LARGE_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "tank_large.png"), 0)
    LOCK_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "lock.png"), 0)

    REQUIRED_MATCH = 0.95

    def process(self, frame: Frame) -> bool:
        """Return True (and populate ``frame.overwatch.role_select``) if the screen is found."""
        y = frame.image_yuv[:, :, 0]
        tank_region = np.max(self.REGIONS["tank_region"].extract_one(frame.image), axis=2)

        _, thresh = cv2.threshold(tank_region, 100, 255, cv2.THRESH_BINARY)

        # Match all three candidate templates and keep (name, score, location) for each.
        candidates = []
        for name, template in (
            ("tank", self.TANK_TEMPLATE),
            ("tank_lg", self.TANK_LARGE_TEMPLATE),
            ("lock", self.LOCK_TEMPLATE),
        ):
            result = cv2.matchTemplate(thresh, template, cv2.TM_CCORR_NORMED)
            _, best_score, _, best_loc = cv2.minMaxLoc(result)
            candidates.append((name, best_score, best_loc))

        match_sm, match_lg, match_lock = (c[1] for c in candidates)
        matched_i = arrayops.argmax([match_sm, match_lg, match_lock])
        matched, match, best_match_pos = candidates[matched_i]
        match_x = best_match_pos[0]

        frame.overwatch.role_select_match = round(match, 2)

        if match <= self.REQUIRED_MATCH:
            return False

        # A match in the leftmost ~150px indicates the grouped layout.
        grouped = match_x < 150

        logger.debug(
            f"Found match for {matched!r} with match={match:0.3f} ({match_sm:.2f}, {match_lg:.2f}, {match_lock:.2f}), x={match_x} => grouped={grouped}"
        )

        suffix = "_group" if grouped else "_solo"
        frame.overwatch.role_select = RoleSelect(
            placement_text=imageops.tesser_ocr_all(
                self.REGIONS["placements" + suffix].extract(y), whitelist=string.digits + "/-"
            ),
            sr_text=big_noodle.ocr_all(self.REGIONS["srs" + suffix].extract(y), height=23, invert=True),
            account_name=imageops.tesser_ocr(
                self.REGIONS["account_name"].extract_one(y), engine=imageops.tesseract_lstm
            ),
            grouped=grouped,
            image=lazy_upload(
                "role_select", self.REGIONS.blank_out(frame.image), frame.timestamp, selection="last"
            ),
        )
        if frame.debug_image is not None:
            self.REGIONS.draw(frame.debug_image)
        _draw_role_select(frame.debug_image, frame.overwatch.role_select)
        return True
コード例 #6
0
class TabProcessor(Processor):
    """Parse the (tab) scoreboard screen: team names, heroes played, map/mode and player stats."""

    # ExtractionRegionsCollection(regions={
    #   'vs': ExtractionRegions(name="vs", 1 regions),
    #   'blue_names': ExtractionRegions(name="blue_names", 6 regions),
    #   'red_names': ExtractionRegions(name="red_names", 6 regions),
    #   'player_hero': ExtractionRegions(name="player_hero", 2 regions),
    #   'stats': ExtractionRegions(name="stats", 12 regions),
    #   'medals': ExtractionRegions(name="medals", 5 regions)
    # } regions)
    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    # Hero icon templates keyed by filename stem; _load_template values unpack as
    # (template, mask) pairs in parse_heroes.
    TEMPLATES = {
        os.path.basename(p).split(".")[0]:
        _load_template(imageops.imread(p, -1))
        for p in glob.glob(
            os.path.join(os.path.dirname(__file__), "data", "hero_icons",
                         "*.png"))
    }
    # Maximum TM_SQDIFF score for a hero icon to count as a match.
    HERO_TEMPLATE_THRESH = 100

    def __init__(self, save_name_images: bool = False) -> None:
        self.save_name_images = save_name_images

        # Most recent hero matched per scoreboard slot (12 slots: red icons occupy
        # indices 0-5, blue 6-11 — see parse_heroes). Used to short-circuit template
        # matching on subsequent frames.
        self._last_matches: List[Optional[str]] = [None for _ in range(12)]

    def process(self, frame: Frame) -> bool:
        """Parse the tab screen into ``frame.overwatch.tab_screen``.

        Returns False quickly if the "VS" marker is not detected; otherwise always
        returns True, with ``stats=None`` if the player's hero could not be recognised.
        """
        if not self.detect_tab(frame):
            return False

        player_name_image, player_hero_image = self.REGIONS[
            "player_hero"].extract(frame.image)
        images = NameImages(
            blue_team=self._mask_roles_out(self.REGIONS["blue_names"].extract(
                frame.image)),
            red_team=self.REGIONS["red_names"].extract(frame.image),
            ult_images=self.REGIONS["ults"].extract(frame.image),
            player_name_image=player_name_image,
            player_hero_image=player_hero_image,
            hero_icons_red=self.REGIONS["hero_icons_red"].extract(frame.image),
            hero_icons_blue=self.REGIONS["hero_icons_blue"].extract(
                frame.image),
        )

        # Fuzzy-match the OCRed hero text against known hero keys (edit distance <= 3).
        player_hero_text = big_noodle.ocr(player_hero_image)
        hero = textops.best_match(
            player_hero_text.lower(),
            [h[1].key for h in data.heroes.items()],
            list(data.heroes.keys()),
            threshold=3,
        )

        heroes_played = self.parse_heroes(images)
        map_text, mode_text = self.parse_map_info(frame)

        stats: Optional[Stats]
        if hero:
            stat_values = []
            for i, im in enumerate(self.REGIONS["stats"].extract(frame.image)):
                masked = self._filter_digit_components(im)
                stat_values.append(digit.ocr(masked, 1.0))

            # Stat cells are laid out row-major: generic row 1, hero row 1,
            # generic row 2, hero row 2.
            stat_names_row_1 = [s.name for s in data.generic_stats[:3]]
            stat_names_row_2 = [s.name for s in data.generic_stats[3:]]
            hero_stat_names_row_1 = [
                s.name for s in data.heroes[hero].stats[0]
            ]
            hero_stat_names_row_2 = [
                s.name for s in data.heroes[hero].stats[1]
            ]
            stat_names = stat_names_row_1 + hero_stat_names_row_1 + stat_names_row_2 + hero_stat_names_row_2

            stat_parsed: Dict[str, Optional[int]] = dict(
                zip(stat_names, stat_values))

            # Objective time is OCRed as an MMSS integer; convert to seconds.
            if stat_parsed["objective time"] is not None:
                stat_parsed["objective time"] = textops.mmss_to_seconds(
                    stat_parsed["objective time"])
                logger.debug(
                    f'Transformed MMSS objective time to {stat_parsed["objective time"]}'
                )

            stats = Stats(
                hero,
                eliminations=stat_parsed["eliminations"],
                objective_kills=stat_parsed["objective kills"],
                objective_time=stat_parsed["objective time"],
                hero_damage_done=stat_parsed["hero damage done"],
                healing_done=stat_parsed["healing done"],
                deaths=stat_parsed["deaths"],
                hero_specific_stats={
                    s.name: stat_parsed[s.name]
                    for s in itertools.chain.from_iterable(
                        data.heroes[hero].stats)
                },
            )
            logger.info(f"Parsed stats as {stats}")

        else:
            logger.warning(f"Could not recognise {player_hero_text} as a hero")
            stats = None

        frame.overwatch.tab_screen = TabScreen(
            map=map_text,
            mode=mode_text,
            blue_team=big_noodle.ocr_all(images.blue_team, channel="max"),
            blue_team_hero=heroes_played[6:12],
            blue_team_ults=[0 for _ in range(6)],
            red_team=big_noodle.ocr_all(images.red_team, channel="r"),
            red_team_hero=heroes_played[:6],
            player_name=big_noodle.ocr(player_name_image),
            player_hero=hero,
            stats=stats,
        )
        _draw_tab_screen(frame.debug_image, frame.overwatch.tab_screen)

        return True

    def _mask_roles_out(self, ims: List[np.ndarray]) -> List[np.ndarray]:
        """
        Mask out the role icons that appear to the left of the blue team names
        """
        r = []
        for im in ims:
            # Near-white pixels (>=250 on the brightest channel) form the icon mask;
            # erode to drop speckles then dilate so the mask covers the whole icon.
            _, rank_mask = cv2.threshold(np.max(im, axis=2), 250, 255,
                                         cv2.THRESH_BINARY)
            rank_mask = cv2.erode(rank_mask, None)
            rank_mask = cv2.dilate(rank_mask, np.ones((11, 7)))
            masked = cv2.bitwise_and(
                im, 255 - cv2.cvtColor(rank_mask, cv2.COLOR_GRAY2BGR))
            r.append(masked)

        return r

    def _filter_digit_components(self, im: np.ndarray) -> np.ndarray:
        """Reduce a stat cell to grayscale pixels near the Otsu-thresholded foreground,
        suppressing background noise before digit OCR."""
        im = np.min(im, axis=2)
        t = imageops.otsu_thresh(im, 100, 255)
        _, mask = cv2.threshold(im, t, 255, cv2.THRESH_BINARY)

        # Dilate so anti-aliased digit edges survive the masking.
        dmask = cv2.dilate(mask, np.ones((3, 3)))
        gray = cv2.bitwise_and(im, dmask)

        # nmask = np.full_like(mask, 255)
        # labels, components = imageops.connected_components(mask)
        # for c1 in components:
        #     for c2 in components:
        #         if c1 is c2:
        #             continue
        #         if c1.x < c2.x < c1.x + c1.w or c1.x < c2.x + c2.w < c1.x + c1.w:
        #             # c1 is above/below c2
        #             nmask[labels == c1.label] = 0
        #         #     nmask[labels == c2.label] = 0
        #
        #
        # cv2.imshow('mask', np.vstack([im, mask, dmask, nmask, gray]))
        # cv2.waitKey(0)

        return gray

    def parse_heroes(self, images: NameImages) -> List[Optional[str]]:
        """Match each slot's hero icon against the templates.

        Returns 12 entries: red team icons first (indices 0-5), then blue (6-11).
        The per-slot last match is tried first as a cheap fast path.
        """
        hero_played: List[Optional[str]] = [None for _ in range(12)]
        for i, icon in enumerate(images.hero_icons_red +
                                 images.hero_icons_blue):
            icon = cv2.resize(icon, (0, 0), fx=0.5, fy=0.5)
            last = self._last_matches[i]
            dontcheck = None
            if last:
                # check the hero this player was playing last
                dontcheck = last
                t, mask = self.TEMPLATES[last]
                match = np.min(
                    cv2.matchTemplate(icon, t, cv2.TM_SQDIFF, mask=mask))
                if match < self.HERO_TEMPLATE_THRESH:
                    hero_played[i] = last
                else:
                    # hero has changed
                    last = None
            if not last:
                # Full scan over all templates, skipping the one already tested.
                for hero_name, (t, mask) in self.TEMPLATES.items():
                    if hero_name == dontcheck:
                        # already tested
                        continue
                    match = np.min(
                        cv2.matchTemplate(icon, t, cv2.TM_SQDIFF, mask=mask))
                    if match < self.HERO_TEMPLATE_THRESH:
                        self._last_matches[i] = hero_name
                        hero_played[i] = hero_name
                        break
        return hero_played

    def parse_map_info(self,
                       frame: Frame) -> Tuple[Optional[str], Optional[str]]:
        """OCR the map and mode names from the map-info strip.

        The split point is the left edge of the yellow mode text; returns
        (None, None) when either OCR result is implausibly short.
        """
        map_info_image = self.REGIONS["map_info"].extract_one(frame.image)
        yellow_text = cv2.inRange(
            cv2.cvtColor(map_info_image, cv2.COLOR_BGR2HSV_FULL),
            ((30 / 360) * 255, 0.5 * 255, 0.6 * 255),
            ((45 / 360) * 255, 1.0 * 255, 1.0 * 255),
        )
        # Smooth, then find the first column with enough yellow pixels.
        yellow_text = cv2.filter2D(yellow_text, -1, np.ones((4, 2)) / (4 * 2))
        yellow_text_left = np.argmax(np.sum(yellow_text, axis=0) / 255 > 4)
        map_image, mode_image = (
            map_info_image[:, :yellow_text_left - 20],
            map_info_image[:, yellow_text_left - 5:],
        )
        map_text = imageops.tesser_ocr(np.min(map_image, axis=2),
                                       whitelist=string.ascii_uppercase + " ",
                                       scale=2,
                                       invert=True)
        mode_text = imageops.tesser_ocr(np.max(mode_image, axis=2),
                                        whitelist=string.ascii_uppercase + " ",
                                        scale=2,
                                        invert=True)
        if len(map_text) < 4 or len(mode_text) < 4:
            logger.warning(
                f"Unexpected map/mode text: {map_text} | {mode_text}")
            return None, None
        else:
            logger.debug(f"Got map={map_text}, mode={mode_text}")
            return map_text, mode_text

    # Template for the "VS" marker shown at the centre-top of the tab screen.
    VS_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "vs_template.png"), 0)

    VS_MATCH_THRESHOLD = 0.6

    def detect_tab(self, frame: Frame) -> bool:
        """Return True if the "VS" marker matches, recording the score on the frame."""
        region = self.REGIONS["vs"].extract_one(frame.image)
        # NOTE(review): cv2.resize's third positional parameter is `dst`, not
        # `interpolation` — cv2.INTER_NEAREST here likely does not select the
        # interpolation mode as intended; confirm against the cv2.resize signature.
        region = cv2.resize(region, (50, 50), cv2.INTER_NEAREST)
        region_gray = np.min(region, axis=2)

        # threshold of around 200, allow for flux/lower brightness settings bringing the range down
        _, thresh = cv2.threshold(region_gray,
                                  np.percentile(region_gray.ravel(), 93), 255,
                                  cv2.THRESH_BINARY)

        # Convert TM_SQDIFF_NORMED (0 = perfect) into a similarity score (1 = perfect).
        match = 1 - float(
            np.min(
                cv2.matchTemplate(thresh, self.VS_TEMPLATE,
                                  cv2.TM_SQDIFF_NORMED)))

        frame.overwatch.tab_match = round(match, 5)
        return match > self.VS_MATCH_THRESHOLD
コード例 #7
0
class MenuProcessor(Processor):
    """Detect the Apex play menu (READY/CANCEL button) and OCR player, squad and rank info."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    READY = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "ready.png"), 0)
    CANCEL = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "cancel.png"), 0)
    REQUIRED_MATCH = 0.9

    CROWN = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "crown.png"), 0)

    def eager_load(self):
        self.REGIONS.eager_load()

    def process(self, frame: Frame):
        """Return True when the play menu is visible, populating ``frame.apex.apex_play_menu``."""
        # Reuse a previously recorded match score for this frame if one exists.
        if frame.apex.apex_play_menu_match:
            return frame.apex.apex_play_menu_match >= self.REQUIRED_MATCH

        luma = frame.image_yuv[:, :, 0]

        button_region = self.REGIONS["ready_button"].extract_one(luma)
        _, button_thresh = cv2.threshold(button_region, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        ready_match = np.max(cv2.matchTemplate(button_thresh, self.READY, cv2.TM_CCORR_NORMED))
        # Only bother matching CANCEL when READY did not already match.
        if ready_match >= self.REQUIRED_MATCH:
            cancel_match = 0.0
        else:
            cancel_match = np.max(cv2.matchTemplate(button_thresh, self.CANCEL, cv2.TM_CCORR_NORMED))
        frame.apex.apex_play_menu_match = round(float(max(ready_match, cancel_match)), 5)
        _draw_buttons_match(frame.debug_image, ready_match, cancel_match, self.REQUIRED_MATCH)

        if ready_match >= self.REQUIRED_MATCH or cancel_match >= self.REQUIRED_MATCH:
            player_name_image = self.REGIONS["player_name"].extract_one(luma)
            mate1, mate2 = self.REGIONS["squadmates"].extract(luma)

            rank_text = imageops.tesser_ocr(
                self.REGIONS["rank_text"].extract_one(luma), invert=True, engine=imageops.tesseract_lstm
            )
            rp_text = imageops.tesser_ocr(
                self.REGIONS["rp_text"].extract_one(luma), invert=True, engine=imageops.tesseract_lstm
            )

            frame.apex.apex_play_menu = PlayMenu(
                player_name=self._ocr_playername(player_name_image),
                squadmates=(self._ocr_playername(mate1), self._ocr_playername(mate2)),
                # A matching CANCEL button means the player has already readied up.
                ready=cancel_match >= self.REQUIRED_MATCH,
                rank_text=rank_text,
                rp_text=rp_text,
            )
            self.REGIONS.draw(frame.debug_image)
            _draw_play_menu(frame.debug_image, frame.apex.apex_play_menu)

            return True

        else:
            return False

    def _ocr_playername(self, player_name_image: np.ndarray) -> str:
        """OCR a player-name strip, first cropping out the party-leader crown icon if present."""
        _, name_thresh = cv2.threshold(player_name_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        _, max_val, _, max_loc = cv2.minMaxLoc(cv2.matchTemplate(name_thresh, self.CROWN, cv2.TM_CCORR_NORMED))
        if max_val > 0.99:
            player_name_image = player_name_image[:, max_loc[0] + self.CROWN.shape[1] :]

        return imageops.tesser_ocr(player_name_image, scale=4)
コード例 #8
0
class YourSquadProcessor(Processor):
    """Detect the "your squad" / "your selection" / "champion squad" screens and OCR player names."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    TEMPLATES = {
        name: imageops.imread(os.path.join(os.path.dirname(__file__), "data", name + ".png"), 0)
        for name in ["your_squad", "your_selection", "champion_squad"]
    }

    REQUIRED_MATCH = 0.95

    def __init__(self):
        self.duos = False
        # Kept for interface compatibility; not updated within this class.
        self.duos_last_seen = 0

    def eager_load(self):
        self.REGIONS.eager_load()

    def process(self, frame: Frame):
        """Return True when one of the squad screens is detected, populating the
        corresponding attribute on ``frame.apex``."""
        y = frame.image_yuv[:, :, 0]

        header_image = self.REGIONS["your_squad"].extract_one(y)
        _, header_thresh = cv2.threshold(header_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        match, key = imageops.match_templates(header_thresh, self.TEMPLATES, cv2.TM_CCORR_NORMED, self.REQUIRED_MATCH)
        frame.apex.your_squad_match = round(match, 4)
        if match < self.REQUIRED_MATCH:
            return False

        # Decide trios vs duos from how many bright pixels fall in each layout's
        # first name slot.
        name1_trios = np.min(self.REGIONS["names"].extract_one(frame.image), axis=2)
        name1_duos = np.min(self.REGIONS["names_duos"].extract_one(frame.image), axis=2)
        name1_thresh_value = 240

        name1_trios_score = int(np.sum(name1_trios > name1_thresh_value))
        name1_duos_score = int(np.sum(name1_duos > name1_thresh_value))
        logger.debug(f"Trios name score: {name1_trios_score} vs duos name score: {name1_duos_score}")

        self.duos = name1_trios_score < 100
        logger.info(f"Using duos={self.duos}")

        names_region_name = "names_duos" if self.duos else "names"
        if key == "your_squad":
            names = self._read_names(y, names_region_name)
            frame.apex.your_squad = YourSquad(
                tuple(self._to_name(n) for n in names),
                mode="duos" if self.duos else None,
                images=lazy_upload(
                    "your_squad",
                    np.hstack(self.REGIONS[names_region_name].extract(frame.image)),
                    frame.timestamp,
                ),
            )
            self.REGIONS.draw(frame.debug_image)
            _draw_squad(frame.debug_image, frame.apex.your_squad)
        elif key == "your_selection":
            # Only the middle slot of the trios layout holds the player's own name.
            frame.apex.your_selection = YourSelection(
                name=self._to_name(
                    imageops.tesser_ocr(
                        self.REGIONS["names"].extract(y)[1],
                        engine=imageops.tesseract_lstm,
                        invert=True,
                    )
                ),
                image=lazy_upload(
                    "your_selection",
                    self.REGIONS["names"].extract(frame.image)[1],
                    frame.timestamp,
                ),
            )
            self.REGIONS.draw(frame.debug_image)
            _draw_squad(frame.debug_image, frame.apex.your_selection)
        elif key == "champion_squad":
            names = self._read_names(y, names_region_name)
            frame.apex.champion_squad = ChampionSquad(
                tuple(self._to_name(n) for n in names),
                mode="duos" if self.duos else None,
                images=lazy_upload(
                    "champion_squad",
                    np.hstack(self.REGIONS[names_region_name].extract(frame.image)),
                    frame.timestamp,
                ),
            )
            self.REGIONS.draw(frame.debug_image)
            _draw_squad(frame.debug_image, frame.apex.champion_squad)

        return True

    def _read_names(self, y: np.ndarray, names_region_name: str) -> List[str]:
        """OCR every name slot in the given region of the luma plane."""
        return imageops.tesser_ocr_all(
            self.REGIONS[names_region_name].extract(y),
            engine=imageops.tesseract_lstm,
            invert=True,
        )

    def _to_name(self, name_text: str) -> Optional[str]:
        """Normalise an OCRed squadmate name of the form ``(NAME)`` to ``NAME``,
        mapping commonly mis-OCRed bracket characters to parentheses first."""
        for pair in ("[(", "{(", "])", "})"):
            name_text = name_text.replace(pair[0], pair[1])
        if len(name_text) > 3 and name_text[0] == "(" and name_text[-1] == ")":
            return name_text[1:-1].replace(" ", "").replace("(", "").replace(")", "")
        logger.warning(f"Got name {name_text!r} for player: not correctly bracketed")
        return name_text.replace(" ", "").replace("(", "").replace(")", "")
コード例 #9
0
class SquadSummaryProcessor(Processor):
    """Detect and parse the Apex Legends post-match "squad summary" screen.

    The screen is recognised by template-matching its title ("SQUAD ELIMINATED",
    "CHAMPIONS OF THE ARENA" or "MATCH SUMMARY"); on a match, placement, squad
    kills and the per-player stat table are OCRed into
    ``frame.apex.squad_summary``.
    """

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # Title templates, matched against the Otsu-thresholded title region.
    TEMPLATES = {
        k: imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", k + ".png"), 0)
        for k in
        ["squad_eliminated", "champions_of_the_arena", "match_summary"]
    }

    # Minimum TM_CCORR_NORMED score for a title template to count as a match.
    REQUIRED_MATCH = 0.75

    def eager_load(self):
        """Load the extraction regions up front instead of on first use."""
        self.REGIONS.eager_load()

    def process(self, frame: Frame) -> bool:
        """Populate ``frame.apex.squad_summary`` and return True if this frame
        shows the squad summary screen; otherwise return False."""
        y = frame.image_yuv[:, :, 0]
        champions_eliminated = self.REGIONS[
            "champions_eliminated"].extract_one(y)
        t, thresh = cv2.threshold(champions_eliminated, 0, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        match, key = imageops.match_templates(thresh, self.TEMPLATES,
                                              cv2.TM_CCORR_NORMED,
                                              self.REQUIRED_MATCH)
        frame.apex.squad_summary_match = round(match, 4)
        if match > self.REQUIRED_MATCH:
            champions = key in ["champions_of_the_arena"]

            # Duos detection: the area that would hold a third player's card is
            # (near) empty in duos - fewer than 100 bright pixels.
            duos_empty_area = self.REGIONS["duos_empty_area"].extract_one(
                frame.image_yuv[:, :, 0])
            duos_sum = np.sum(duos_empty_area > 100)
            duos = duos_sum < 100
            logger.debug(f"Got duos_sum={duos_sum} => duos={duos}")

            # In duos the two player cards can sit in either of two horizontal
            # positions; when the "shunt tell" area is empty the name/stat
            # regions must be shifted right by 270px.
            shunt = 0
            if duos:
                duos_shunt_area = self.REGIONS["duos_shunt_area"].extract_one(
                    frame.image_yuv[:, :, 0])
                duos_shunt_sum = np.sum(duos_shunt_area > 100)
                duos_shunt = duos_shunt_sum < 100
                logger.debug(
                    f"Got duos_shunt_sum={duos_shunt_sum} => duos_shunt={duos_shunt}"
                )
                if duos_shunt:
                    shunt = 270

            frame.apex.squad_summary = SquadSummary(
                champions=champions,
                placed=self._process_yellowtext(
                    self.REGIONS["placed"].extract_one(frame.image)),
                squad_kills=self._process_yellowtext(
                    self.REGIONS["squad_kills"].extract_one(frame.image)),
                player_stats=self._process_player_stats(y, duos, shunt),
                elite=False,
                mode="duos" if duos else None,
                image=lazy_upload(
                    "squad_summary",
                    self.REGIONS.blank_out(frame.image),
                    frame.timestamp,
                    selection="last",
                ),
            )
            self.REGIONS.draw(frame.debug_image)
            _draw_squad_summary(frame.debug_image, frame.apex.squad_summary)
            return True

        return False

    def _process_yellowtext(self, image: np.ndarray) -> Optional[int]:
        """OCR a yellow-on-dark number (placement / squad kills); None on failure."""
        # Mask out only the yellow text (digits) by BGR range, then flatten to gray.
        yellow = cv2.inRange(image, (0, 40, 150), (90, 230, 255))
        yellow = cv2.dilate(yellow, None)
        yellowtext_image = cv2.bitwise_and(
            image, cv2.cvtColor(yellow, cv2.COLOR_GRAY2BGR))
        yellowtext_image_g = np.max(yellowtext_image, axis=2)
        yellowtext_image_g = cv2.erode(yellowtext_image_g, np.ones((2, 2)))

        text = imageops.tesser_ocr(
            yellowtext_image_g,
            engine=imageops.tesseract_lstm,
            scale=4,
            blur=4,
            invert=True,
        )
        otext = text
        text = text.upper()
        # Common OCR letter-for-digit confusions, then drop "#"-like characters.
        for s1, s2 in "|1", "I1", "L1", "O0", "S5", "B6":
            text = text.replace(s1, s2)
        for hashchar in "#H":
            text = text.replace(hashchar, "")
        logger.info(f"Got text={otext} -> {text}")

        try:
            return int(text)
        except ValueError:
            logger.warning(f"Could not parse {text!r} as int")
            return None

    def _process_player_stats(self,
                              y: np.ndarray,
                              duos: bool = False,
                              shunt: int = 0) -> Tuple[PlayerStats, ...]:
        """OCR the per-player name and stat columns into PlayerStats tuples.

        ``shunt`` shifts the extraction regions right (duos layout); ``duos``
        limits the result to 2 players instead of 3.
        """
        name_images = self.REGIONS["names"].shunt(x=shunt).extract(y)
        names = []
        for im in name_images:
            # Keep only Otsu-foreground pixels, inverted to dark-on-light for tesseract.
            im = 255 - cv2.bitwise_and(
                im,
                cv2.dilate(
                    cv2.threshold(im, 0, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1],
                    None,
                ),
            )
            im = cv2.resize(im, (0, 0), fx=2, fy=2)
            im = cv2.GaussianBlur(im, (0, 0), 1)

            name = imageops.tesser_ocr(
                im,
                engine=imageops.tesseract_lstm,
            ).replace(" ", "")
            match = np.mean(imageops.tesseract_lstm.AllWordConfidences())
            logger.info(f"Got name {name!r} ~ {match:1.2f}")
            if match < 0.75:
                # Low LSTM confidence - fall back to the legacy engine.
                name = imageops.tesser_ocr(
                    im,
                    engine=imageops.tesseract_only,
                )
                logger.info(f"Using {name!r} instead")
            names.append(name)

        stat_images = self.REGIONS["stats"].shunt(x=shunt).extract(y)

        stats = imageops.tesser_ocr_all(
            stat_images,
            engine=ocr.tesseract_ttlakes_digits_specials,
        )

        # Stats are laid out column-major: i % 3 selects the player, i // 3 the
        # stat. Indices 0-2 are the kills field (may read "K / A / Kn"),
        # 6-8 survival time (MMSS), the rest plain ints.
        for i in range(len(stats)):
            value = stats[i]
            logger.debug(f"Got stat {i}: {value!r}")
            if value:
                value = value.lower().replace(" ", "")
                for c1, c2 in "l1", "i1", "o0", (":", ""):
                    value = value.replace(c1, c2)
                value = textops.strip_string(value, string.digits + "/")
            else:
                # OCR may return None - normalise to "" so the int()/split()
                # parsing below fails cleanly with ValueError (-> None) instead
                # of raising an uncaught AttributeError/TypeError.
                value = ""
            if i < 3:
                try:
                    stats[i] = tuple([int(v) for v in value.split("/")])
                except ValueError as e:
                    logger.warning(f"Could not parse {value!r} as 3 ints: {e}")
                    stats[i] = None
            elif 6 <= i <= 8:
                # Survival time, OCRed as MMSS.
                if stats[i] is not None:
                    try:
                        seconds = int(value)
                    except ValueError as e:
                        logger.warning(
                            f'Could not parse "{stats[i]}" as int: {e}')
                        seconds = None
                    else:
                        seconds = mmss_to_seconds(seconds)
                        logger.info(f"MM:SS {stats[i]} -> {seconds}")
                    stats[i] = seconds
            else:
                try:
                    stats[i] = int(value)
                except ValueError as e:
                    logger.warning(f"Could not parse {value!r} as int: {e}")
                    stats[i] = None

        # noinspection PyTypeChecker
        count = 3 if not duos else 2
        r = tuple([PlayerStats(names[i], *stats[i::3]) for i in range(count)])

        # The kills field may have parsed as (kills, assists, knocks) -
        # unpack it onto the PlayerStats (assumed mutable).
        for s in r:
            if not s.kills:
                pass
            elif len(s.kills) == 3:
                s.assists = s.kills[1]
                s.knocks = s.kills[2]
                s.kills = s.kills[0]
            else:
                s.kills = s.kills[0]

        logger.info(f"Got {pprint.pformat(r)}")
        return r

    def _mask_components_touching_edges(self,
                                        im: np.ndarray,
                                        threshold=100) -> bool:
        """Blank out (in place) connected components touching the top/bottom
        edges of ``im``; return True if a significant component was removed."""
        masked = False
        _, t = cv2.threshold(im, threshold, 255, cv2.THRESH_BINARY)
        mask, components = imageops.connected_components(t)
        for c in components[1:]:
            if c.y <= 1 or c.y + c.h >= im.shape[0] - 1:
                mask = (mask != c.label).astype(np.uint8) * 255
                mask = cv2.erode(mask, None)
                im[:] = cv2.bitwise_and(im, mask)
                masked = c.area > 50
        return masked
コード例 #10
0
class EliminationsProcessor(Processor):
    """Parses the Overwatch eliminations banner.

    Template-matches "ELIMINATED" lines (and a "WERE" marker identifying a
    "YOU WERE ELIMINATED BY ..." line) within the eliminations region, crops
    the name portion of each matched line and OCRs the names into
    ``frame.overwatch.eliminations``.
    """

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # "WERE" distinguishes the "you were eliminated by" line from regular
    # elimination lines. Masks are dilated copies of the templates.
    WERE_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "were.png"), 0)
    WERE_MASK = cv2.dilate(WERE_TEMPLATE, np.ones((4, 4)))

    ELIMINATED_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "eliminated.png"), 0)
    ELIMINATED_MASK = cv2.dilate(ELIMINATED_TEMPLATE, np.ones((4, 4)))

    def process(self, frame: Frame) -> bool:
        """Return True (and set ``frame.overwatch.eliminations``) if any
        elimination lines were found in this frame."""
        region = self.REGIONS["eliminations"].extract_one(frame.image)
        # min over BGR keeps only near-white text; threshold to binary.
        region_b = np.min(region, axis=2)
        _, thresh = cv2.threshold(region_b, 200, 255, cv2.THRESH_BINARY)

        eliminated_locations = self._get_locations(
            imageops.matchTemplate(
                thresh,
                self.ELIMINATED_TEMPLATE,
                cv2.TM_SQDIFF_NORMED,
            ),
            0.6,
            region_name="eliminated",
        )
        if not eliminated_locations:
            return False

        self._draw_locations(
            frame.debug_image,
            self.REGIONS["eliminations"].regions[0][:2],
            eliminated_locations,
            self.ELIMINATED_TEMPLATE.shape,
            "ELIMINATED",
        )

        # At most 2 "WERE" markers are expected (the "you were eliminated" line).
        were_locations = self._get_locations(
            imageops.matchTemplate(
                thresh,
                self.WERE_TEMPLATE,
                cv2.TM_SQDIFF_NORMED,
            ),
            0.6,
            max_matches=2,
            region_name="were",
        )
        self._draw_locations(
            frame.debug_image,
            self.REGIONS["eliminations"].regions[0][:2],
            were_locations,
            self.WERE_TEMPLATE.shape,
            "WERE",
        )

        eliminated_by_image = None
        elimination_images = []
        for ((x, y), m) in eliminated_locations:
            # An ELIMINATED match on (roughly) the same row as a WERE match is
            # the "you were eliminated by" line.
            if were_locations:
                is_were_eliminated = min([abs(y - y2) for ((_, y2), _) in were_locations]) < 10
            else:
                is_were_eliminated = False

            # Compute the full-line bounds (the line is horizontally centered),
            # then offset to just the name portion. The magic offsets are the
            # pixel widths of the "ELIMINATED " / "YOU WERE ELIMINATED BY "
            # prefixes at this resolution.
            if not is_were_eliminated:
                line_x = self.REGIONS["eliminations"].regions[0][0] + x
                line_w = (frame.image.shape[1] // 2 - line_x) * 2
                name_x = line_x + 135
                name_w = line_w - 132
            else:
                line_x = self.REGIONS["eliminations"].regions[0][0] + x - 125
                line_w = (frame.image.shape[1] // 2 - line_x) * 2
                name_x = line_x + 295
                name_w = line_w - 285

            line_y = self.REGIONS["eliminations"].regions[0][1] + y - 4
            line_h = 40

            line = frame.image[line_y : line_y + line_h, name_x : name_x + name_w]
            if not is_were_eliminated:
                elimination_images.append(line)
            else:
                # cv2.imshow('line', frame.image[
                #                    line_y: line_y + line_h,
                #                    name_x: name_x + name_w
                #                    ])
                # cv2.waitKey(0)
                eliminated_by_image = line

            if frame.debug_image is not None:
                cv2.rectangle(frame.debug_image, (name_x, line_y), (name_x + name_w, line_y + line_h), (0, 255, 0))

        # OCR all name crops in one batch; the "eliminated by" name goes last.
        if eliminated_by_image is not None:
            elimination_images.append(eliminated_by_image)

        eliminations = big_noodle.ocr_all(elimination_images, channel="max", height=30)
        if eliminated_by_image is not None:
            eliminated_by = eliminations[-1]
            eliminations = eliminations[:-1]
        else:
            eliminated_by = None

        frame.overwatch.eliminations = Eliminations(eliminations, eliminated_by)
        return True

    def _get_locations(
        self, match: np.ndarray, thresh: float, max_matches: int = 6, region_name: str = ""
    ) -> List[Tuple[Tuple[int, int], float]]:
        """Extract up to ``max_matches`` non-overlapping minima (best SQDIFF
        matches) below ``thresh`` from a matchTemplate result.

        Mutates ``match`` (suppresses a 40x40 window around each accepted
        minimum so subsequent iterations find distinct locations).
        """
        # NOTE(review): inf values (degenerate SQDIFF_NORMED denominators) are
        # mapped to 0, which is a *perfect* score for SQDIFF - confirm this is
        # intended rather than setting them to 1 (worst).
        match[match == np.inf] = 0

        r = []
        for i in range(max_matches):
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
            if min_val > thresh:
                logger.debug(
                    f'Rejected match {min_val:.2f} at {min_loc} in region{" " + region_name if region_name else ""}'
                )
                break
            logger.debug(
                f'Found match {min_val:.2f} at {min_loc} in region{" " + region_name if region_name else ""}'
            )

            # Suppress the neighbourhood of this match before looking for the next.
            match[
                clamp(0, min_loc[1] - 20, match.shape[0]) : clamp(0, min_loc[1] + 20, match.shape[0]),
                clamp(0, min_loc[0] - 20, match.shape[1]) : clamp(0, min_loc[0] + 20, match.shape[1]),
            ] = 1
            r.append((min_loc, min_val))

        return r

    def _draw_locations(
        self,
        debug_image: Optional[np.ndarray],
        image_offset: Tuple[int, int],
        locations: List[Tuple[Tuple[int, int], float]],
        template_shape: Tuple[int, int],
        name: str,
    ):
        """Draw labelled rectangles for the matched template locations onto the
        debug image (no-op when debugging is disabled)."""

        if debug_image is None:
            return

        # debug_image = debug_image[
        #     image_offset[1]:,
        #     image_offset[0]:
        # ]

        for i, ((x, y), match) in enumerate(locations):
            # Locations are relative to the extracted region; shift to frame coords.
            x += image_offset[0]
            y += image_offset[1]

            cv2.rectangle(debug_image, (x, y), (x + template_shape[1], y + template_shape[0]), (0, 0, 255), 1)
            cv2.putText(
                debug_image,
                f"{(x, y)}: {name}, match={match:.3f}, index={i}",
                (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 0, 255),
            )
コード例 #11
0
class WeaponProcessor(Processor):
    """Reads weapon names, the selected-weapon tell colours, and clip/ammo
    digit counters from the Apex Legends HUD into ``frame.apex.weapons``."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "..", "..", "data", "regions",
                     "16_9.zip"))
    # Per-digit glyph templates for the clip and ammo counters.
    CLIP_DIGITS = [
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "clip_digits",
                         f"{d}.png"),
            0,
        ) for d in string.digits
    ]
    AMMO_DIGITS = [
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "ammo_digits",
                         f"{d}.png"),
            0,
        ) for d in string.digits
    ]

    def __init__(self):
        # Build per-digit scoring masks: ink pixels contribute positively
        # (normalised so a full hit sums to 1.0), background pixels penalise
        # at 3x weight - see _ocr_digits for how the score is used.
        for typ in "CLIP", "AMMO":
            weights = []
            for im in getattr(self, typ + "_DIGITS"):
                # np.float was deprecated in NumPy 1.20 and removed in 1.24;
                # use the explicit float64 dtype instead.
                weight = np.zeros(im.shape, dtype=np.float64)
                weight[im > 0] = 1 / np.sum(im > 0)
                weight[im == 0] = -3 / np.sum(im == 0)
                weights.append(weight)
            setattr(self, typ + "_WEIGHTS", weights)

    def eager_load(self):
        """Load the extraction regions up front instead of on first use."""
        self.REGIONS.eager_load()

    def process(self, frame: Frame):
        """Populate ``frame.apex.weapons``; return True if a weapon is selected."""
        y = cv2.cvtColor(frame.image, cv2.COLOR_BGR2YUV)[:, :, 0]

        weapon_images = self.REGIONS["weapon_names"].extract(y)
        # Invert to dark-on-light for OCR.
        weapon_images = [255 - imageops.normalise(i) for i in weapon_images]

        weapon_names = imageops.tesser_ocr_all(
            weapon_images,
            whitelist=string.ascii_uppercase,
            engine=imageops.tesseract_lstm,
            scale=2,
        )

        # Median colour of each weapon slot's "selected" tell area.
        selected_weapons_regions = self.REGIONS[
            "selected_weapon_tell"].extract(frame.image)
        selected_weapons_colours = [
            np.median(r, axis=(0, 1)) for r in selected_weapons_regions
        ]

        def thresh_clip(im):
            # Threshold the ammo digits relative to the leftmost column
            # (background), making the mask robust to varying backdrop.
            im = np.max(im, axis=2)
            threshim = np.tile(im[:, 0], (im.shape[1], 1)).T
            im = cv2.subtract(im, threshim)
            tim = im > 20
            return tim

        frame.apex.weapons = Weapons(
            weapon_names,
            selected_weapons=(
                (
                    int(selected_weapons_colours[0][0]),
                    int(selected_weapons_colours[0][1]),
                    int(selected_weapons_colours[0][2]),
                ),
                (
                    int(selected_weapons_colours[1][0]),
                    int(selected_weapons_colours[1][1]),
                    int(selected_weapons_colours[1][2]),
                ),
            ),
            clip=self._ocr_digits(
                [im > 200 for im in self.REGIONS["clip"].extract(y)],
                self.CLIP_WEIGHTS),
            ammo=self._ocr_digits(
                [
                    thresh_clip(im)
                    for im in self.REGIONS["ammo"].extract(frame.image)
                ],
                self.AMMO_WEIGHTS,
            ),
        )

        # self.REGIONS.draw(frame.debug_image)
        _draw_weapons(frame.debug_image, frame.apex.weapons)

        return frame.apex.weapons.selected_weapons is not None

    def _ocr_digits(self, ims: List[np.ndarray],
                    weights: List[np.ndarray]) -> Optional[int]:
        """Match each binary digit image against the weight masks and assemble
        the resulting digits into an int; None if nothing could be read."""
        digits = []
        for i, im in enumerate(ims):
            # Fewer than 50 lit pixels: treat the cell as empty (no digit).
            if np.sum(im) < 50:
                continue
            best = None
            for d, w in enumerate(weights):
                score = np.sum(np.multiply(im, w))
                if score > 0.95:
                    # Near-perfect match - accept immediately.
                    digits.append(str(d))
                    break
                if not best or score > best[0]:
                    best = score, d
            else:
                # No early accept: take the best candidate if it is plausible.
                if best[0] > 0.1:
                    digits.append(str(best[1]))
                else:
                    logger.warning(
                        f"Unable to OCR clip digit {i} - best match: {best[1]} @ {best[0]:.2f}"
                    )
        if not digits:
            return None
        try:
            return int("".join(digits))
        except Exception as e:
            logger.warning(f"Unable to parse OCR of clip: {digits!r}: {e}")
            return None
コード例 #12
0
class AgentSelectProcessor(Processor):
    """Parses the Valorant agent-select screen.

    Recognises the currently highlighted agent name by template matching, then
    OCRs the locked-in agents, player names, map, game mode and rank icons
    into ``frame.valorant.agent_select``.
    """

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    # Thresholded name images per agent, padded 10px so matchTemplate can slide.
    AGENT_NAME_TEMPLATES: Dict[AgentName, np.ndarray] = {
        agent_name: cv2.copyMakeBorder(
            imageops.imread(
                os.path.join(os.path.dirname(__file__), "data", "agent_names",
                             agent_name.lower() + ".png"), 0),
            10,
            10,
            10,
            10,
            cv2.BORDER_CONSTANT,
        )
        #     cv2.resize(
        #     cv2.imread(os.path.join(os.path.dirname(__file__), 'data', 'agent_names', agent_name + '.png'), 0),
        #     (0, 0),
        #     fx=0.5,
        #     fy=0.5,
        # )
        for agent_name in agents
        # if os.path.exists(os.path.join(os.path.dirname(__file__), 'data', 'agent_names', agent_name.lower() + '.png'))
    }
    # Minimum TM_CCORR_NORMED score for an agent name template to match.
    AGENT_TEMPLATE_REQUIRED_MATCH = 0.95

    # (template, mask) pairs per rank, keyed by icon filename stem.
    RANK_TEMPLATES: Dict[str, Tuple[np.ndarray, np.ndarray]] = {
        str(os.path.basename(p)).rsplit(".", 1)[0]: load_rank_template(p)
        for p in glob.glob(
            os.path.join(os.path.dirname(__file__), "data", "ranks", "*.png"))
    }

    LOCK_IN_BUTTON_COLOR = (180, 210, 140)

    def __init__(self):
        # NOTE(review): overriding __init__ with pass prevents Processor.__init__
        # from running - confirm this is intentional.
        pass

    def process(self, frame: Frame) -> bool:
        """Return True (and set ``frame.valorant.agent_select``) if this frame
        shows the agent select screen."""
        agent_name_yuv = self.REGIONS["agent_name"].extract_one(
            frame.image_yuv)
        # Isolate the agent-name text by its YUV colour range.
        agent_name_thresh = cv2.inRange(agent_name_yuv, (200, 85, 120),
                                        (255, 115, 150))
        # if hasattr(frame, 'source_image'):
        # 	cv2.imshow('agent_name_yuv', agent_name_yuv)
        # 	cv2.imshow('agent_name_thresh', agent_name_thresh)
        # 	cv2.imwrite(
        # 		os.path.join(os.path.dirname(__file__), 'data', 'agent_names', os.path.basename(frame.source_image)),
        # 		agent_name_thresh
        # 	)

        match, best_match = imageops.match_templates(
            agent_name_thresh,
            self.AGENT_NAME_TEMPLATES,
            method=cv2.TM_CCORR_NORMED,
            required_match=0.95,
            # verbose=True,
        )
        # self.REGIONS.draw(frame.debug_image)

        if match > self.AGENT_TEMPLATE_REQUIRED_MATCH:
            selected_agent_ims = self.REGIONS["selected_agents"].extract(
                frame.image)
            selected_agent_ims_gray = [
                255 - imageops.normalise(np.max(im, axis=2), bottom=50)
                for im in selected_agent_ims
            ]
            selected_agent_texts = imageops.tesser_ocr_all(
                selected_agent_ims_gray,
                engine=imageops.tesseract_lstm,
            )
            logger.info(f"Got selected_agent_texts={selected_agent_texts}")

            # If any locked-in agent text fuzzy-matches the highlighted agent,
            # the selection has already been locked.
            # NOTE: `match` is reused below for the levenshtein ratio.
            picking = True
            for i, text in enumerate(selected_agent_texts):
                for word in textops.strip_string(text, string.ascii_letters +
                                                 " .").split(" "):
                    match = levenshtein.ratio(word, best_match)
                    logger.debug(
                        f"Player {i}: Got match {match:.2f} for {word!r} = {best_match!r}"
                    )
                    if match > 0.7:
                        logger.info(
                            f"Found matching locked in agent {text!r} for selecting agent {best_match!r} - selection locked"
                        )
                        picking = False

            game_mode = imageops.ocr_region(frame, self.REGIONS, "game_mode")

            # Match each player's rank icon against the rank templates.
            ranks = []
            for i, im in enumerate(self.REGIONS["player_ranks"].extract(
                    frame.image)):
                match, matched_rank = imageops.match_templates(
                    im,
                    self.RANK_TEMPLATES,
                    method=cv2.TM_SQDIFF,
                    use_masks=True,
                    required_match=15,
                    previous_match_context=("player_ranks", i),
                )
                ranks.append((matched_rank, round(match, 3)))

            player_name_ims = self.REGIONS["player_names"].extract(frame.image)
            player_name_gray = [
                255 - imageops.normalise(np.max(im, axis=2), bottom=50)
                for im in player_name_ims
            ]
            player_names = imageops.tesser_ocr_all(
                player_name_gray, engine=imageops.tesseract_lstm)

            frame.valorant.agent_select = AgentSelect(
                best_match,
                locked_in=not picking,
                map=imageops.ocr_region(frame, self.REGIONS, "map"),
                game_mode=game_mode,
                player_names=player_names,
                agents=selected_agent_texts,
                ranks=ranks,
                image=lazy_upload("agent_select",
                                  self.REGIONS.blank_out(frame.image),
                                  frame.timestamp,
                                  selection="last"),
            )
            draw_agent_select(frame.debug_image, frame.valorant.agent_select)
            return True

        return False
コード例 #13
0
class MatchSummaryProcessor(Processor):
    """Detects the Apex Legends "MATCH SUMMARY" screen and parses placement,
    plus (when shown) the XP breakdown or the ranked score report, into
    ``frame.apex.match_summary``."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    MATCH_SUMMARY_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "match_summary.png"),
        0)
    XP_BREAKDOWN_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "xp_breakdown.png"), 0)
    SCORE_REPORT_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "score_report.png"), 0)
    # Minimum TM_CCORR_NORMED score for the title templates to match.
    REQUIRED_MATCH = 0.75

    # BGR colour of the "#N" placement text.
    PLACED_COLOUR = (32, 61, 238)

    # Stat lines recognised in the XP breakdown, plus their normalised
    # (spaceless, uppercase) forms used for fuzzy matching.
    XP_STATS = [
        "Won Match",
        "Top 3 Finish",
        "Time Survived",
        "Kills",
        "Damage Done",
        "Revive Ally",
        "Respawn Ally",
    ]
    XP_STATS_NORMED = [s.replace(" ", "").upper() for s in XP_STATS]
    # OCR bracket confusions: square/curly brackets -> parentheses.
    SUBS = ["[(", "{(", "])", "})"]

    def eager_load(self):
        """Load the extraction regions up front instead of on first use."""
        self.REGIONS.eager_load()

    def process(self, frame: Frame) -> bool:
        """Return True (and set ``frame.apex.match_summary``) if this frame
        shows the match summary screen with a parseable placement."""
        y = frame.image_yuv[:, :, 0]
        your_squad_image = self.REGIONS["match_summary"].extract_one(y)
        t, thresh = cv2.threshold(your_squad_image, 0, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        match = np.max(
            cv2.matchTemplate(thresh, self.MATCH_SUMMARY_TEMPLATE,
                              cv2.TM_CCORR_NORMED))
        frame.apex.match_summary_match = round(float(match), 5)
        if match >= self.REQUIRED_MATCH:
            self.REGIONS.draw(frame.debug_image)
            placed = self._get_placed(frame)

            image_title = "match_summary"
            xp_stats, score_report = None, None

            # The summary shows either an XP breakdown or (ranked) a score
            # report - check for the XP breakdown title first.
            xp_breakdown_title_image = self.REGIONS[
                "xp_breakdown"].extract_one(y)
            _, xp_breakdown_title_thresh = cv2.threshold(
                xp_breakdown_title_image, 150, 255, cv2.THRESH_BINARY)
            xp_breakdown_title_match = np.max(
                cv2.matchTemplate(
                    xp_breakdown_title_thresh,
                    self.XP_BREAKDOWN_TEMPLATE,
                    cv2.TM_CCORR_NORMED,
                ))
            if xp_breakdown_title_match > self.REQUIRED_MATCH:
                xp_stats = self._parse_xp_breakdown(y)
                image_title += "_xp_breakdown"
            else:
                score_report_title_image = self.REGIONS[
                    "score_report"].extract_one(y)
                _, score_report_title_thresh = cv2.threshold(
                    score_report_title_image, 150, 255, cv2.THRESH_BINARY)
                score_report_title_match = np.max(
                    cv2.matchTemplate(
                        score_report_title_thresh,
                        self.SCORE_REPORT_TEMPLATE,
                        cv2.TM_CCORR_NORMED,
                    ))
                if score_report_title_match > self.REQUIRED_MATCH:
                    score_report = self._parse_score_report(y)
                    image_title += "_score_report"

            if placed is not None:
                frame.apex.match_summary = MatchSummary(
                    placed=placed,
                    xp_stats=xp_stats,
                    score_report=score_report,
                    image=lazy_upload(
                        image_title,
                        self.REGIONS.blank_out(frame.image),
                        frame.timestamp,
                        selection="last",
                    ),
                )
                _draw_match_summary(frame.debug_image,
                                    frame.apex.match_summary)
                return True

        return False

    def _parse_xp_breakdown(self, y: np.ndarray) -> XPStats:
        """OCR the XP breakdown lines and map recognised stats onto an XPStats."""
        xp_breakdown_image = self.REGIONS["xp_fields"].extract_one(y)
        xp_breakdown_image = cv2.adaptiveThreshold(
            xp_breakdown_image,
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY_INV,
            63,
            -30,
        )
        lines = imageops.tesser_ocr(
            xp_breakdown_image,
            whitelist=string.ascii_letters + string.digits + "() \n",
            engine=imageops.tesseract_lstm_multiline,
        )
        # Canonicalise mis-read brackets before parsing.
        for s1, s2 in self.SUBS:
            lines = lines.replace(s1, s2)

        xp_stats = XPStats()
        for line in lines.splitlines():
            stat_name, stat_value = self._parse_stat(line)
            if stat_name == "Won Match":
                xp_stats.won = True
            elif stat_name == "Top 3 Finish":
                xp_stats.top3_finish = True
            elif stat_name and stat_value is not None:
                # Require stat value parsed correctly.
                if stat_name == "Time Survived":
                    xp_stats.time_survived = mmss_to_seconds(stat_value)
                elif stat_name == "Kills":
                    xp_stats.kills = stat_value
                elif stat_name == "Damage Done":
                    xp_stats.damage_done = stat_value
                elif stat_name == "Revive Ally":
                    xp_stats.revive_ally = stat_value
                elif stat_name == "Respawn Ally":
                    xp_stats.respawn_ally = stat_value
        return xp_stats

    def _parse_score_report(self, y: np.ndarray) -> ScoreReport:
        """OCR the ranked score report: entry cost, kills, placement,
        RP adjustment and current RP."""
        rp_report_image = self.REGIONS["rp_fields"].extract_one(y)

        # The report is three fixed-height (40px) lines; OCR each separately.
        lines = []
        for line in range(3):
            line_im = rp_report_image[line * 40 + 5:(line + 1) * 40 - 7, 5:]
            lines.append(
                imageops.tesser_ocr(line_im,
                                    engine=imageops.tesseract_lstm,
                                    invert=True,
                                    scale=2))

        score_report = ScoreReport()
        for line in lines:
            valid = False
            if ":" in line:
                stat_name, stat_value = line.lower().replace(" ",
                                                             "").split(":", 1)
                if stat_name == "entrycost":
                    score_report.entry_rank = stat_value.lower()
                    valid = True
                elif stat_name == "kills":
                    try:
                        score_report.kills = int(stat_value.replace("o", "0"))
                    except ValueError:
                        logger.warning(
                            f"Could not parse Score Report > kills: {stat_value!r} as int"
                        )
                    else:
                        valid = True
                elif stat_name == "matchplacement":
                    stat_value = stat_value.replace("#", "")
                    try:
                        score_report.placement = int(
                            stat_value.replace("o", "0").split("/", 1)[0])
                    except ValueError:
                        logger.warning(
                            f"Could not parse Score Report > placement: {stat_value!r} as placement"
                        )
                    else:
                        valid = True
            if not valid:
                logger.warning(f"Unknown line in score report: {line!r}")

        score_adjustment_image = self.REGIONS["score_adjustment"].extract_one(
            y)
        score_adjustment_text = imageops.tesser_ocr(
            score_adjustment_image,
            engine=imageops.tesseract_lstm,
            invert=True,
            scale=1)
        score_adjustment_text_strip = (textops.strip_string(
            score_adjustment_text, alphabet=string.digits + "RP+-").replace(
                "RP", "").replace("+", "").replace("-", ""))
        try:
            score_report.rp_adjustment = int(score_adjustment_text_strip)
        except ValueError:
            logger.warning(
                f"Could not parse Score Report > score adjustment: {score_adjustment_text!r} as valid adjustment"
            )

        current_rp_image = self.REGIONS["current_rp"].extract_one(y)
        current_rp_text = imageops.tesser_ocr(current_rp_image,
                                              engine=imageops.tesseract_lstm,
                                              invert=True,
                                              scale=1)
        current_rp_text_strip = textops.strip_string(current_rp_text,
                                                     alphabet=string.digits +
                                                     "RP").replace("RP", "")
        try:
            score_report.current_rp = int(current_rp_text_strip)
        except ValueError:
            logger.warning(
                f"Could not parse Score Report > current RP: {current_rp_text!r} as valid RP"
            )

        return score_report

    def _parse_stat(self, line: str) -> Tuple[Optional[str], Optional[int]]:
        """Fuzzy-match an XP breakdown line to a known stat name and parse the
        bracketed value; returns (None, None) when the line is unrecognised."""
        if len(line) > 5:
            # Lines look like "Kills (x3)" - name before the bracket, value inside.
            parts = line.split("(", 1)
            if len(parts) > 1:
                stat_name_s, stat_value_s = parts[:2]
            else:
                stat_name_s, stat_value_s = line, None
            match, stat_name_normed = textops.matches_ratio(
                stat_name_s.replace(" ", "").upper(), self.XP_STATS_NORMED)
            if match > 0.8:
                stat_name = self.XP_STATS[self.XP_STATS_NORMED.index(
                    stat_name_normed)]
                if stat_value_s:
                    stat_value = self._parse_stat_number(stat_value_s)
                    if stat_value is not None:
                        logger.info(
                            f"Parsed {stat_name}={stat_value} ({line!r} ~ {match:1.2f})"
                        )
                        return stat_name, stat_value
                    else:
                        logger.info(
                            f"Unable to parse value for {stat_name} ({line!r} ~ {match:1.2f})"
                        )
                        return stat_name, None
                else:
                    return stat_name, None
            else:
                logger.warning(f"Don't know how to parse stat {line!r}")
                return None, None
        elif line:
            logger.warning(f"Ignoring stat {line!r} - too short")
            return None, None
        else:
            return None, None

    def _parse_stat_number(self, stat_value_s: str) -> Optional[int]:
        """Parse an OCRed stat value to an int, tolerating common misreads."""
        stat_value_s = stat_value_s.upper()

        # Common errors in parsing digits.
        for s1, s2 in "D0", "I1", "L1":
            stat_value_s = stat_value_s.replace(s1, s2)

        # Remove brackets, spaces, X (e.g. in "Kills (x3)"), time separators, commas.
        stat_value_s = "".join(c for c in stat_value_s if c not in "() X:.,;|")

        try:
            return int(stat_value_s)
        except ValueError:
            return None

    def _get_placed(self, frame: Frame) -> Optional[int]:
        """OCR the "#N" placement; return N when it is a plausible placement
        (1-30), otherwise None."""
        placed_image = self.REGIONS["squad_placed"].extract_one(
            frame.image).copy()
        cv2.normalize(placed_image, placed_image, 0, 255, cv2.NORM_MINMAX)
        # Isolate the orange/red placement text by colour.
        orange = cv2.inRange(
            placed_image,
            np.array(self.PLACED_COLOUR) - 40,
            np.array(self.PLACED_COLOUR) + 40,
        )
        text = imageops.tesser_ocr(orange, whitelist=string.digits + "#")
        if text and text[0] == "#":
            try:
                placed = int(text[1:])
            except ValueError:
                logger.warning(f"Could not parse {text!r} as number")
                return None
            else:
                logger.debug(f"Parsed {text!r} as {placed}")
                if 1 <= placed <= 30:
                    return placed
                else:
                    logger.warning(f"Rejected placed={placed}")
        else:
            logger.warning(f'Rejected placed text {text!r} - did not get "#"')
        return None
コード例 #14
0
class EndgameProcessor(Processor):
    """Detects the Overwatch endgame (post-match) screen and parses result, map and stats."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # Result text shown at the top of the endgame screen
    RESULTS = ["VICTORY", "DEFEAT", "DRAW"]

    # Stats may be for a single hero, or aggregated as ALL HEROES
    VALID_STATS_NAMES = list(data.heroes.keys()) + [ALL_HEROES]

    def process(self, frame: Frame) -> bool:
        """Parse the endgame screen into frame.overwatch.endgame; True if this frame shows one."""
        if self.detect_endgame(frame):

            result, map_text = self.parse_result_and_map(frame)
            if not result or not map_text:
                return True

            stats = self.parse_stats(frame)

            frame.overwatch.endgame = Endgame(result=result,
                                              map=map_text,
                                              stats=stats)

            return True

        return False

    def parse_stats(self, frame: Frame) -> Optional[Stats]:
        """OCR the stats table, returning generic + hero-specific stats (or None on failure)."""
        hero = self.parse_stats_hero(frame)
        if not hero:
            # ALL HEROES now shows as no text - just parse empty as ALL HEROES and detect failed parses
            hero = ALL_HEROES
        if hero in self.VALID_STATS_NAMES:
            stats = dict(
                zip(
                    [s.name for s in data.generic_stats],
                    big_noodle.ocr_all_int(self.REGIONS["stats"].extract(
                        frame.image),
                                           channel="max",
                                           height=56),
                ))
            logger.debug(f"Parsed stats: {stats}")

            if hero == ALL_HEROES and sum(v is not None
                                          for v in stats.values()) <= 2:
                # because we parse unknowns as ALL HEROES, if the stats failed to parse this is probably not a stats screen
                logger.info(
                    f"Did not get valid stats for potential ALL HEROES stats - ignoring"
                )
                return None

            if stats["objective time"] is not None:
                # objective time is displayed as MM:SS - convert to seconds
                stats["objective time"] = textops.mmss_to_seconds(
                    stats["objective time"])
                logger.debug(
                    f'Transformed MMSS objective time to {stats["objective time"]}'
                )

            if hero == ALL_HEROES:
                hero_specific_stats = None
            else:
                stat_names_row_1 = [s.name for s in data.heroes[hero].stats[0]]
                stat_names_row_2 = [s.name for s in data.heroes[hero].stats[1]]
                stat_names = stat_names_row_1 + stat_names_row_2
                logger.debug(
                    f"Hero: {hero} has {len(stat_names)} hero specific stats: {stat_names}"
                )

                images = self.REGIONS[f"hero_stats_{len(stat_names)}"].extract(
                    frame.image)
                # Normalise each stat image against its 98th percentile.
                # (np.float was removed in NumPy 1.20 - use the builtin float)
                normed = [
                    ((image.astype(float) / np.percentile(image, 98)) *
                     255).clip(0, 255).astype(np.uint8) for image in images
                ]
                stat_values = digit.ocr_images(normed[:len(stat_names)],
                                               scale=0.73)
                hero_specific_stats = dict(zip(stat_names, stat_values))
                logger.info(f"Parsed {hero} stats: {hero_specific_stats}")

            return Stats(
                hero,
                eliminations=stats["eliminations"],
                objective_kills=stats["objective kills"],
                objective_time=stats["objective time"],
                hero_damage_done=stats["hero damage done"],
                healing_done=stats["healing done"],
                deaths=stats["deaths"],
                hero_specific_stats=hero_specific_stats,
            )
        elif hero:
            # was logging.error - use the module-level logger for consistency
            logger.error(
                f"Parsed hero name as {hero!r} but was not in list of valid names"
            )
            return None
        else:
            return None

    # (hero name, template) pairs for every hero's stat-name templates
    HERO_STAT_TEMPLATES = [(h, t) for h in VALID_STATS_NAMES
                           for t in load_hero_templates(h)]
    HERO_NAME_TEMPLATE_MATCH_THRESHOLD = 0.3

    def parse_stats_hero(self, frame: Frame) -> Optional[str]:
        """Identify which hero's stats are shown by template-matching the hero name."""
        hero_image = np.min(self.REGIONS["hero_stat_name"].extract_one(
            frame.image),
                            axis=2)
        hero_image_thresh = cv2.adaptiveThreshold(
            hero_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
            31, -10)
        # TM_SQDIFF_NORMED: lower is better, so the ascending sort puts the best match first
        matches = sorted([(np.min(
            cv2.matchTemplate(hero_image_thresh, t, cv2.TM_SQDIFF_NORMED)), n)
                          for (n, t) in self.HERO_STAT_TEMPLATES])
        logger.debug("Found hero stat matches: " +
                     ", ".join(f"({n}: {m:1.2f})"
                               for (m, n) in matches[:5]) + "...")
        if matches[0][0] < self.HERO_NAME_TEMPLATE_MATCH_THRESHOLD:
            hero = matches[0][1]
            logger.info(f"Classifying stats hero as {hero}")
            return hero
        else:
            logger.debug("Could not identify hero")
            return None

    def parse_result_and_map(
            self, frame: Frame) -> Tuple[Optional[str], Optional[str]]:
        """OCR the match result and map name; (None, None) if the result is unreadable."""
        result_im = self.REGIONS["result"].extract_one(frame.image)
        gray = np.max(result_im, axis=2)
        # mask out white/gray text (this is map and match time info)
        white_text = ((gray > 100) &
                      (np.ptp(result_im, axis=2) < 20)).astype(np.uint8) * 255
        white_text = cv2.erode(white_text, None)
        white_text = np.sum(white_text, axis=0) / 255
        # first column with more than 2 white-text pixels marks where the white text starts
        right = np.argmax(white_text > 2)
        if right > 150:
            right -= 10
            logger.info(
                f"Trimming width of result image {gray.shape[1]} -> {right} to cut white text"
            )
            gray = gray[:, :right]
        else:
            right = gray.shape[1]
        result_text = imageops.tesser_ocr(gray,
                                          whitelist="".join(
                                              set("".join(self.RESULTS))),
                                          invert=True)
        result = textops.matches(result_text, self.RESULTS)
        # textops.matches returns a distance per candidate - reject if even the best is > 2
        if np.min(result) > 2:
            logger.warning(
                f"Could not identify result from {result_text!r} (match={np.min(result)})"
            )
            return None, None

        result = self.RESULTS[arrayops.argmin(result)]
        logger.debug(f"Got result {result} from {result_text!r}")
        # TODO: test this with "draw" result
        map_image = self.REGIONS["map_name"].extract_one(frame.image)[:,
                                                                      right:]
        gray = np.min(map_image, axis=2)
        map_text = textops.strip_string(
            imageops.tesser_ocr(gray,
                                whitelist=string.ascii_uppercase + " :'",
                                invert=True,
                                scale=2),
            string.ascii_uppercase + " ",
        )
        logger.debug(f"Parsed map as {map_text}")

        return result, map_text

    LEAVE_GAME_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data",
                     "leave_game_template.png"), 0)
    LEAVE_GAME_TEMPLATE_THRESH = 0.6

    def detect_endgame(self, frame: Frame) -> bool:
        """True if the "leave game" button is visible (i.e. this is the endgame screen)."""
        leave_game_button = self.REGIONS["leave_game_button"].extract_one(
            frame.image)

        gray = np.min(leave_game_button, axis=2)
        _, thresh = cv2.threshold(gray, 0, 255,
                                  cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # TM_SQDIFF_NORMED: invert so that higher means a better match
        frame.overwatch.endgame_match = round(
            1 - float(
                np.min(
                    cv2.matchTemplate(thresh, self.LEAVE_GAME_TEMPLATE,
                                      cv2.TM_SQDIFF_NORMED))), 5)
        return frame.overwatch.endgame_match > self.LEAVE_GAME_TEMPLATE_THRESH
コード例 #15
0
class HeroSelectProcessor(Processor):
    """Detects the "Assemble your team" hero-select screen and reads map, mode and player names."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    ASSEMBLE_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data",
                     "assemble_your_team.png"), 0)
    ASSEMBLE_THRESH = 0.8

    # HSV range selecting the bright, low-saturation "assemble your team" text
    ASSEMBLE_HSV_RANGE = [(0, 0, 200), (255, 15, 255)]

    def process(self, frame: Frame) -> bool:
        """Populate frame.overwatch.assemble_your_team; True if this is the hero-select screen."""
        header = self.REGIONS["assemble_your_team"].extract_one(frame.image)
        header_mask = cv2.inRange(
            cv2.cvtColor(header, cv2.COLOR_BGR2HSV_FULL),
            self.ASSEMBLE_HSV_RANGE[0],
            self.ASSEMBLE_HSV_RANGE[1],
        )
        match = float(
            np.max(
                cv2.matchTemplate(header_mask, self.ASSEMBLE_TEMPLATE,
                                  cv2.TM_CCORR_NORMED)))

        frame.overwatch.assemble_your_team_match = round(match, 5)
        if match <= self.ASSEMBLE_THRESH:
            return False

        # Map name: OCR the darkest channel after an Otsu-derived threshold
        map_image = self.REGIONS["map"].extract_one(frame.image)
        map_text = big_noodle.ocr(
            imageops.otsu_thresh_lb_fraction(np.min(map_image, axis=2), 1.1),
            channel=None,
            threshold=None,
        )

        # Mode name: adaptive threshold on the luma channel, then OCR
        mode_image = self.REGIONS["mode"].extract_one(
            frame.image_yuv[:, :, 0])
        mode_thresh = cv2.adaptiveThreshold(
            mode_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            cv2.THRESH_BINARY, 23, -10)
        mode_text = big_noodle.ocr(mode_thresh, channel=None, threshold=None)

        # Player names are rendered in either blue or green - OR the two masks
        name_images = self.REGIONS["blue_names"].extract(frame.image)
        name_masks = [
            cv2.bitwise_or(
                cv2.inRange(im, (200, 145, 0), (255, 255, 130)),
                cv2.inRange(im, (0, 220, 200), (100, 255, 255)),
            ) for im in name_images
        ]
        names = [
            big_noodle.ocr(im[:, :, 1], channel=None, threshold=mask)
            for im, mask in zip(name_images, name_masks)
        ]

        frame.overwatch.assemble_your_team = AssembleYourTeam(
            map=map_text,
            mode=mode_text,
            blue_names=names,
            is_in_queue=self.detect_in_queue(frame),
        )
        return True

    TIME_ELAPSED_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "time_elapsed.png"), 0)
    TIME_ELAPSED_MATCH_THRESHOLD = 0.75

    def detect_in_queue(self, frame: Frame) -> bool:
        """True if the "time elapsed" queue timer is visible on this frame."""
        region = self.REGIONS["time_elapsed"].extract_one(
            frame.image_yuv[:, :, 0])

        _, thresh = cv2.threshold(region, 130, 255, cv2.THRESH_BINARY)
        match = float(
            np.max(
                cv2.matchTemplate(thresh, self.TIME_ELAPSED_TEMPLATE,
                                  cv2.TM_CCORR_NORMED)))
        logger.debug(f"Time elapsed match={match:.1f}")

        return match > self.TIME_ELAPSED_MATCH_THRESHOLD
コード例 #16
0
class TopHudProcessor(Processor):
    """Parses the Valorant top HUD: round score, team agents, ult status and spike holder."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    AGENT_TEMPLATES = {
        name: load_agent_template(
            os.path.join(os.path.dirname(__file__), "data", "agents",
                         name.lower() + ".png"))
        for name in agents
    }
    # The right-hand team's portraits are mirrored - flip templates horizontally
    AGENT_TEMPLATES_FLIP = {
        name: (images[0][:, ::-1], images[1][:, ::-1])
        for name, images in AGENT_TEMPLATES.items()
    }
    AGENT_TEMPLATE_REQUIRED_MATCH = 0.1

    # Expected 1D profile of the "has ult" indicator: bright edges, dark centre.
    # (np.float was removed in NumPy 1.20 - use the builtin float)
    HAVE_ULT_SIGNAL = np.array([1] * 5 + [0] * 44 + [1] * 5, dtype=float)

    SPIKE_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "spike.png"), 0)

    def process(self, frame: Frame) -> bool:
        """Populate frame.valorant.top_hud; True if at least one score was read."""
        teams = self._parse_teams(frame)

        frame.valorant.top_hud = TopHud(
            score=self.parse_score(frame),
            teams=teams,
            has_ult_match=self._parse_ults(frame, teams),
            has_spike_match=self._parse_spike(frame, teams),
        )
        draw_top_hud(frame.debug_image, frame.valorant.top_hud)

        return frame.valorant.top_hud.score[
            0] is not None or frame.valorant.top_hud.score[1] is not None

    def parse_score(self, frame: Frame) -> Tuple[Optional[int], Optional[int]]:
        """OCR the two round-score numbers; None for a score that cannot be read."""
        score_ims = self.REGIONS["scores"].extract(frame.image)
        score_gray = [np.min(im, axis=2) for im in score_ims]
        score_norm = [
            imageops.normalise(im, bottom=80, top=100) for im in score_gray
        ]

        score = imageops.tesser_ocr_all(score_norm,
                                        expected_type=int,
                                        invert=True,
                                        engine=din_next_regular_digits)
        logger.debug(f"Got score={score}")
        return score[0], score[1]

    def _parse_teams(self, frame: Frame) -> Tuple[TeamComp, TeamComp]:
        """Identify the agent in each of the 10 portrait slots (None when blurred/unmatched)."""
        # local renamed from "agents" to avoid shadowing the module-level agents list
        slots = []
        for i, agent_im in enumerate(self.REGIONS["teams"].extract(
                frame.image)):
            # Low Laplacian variance means the portrait is blurred or absent
            blurlevel = cv2.Laplacian(agent_im, cv2.CV_64F).var()
            if blurlevel < 100:
                slots.append(None)
                logger.debug(f"Got agent {i}=None (blurlevel={blurlevel:.2f})")
            else:
                templates = self.AGENT_TEMPLATES
                if i > 4:
                    templates = self.AGENT_TEMPLATES_FLIP
                match, r_agent = imageops.match_templates(
                    agent_im,
                    templates,
                    method=cv2.TM_SQDIFF_NORMED,
                    required_match=self.AGENT_TEMPLATE_REQUIRED_MATCH,
                    use_masks=True,
                    previous_match_context=(self.__class__.__name__,
                                            "_parse_teams", i),
                )
                agent = r_agent
                if match > self.AGENT_TEMPLATE_REQUIRED_MATCH:
                    # TM_SQDIFF_NORMED: larger is worse - reject weak matches
                    agent = None

                logger.debug(
                    f"Got agent {i}={agent} (best={r_agent}, match={match:.3f}, blurlevel={blurlevel:.1f})"
                )
                slots.append(agent)
        return cast_teams((slots[:5], slots[5:]))

    def _parse_ults(
            self, frame: Frame,
            teams: Tuple[TeamComp, TeamComp]) -> Tuple[FiveOFloat, FiveOFloat]:
        """Per-player "has ult" score: correlate each indicator's colour profile with HAVE_ULT_SIGNAL."""
        ults = []
        for i, ult_im in enumerate(self.REGIONS["has_ult"].extract(
                frame.image)):
            if not teams[i // 5][i % 5]:
                # No agent detected in this slot - nothing to read
                ults.append(None)
                continue

            matches = [0.0]
            ult_hsv = cv2.cvtColor(ult_im, cv2.COLOR_BGR2HSV_FULL)

            ult_col = np.median(ult_hsv, axis=(0, ))
            # np.float was removed in NumPy 1.20 - use the builtin float
            ult_col = ult_col.astype(float)

            # The median pixel value for each channel should be the value of the "yellow"
            # Compute the abs offset from this value
            ult_col = np.abs(ult_col - np.median(ult_col, axis=(0, )))

            for c in range(3):
                # Compute the maximum (filtered with a width 5 boxfilter) value and normalize by this
                # Check both sides of the image, as they may be different and use the lower of the two then clip the higher so it matches
                ult_col_hi = np.convolve(ult_col[:, c], [1 / 5] * 5)
                avg_diff_at_edge = min(
                    np.max(ult_col_hi[:len(ult_col_hi) // 2]),
                    np.max(ult_col_hi[len(ult_col_hi) // 2:]))
                if avg_diff_at_edge < 15:
                    # Not significant difference
                    continue

                have_ult_thresh_1d = np.clip(ult_col[:, c] / avg_diff_at_edge,
                                             0, 1)

                # This leaves have_ult_thresh_1d as a signal [0, 1] where 0 is match to the has ult colour,
                # and 1 is match to the outside

                # Correlate this (normalizing to [-1, 1] to make the correlation normalized) with the expected width for the has ult block
                have_ult_correlation = np.correlate(
                    have_ult_thresh_1d * 2 - 1,
                    self.HAVE_ULT_SIGNAL * 2 - 1) / len(self.HAVE_ULT_SIGNAL)
                have_ult_match = np.max(have_ult_correlation)

                matches.append(have_ult_match)
            have_ult_match = np.max(matches)
            logger.debug(f"Got player {i} has ult match={have_ult_match:.3f}")
            ults.append(round(float(have_ult_match), 3))

        return cast_teams((ults[:5], ults[5:]))

    def _parse_spike(self, frame: Frame, teams: Tuple[TeamComp,
                                                      TeamComp]) -> FiveOFloat:
        """Template-match the spike icon beside each friendly (first-team) player."""
        spikes = []
        for i, spike_im in enumerate(self.REGIONS["has_spike"].extract(
                frame.image_yuv[:, :, 0])[:5]):
            if not teams[0][i % 5]:
                spikes.append(None)
                continue

            _, thresh = cv2.threshold(spike_im, 240, 255, cv2.THRESH_BINARY)
            match = np.max(
                cv2.matchTemplate(thresh, self.SPIKE_TEMPLATE,
                                  cv2.TM_CCORR_NORMED))
            spikes.append(round(float(match), 3))

        return cast(FiveOFloat, spikes)
コード例 #17
0
class CombatProcessor(Processor):
    """Detects kill-feed combat events (eliminations, knockdowns, assists) from the Apex HUD."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    REQUIRED_MATCH = 0.75
    TEMPLATES = {
        # these need to go first, so they can mask out the others
        "ASSIST, ELIMINATION":
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data",
                         "assist_elimination.png"), 0),
        "ASSIST, KNOCK DOWN":
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data",
                         "assist_knockdown.png"), 0),
        "ELIMINATED":
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "eliminated.png"),
            0),
        "KNOCKED DOWN":
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data",
                         "knocked_down.png"), 0),
    }

    def eager_load(self):
        """Eagerly load extraction regions."""
        self.REGIONS.eager_load()

    def process(self, frame: Frame) -> bool:
        """Scan the kill-text region for combat events; True if any were found."""
        image_region = self.REGIONS["kill_text"]
        region = image_region.extract_one(frame.image_yuv[:, :, 0])
        thresh = imageops.unsharp_mask(region, 3.5, 6, 254)
        events: List[Event] = []
        for event_type, template in self.TEMPLATES.items():
            match = cv2.matchTemplate(thresh, template, cv2.TM_CCORR_NORMED)
            # Each template may appear up to 4 times (stacked kill-feed lines)
            for _ in range(4):
                mnv, mxv, mnl, mxl = cv2.minMaxLoc(match)
                if mxv > self.REQUIRED_MATCH:

                    # The event text is centered on screen - derive the name
                    # width from the match position relative to the centre
                    width = ((1920 // 2) -
                             (mxl[0] + image_region.regions[0][0])
                             ) * 2 - template.shape[1]
                    left = mxl[0] + template.shape[1]

                    logger.info(
                        f"Saw {event_type} ~ {mxv:1.2f}: w={width}, x={left}")

                    events.append(
                        Event(
                            event_type,
                            width,
                            round(mxv, 4),
                        ))

                    # Suppress this peak so the next iteration finds the next-best match.
                    # Columns must be clamped by shape[1] - the previous shape[0]
                    # clamp was a copy-paste bug that could leave part of the
                    # peak unsuppressed, re-detecting the same event.
                    match[max(0, mxl[1] - 20):min(mxl[1] + 20, match.shape[0]),
                          max(0, mxl[0] - 20):min(mxl[0] +
                                                  20, match.shape[1]), ] = 0
                    # Blank the matched rows so later templates cannot re-match
                    # the same event (clamp by thresh's own height, which is
                    # taller than the matchTemplate result)
                    thresh[max(0, mxl[1] - 10):min(mxl[1] +
                                                   30, thresh.shape[0]), :] = 0
                else:
                    break

        if len(events):
            frame.apex.combat_log = CombatLog(events)
            _draw_log(frame.debug_image, frame.apex.combat_log)
            return True
        else:
            return False
コード例 #18
0
class MinimapProcessor(Processor):
    """Locates the player on the Apex Legends minimap and tracks ring state."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "..", "..", "data", "regions",
                     "16_9.zip"))
    # Template for the spectate banner; when spectating, the minimap sits lower on screen
    SPECTATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "spectate.png"), 0)
    # Maximum acceptable template-match error for a location fix to be trusted
    THRESHOLD = 0.1

    # Offsets from raw map-image coordinates to padded MAP coordinates (set by load_map)
    offset_x = 0
    offset_y = 0

    @classmethod
    def load_map(cls, path: str):
        """Load the full map image from ``path`` and build the match template.

        Pads the image out to a square, applies a sigmoid-contrast LUT and a
        Gaussian blur, and stores the results on the class: MAP, MAP_TEMPLATE,
        LUT, offset_x and offset_y.
        """
        base = imageops.imread(path, 0)
        # Square canvas with at least 240px of padding on every side
        side = max(base.shape[0] + 480, base.shape[1] + 480)
        cls.offset_y = (side - base.shape[0]) // 2
        cls.offset_x = (side - base.shape[1]) // 2
        cls.MAP = cv2.copyMakeBorder(
            base,
            cls.offset_y,
            cls.offset_y,
            cls.offset_x,
            cls.offset_x,
            cv2.BORDER_CONSTANT,
        )

        # Sigmoid contrast curve applied via a 256-entry lookup table
        curve = 1 / (1 + np.exp(-np.linspace(-5, 10, 256) * 1.5))
        cls.LUT = (curve * 255).astype(np.uint8)
        cls.MAP_TEMPLATE = cv2.GaussianBlur(cv2.LUT(cls.MAP, cls.LUT), (0, 0),
                                            1.5)

        cls.MAP = cls.MAP_TEMPLATE

    MAP_VERSION = 0

    def __init__(self, use_tflite: bool = True):
        """Set up the processor and load the minimap-filter model.

        use_tflite: load the TFLite model when True, otherwise the full
        TensorFlow model.
        """
        # Recent rotated/unrotated decisions - used to pick which orientation to try first
        self.map_rotated = deque(maxlen=10)
        self.map_rotate_in_config = None

        data_dir = os.path.join(os.path.dirname(__file__), "data")
        if use_tflite:
            from overtrack_cv.core.tflite import TFLiteModel

            self.model = TFLiteModel(
                os.path.join(data_dir, "minimap_filter.tflite"))
        else:
            from overtrack_cv.core.tf import load_model

            self.model = load_model(os.path.join(data_dir, "minimap_filter"))

        self.current_game: Optional[CurrentGame] = None
        self.current_composite: Optional[RingsComposite] = None

    def eager_load(self):
        """Eagerly load regions and refresh the rotate setting and map ahead of processing."""
        self.REGIONS.eager_load()
        self._check_for_rotate_setting()
        self._check_for_map_update()

    def _check_for_rotate_setting(self):
        """Read hud_setting_minimapRotate from the Apex profile.cfg.

        Sets self.map_rotate_in_config to True/False when the setting can be
        read, or None (autodetect) when it cannot be found or read.
        """
        try:
            # noinspection PyUnresolvedReferences
            from client.util import knownpaths

            games_path = knownpaths.get_path(knownpaths.FOLDERID.SavedGames,
                                             knownpaths.UserHandle.current)
            config_path = os.path.join(games_path, "Respawn", "Apex",
                                       "profile", "profile.cfg")
            value = None
            with open(config_path) as f:
                # Iterate the file directly; the last occurrence wins
                for line in f:
                    if line.startswith("hud_setting_minimapRotate"):
                        value = line.split()[1].strip().replace('"', "")
            if value:
                pvalue = value.lower() in ["1", "true"]
                logger.info(
                    f"Extracted hud_setting_minimapRotate: {value!r} from {config_path} - setting rotate to {pvalue}"
                )
                self.map_rotate_in_config = pvalue
            else:
                logger.info(
                    f"Could not find hud_setting_minimapRotate in {config_path} - setting rotate to autodetect"
                )
                self.map_rotate_in_config = None

        # except Exception (not bare except) so KeyboardInterrupt/SystemExit propagate
        except Exception:
            logger.exception(
                f"Failed to read hud_setting_minimapRotate from profile - setting rotate to autodetect"
            )
            self.map_rotate_in_config = None

    def _check_for_map_update(self):
        """Check the remote endpoint for a newer map image and load it if found.

        Downloads the new map into the local maps directory (reusing a cached
        copy when present) and swaps it in via load_map(). All failures are
        logged and swallowed - this is a best-effort update.
        """
        logger.info("Checking for map updates")
        try:
            r = requests.get(
                "https://overtrack-client-2.s3-us-west-2.amazonaws.com/dynamic/apex-map/current.json"
            )
            logger.info(f"Checking for map update: {r} {r.content!r}")
            if r.status_code == 404:
                logger.info("Map updates not enabled")
                return

            data = r.json()
            if data["version"] <= self.MAP_VERSION:
                logger.info(
                    f'Current version {self.MAP_VERSION} is up to date - update version is {data["version"]}'
                )
                return
            else:
                maps_path = os.path.join(os.path.dirname(__file__), "data",
                                         "maps")
                os.makedirs(maps_path, exist_ok=True)

                map_path = os.path.join(maps_path, f'{data["version"]}.png')
                if os.path.exists(map_path):
                    # Try the cached download first; delete it if unreadable
                    try:
                        self.__class__.load_map(map_path)
                    except Exception:
                        logger.info("Map corrupted")
                        os.remove(map_path)
                    else:
                        logger.info(
                            f'Loaded map {data["version"]} from {map_path}')
                        return

                logger.info(
                    f'Downloading map {data["version"]} from {data["url"]} to {map_path}'
                )
                with requests.get(data["url"], stream=True) as r:
                    r.raise_for_status()
                    with open(map_path, "wb") as f:
                        for chunk in r.iter_content(chunk_size=8192):
                            if chunk:
                                f.write(chunk)
                self.__class__.load_map(map_path)

        # except Exception (not bare except) so KeyboardInterrupt/SystemExit propagate
        except Exception:
            logger.exception("Failed to check for map update")

    def update(self):
        """Periodic refresh: re-read the rotate setting and check for a newer map."""
        self._check_for_rotate_setting()
        self._check_for_map_update()

    def process(self, frame: Frame):
        spectate_image = frame.image_yuv[40:40 + 30, 670:670 + 130, 0]
        _, spectate_image_t = cv2.threshold(spectate_image, 220, 255,
                                            cv2.THRESH_BINARY)
        is_spectate = np.max(
            cv2.matchTemplate(spectate_image_t, self.SPECTATE,
                              cv2.TM_CCORR_NORMED)) > 0.9

        if not is_spectate:
            map_image = frame.image[50:50 + 240, 50:50 + 240]
        else:
            map_image = frame.image[114:114 + 240, 50:50 + 240]

        map_image_y = cv2.cvtColor(map_image, cv2.COLOR_BGR2YUV)[:, :, 0]
        map_image_y = cv2.LUT(map_image_y, self.LUT)
        map_image_y = cv2.GaussianBlur(map_image_y, (0, 0),
                                       1.5).astype(np.float)
        map_image_y *= 1.5
        map_image_y = np.clip(map_image_y, 0, 255).astype(np.uint8)

        t0 = time.perf_counter()
        filtered_minimap, filtered_rings = [
            np.clip(p[0], 0, 255).astype(np.uint8) for p in self.model.predict(
                np.expand_dims(map_image, axis=0).astype(np.float32))
        ]
        logger.debug(f"predict {(time.perf_counter() - t0) * 1000:.2f}")

        filtered = np.concatenate(
            (
                np.expand_dims(map_image_y[8:-8, 8:-8], axis=-1),
                cv2.resize(
                    filtered_rings,
                    (filtered_minimap.shape[1], filtered_minimap.shape[0]),
                    interpolation=cv2.INTER_NEAREST,
                ),
            ),
            axis=-1,
        )

        # location, min_loc, min_val = self._get_location(filtered[:, :, 0])
        location = None
        zoom = self._get_zoom(frame)

        t0 = time.perf_counter()
        if self.map_rotate_in_config or (
                len(self.map_rotated) and
            (sum(self.map_rotated) / len(self.map_rotated)) > 0.75):
            # 75% of last 10 frames said map was rotated - check rotated first
            logger.debug(f"Checking rotated first")
            bearing = self._get_bearing(frame, frame.debug_image)
            if bearing is not None:
                location = self._get_location(map_image_y, bearing, zoom=zoom)
                logger.debug(f"Got rotated location={location}")
            if (location is None or location.match > self.THRESHOLD
                ) and self.map_rotate_in_config is None:
                # try unrotated
                alt_location = self._get_location(map_image_y, zoom=zoom)
                logger.debug(f"Got unrotated location={alt_location}")
                if location is None or alt_location.match < location.match:
                    location = alt_location
                    bearing = None
        else:
            logger.debug(f"Checking unrotated first")
            location = self._get_location(map_image_y, zoom=zoom)
            logger.debug(f"Got unrotated location={location}")
            if location.match > self.THRESHOLD and self.map_rotate_in_config is None:
                bearing = self._get_bearing(frame, frame.debug_image)
                if bearing is not None:
                    alt_location = self._get_location(map_image_y,
                                                      bearing,
                                                      zoom=zoom)
                    logger.debug(f"Got rotated location={alt_location}")
                    if alt_location.match < location.match:
                        location = alt_location
                    else:
                        bearing = None
        logger.debug(f"match {(time.perf_counter() - t0) * 1000:.2f}")

        logger.debug(f"Got location: {location}")
        if location:
            self.map_rotated.append(location.bearing is not None)

            t0 = time.perf_counter()
            self._update_composite(frame, location, filtered_rings)
            logger.debug(
                f"update composite {(time.perf_counter() - t0) * 1000:.2f}")

            t0 = time.perf_counter()
            blur = cv2.GaussianBlur(filtered, (0, 0), 4)

            blur[:, :, 0] = 0
            edges = self.filter_edge(blur, 50, 20, 20, 10)
            edges[:5, :] = 0
            edges[-5:, :] = 0
            edges[:, :5] = 0
            edges[:, -5:] = 0
            logger.debug(
                f"filter edges {(time.perf_counter() - t0) * 1000:.2f}")

            t0 = time.perf_counter()
            frame.apex.minimap = Minimap(
                location,
                None,
                None,
                spectate=is_spectate,
                rings_composite=self.current_composite,
                version=3,
            )
            logger.debug(
                f"get circles {(time.perf_counter() - t0) * 1000:.2f}")

            try:
                _draw_map_location(
                    frame.debug_image,
                    frame.minimap,
                    self.MAP,
                    self.offset_x,
                    self.offset_y,
                    self.MAP_TEMPLATE,
                    filtered,
                    edges,
                    self.current_composite,
                )
            except:
                logger.exception("Failed to draw debug map location")

            return True
        elif location:
            try:
                _draw_map_location(
                    frame.debug_image,
                    Minimap(
                        location,
                        None,
                        None,
                    ),
                    self.MAP,
                    self.MAP_TEMPLATE,
                    filtered,
                    None,
                    self.current_composite,
                )
            except Exception as e:
                pass
                # traceback.print_exc(e)
        return False

    def _get_zoom(self, frame):
        zoom = 1
        if "game_time" in frame:
            if frame.game_time > 1100:
                # round 5 closing / ring 6
                zoom = 0.375
            elif frame.game_time > 980:
                # round 4 closing / ring 5
                zoom = 0.75
        return zoom

    def _get_bearing(self, frame: Frame,
                     debug_image: Optional[np.ndarray]) -> Optional[int]:
        """OCR the compass bearing from the minimap HUD.

        Returns the bearing in degrees (0-360), or None when OCR fails or
        produces an out-of-range value.

        The original trailing `if bearing is not None ... else` was dead
        code: the preceding check already returned for None.
        """
        bearing_image = self.REGIONS["bearing"].extract_one(
            frame.image_yuv[:, :, 0])
        _, bearing_thresh = cv2.threshold(bearing_image, 190, 255,
                                          cv2.THRESH_BINARY)

        if debug_image is not None:
            # Paste the raw and thresholded bearing crops into the debug view
            debug_image[90:90 + bearing_image.shape[0],
                        1020:1020 + bearing_image.shape[1], ] = cv2.cvtColor(
                            bearing_image, cv2.COLOR_GRAY2BGR)
            debug_image[90:90 + bearing_image.shape[0],
                        1100:1100 + bearing_image.shape[1], ] = cv2.cvtColor(
                            bearing_thresh, cv2.COLOR_GRAY2BGR)

        bearing = imageops.tesser_ocr(
            bearing_thresh,
            expected_type=int,
            engine=ocr.tesseract_ttlakes_digits,
            warn_on_fail=False,
        )
        # tesser_ocr returns None on failure; also reject impossible angles
        if bearing is None or not 0 <= bearing <= 360:
            logger.debug(f"Got invalid bearing: {bearing}")
            return None
        logger.debug(f"Got bearing={bearing}")
        return bearing

    def _get_location(
        self,
        region: np.ndarray,
        bearing: Optional[int] = None,
        zoom: Optional[float] = None,
        base_template=None,
    ) -> Location:
        """Template-match the minimap crop `region` against the map.

        If `bearing` is given the template is rotated to match the player's
        view before matching, and the match coordinates are mapped back
        through the inverse rotation. `zoom` scales the crop for late-game
        zoomed minimaps.

        Returns a Location holding map coordinates (offset-corrected), the
        TM_SQDIFF_NORMED match value (lower is better), and the bearing/zoom
        used.
        """
        if base_template is None:
            base_template = self.MAP_TEMPLATE

        # Treat zoom=None as no scaling; previously `int(240 * zoom)` below
        # raised TypeError when zoom was left at its default.
        scale = zoom if zoom else 1

        rot = None
        if bearing is None:
            map_template = base_template
        else:
            height, width = base_template.shape[:2]
            rot = cv2.getRotationMatrix2D(
                (base_template.shape[1] // 2, base_template.shape[0] // 2),
                bearing - 360,
                1,
            )
            map_template = cv2.warpAffine(base_template, rot, (width, height))

        if scale != 1:
            region = cv2.resize(region, (0, 0), fx=scale, fy=scale)

        match = cv2.matchTemplate(map_template, region, cv2.TM_SQDIFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)

        # Centre of the (scaled) 240px minimap crop, less an 8px border
        coords = (
            min_loc[0] + int(240 * scale) // 2 - 8,
            min_loc[1] + int(240 * scale) // 2 - 8,
        )
        if rot is not None:
            # Map the match position back through the inverse rotation
            inv = cv2.invertAffineTransform(rot)
            coords = cv2.transform(np.array([[coords]]), inv)[0][0]

        return Location(
            tuple(np.array(coords) - (self.offset_x, self.offset_y)),
            min_val,
            bearing=bearing,
            zoom=zoom,
        )

    def _update_composite(self, frame: Frame, location: Location,
                          filtered: np.ndarray) -> None:
        """Accumulate observed ring-edge pixels into the per-game ring
        composites, stored at half map resolution.

        `filtered` carries inner-ring edges in channel 0 and outer-ring
        edges in channel 1 (see the `to_add` construction below). A new
        RingsComposite is started whenever the current game changes.
        """
        current_game: Optional[CurrentGame] = getattr(frame, "current_game",
                                                      None)
        game_time: Optional[float] = getattr(frame, "game_time", None)

        if current_game:
            if current_game is not self.current_game:
                logger.info(
                    f"Creating new RingsComposite for {frame.current_game}")
                self.current_game = frame.current_game
                self.current_composite = RingsComposite()
        else:
            # No known game - drop any stale composite state
            self.current_game = None
            self.current_composite = None

        if not (self.current_game and game_time and self.current_composite):
            return

        ring_state = apex_data.get_round_state(game_time)

        # Collect (ring index, edge-channel image, label) entries to add
        to_add = []
        if ring_state.ring_index and not ring_state.ring_closing:
            to_add.append(
                (ring_state.ring_index, filtered[:, :, 1], "outer ring"))
        else:
            logger.debug(
                f"Not adding outer ring to composite for game time {s2ts(game_time)}, closing={ring_state.ring_closing}"
            )

        if ring_state.next_ring_index:
            to_add.append(
                (ring_state.next_ring_index, filtered[:, :,
                                                      0], "inner ring"))
        else:
            logger.debug(
                f"Not adding inner ring to composite for game time {s2ts(game_time)}"
            )

        for index, image, name in to_add:
            logger.debug(
                f"Adding {name} to ring composite {index} (approx {np.sum(image > 128)} observed ring pixels) for game time {s2ts(game_time)}"
            )

            # TODO: handle zoom, handle non-rotating
            if location.bearing is None:
                image_t = image
            else:
                # Pad before rotating so corners are not clipped
                image = cv2.copyMakeBorder(
                    image,
                    image.shape[0] // 5,
                    image.shape[0] // 5,
                    image.shape[1] // 5,
                    image.shape[1] // 5,
                    cv2.BORDER_CONSTANT,
                )
                # NOTE(review): warpAffine's dsize is (width, height) but is
                # given (shape[0], shape[1]) here - only safe while the
                # padded crop is square; confirm.
                image_t = cv2.warpAffine(
                    image,
                    cv2.getRotationMatrix2D(
                        (image.shape[1] // 2, image.shape[0] // 2),
                        360 - location.bearing,
                        1,
                    ),
                    (image.shape[0], image.shape[1]),
                )
            image_t = cv2.resize(image_t, (0, 0),
                                 fx=location.zoom,
                                 fy=location.zoom)

            if index not in self.current_composite.images:
                # Composites are kept at half map-template resolution
                self.current_composite.images[
                    index] = SerializableRingsComposite(
                        np.zeros((
                            self.MAP_TEMPLATE.shape[0] // 2,
                            self.MAP_TEMPLATE.shape[1] // 2,
                        )))
            target = self.current_composite.images[index].array

            # TODO: handle borders
            try:
                y = location.y // 2 - image_t.shape[0] // 2
                x = location.x // 2 - image_t.shape[1] // 2
                # np.float was removed in NumPy 1.24 - use np.float64
                target[y:y + image_t.shape[0],
                       x:x + image_t.shape[1]] += image_t.astype(
                           np.float64) / 255.0
            except Exception:
                logger.exception("Failed to add ring to composite")

    def filter_edge(
        self,
        im: np.ndarray,
        thresh: int,
        edge_type_box_size: int,
        edge_type_widening: int,
        edge_extraction_size: int,
    ) -> np.ndarray:
        """Extract thin edges from `im` as axis-aligned local maxima.

        Pixels are first classified by dominant edge orientation (whether a
        thin vertical or horizontal box average responds more strongly),
        then kept only where they are a local maximum along the matching
        axis and brighter than `thresh`.
        """
        bright = im > thresh

        # Orientation classification: compare thin vertical vs horizontal
        # box-filter responses, then widen/clean the resulting mask.
        vertical_response = cv2.boxFilter(im,
                                          0, (2, edge_type_box_size),
                                          normalize=True)
        horizontal_response = cv2.boxFilter(im,
                                            0, (edge_type_box_size, 2),
                                            normalize=True)
        is_vertical = (vertical_response >
                       horizontal_response).astype(np.uint8)
        is_vertical = cv2.dilate(is_vertical,
                                 np.ones((1, edge_type_widening)))
        is_vertical = cv2.erode(is_vertical,
                                np.ones((edge_type_widening, 1)))

        # Local-maximum test per axis: a pixel survives when dilating along
        # that axis leaves it unchanged.
        col_peaks = bright & (cv2.dilate(
            im, np.ones((1, edge_extraction_size))) == im)
        row_peaks = bright & (cv2.dilate(
            im, np.ones((edge_extraction_size, 1))) == im)

        # Pick the peak set matching each pixel's classified orientation
        return (col_peaks * is_vertical) + (row_peaks * (1 - is_vertical))
コード例 #19
0
class PostgameProcessor(Processor):
    """Parses the Valorant postgame screen.

    Detects the VICTORY/DEFEAT banner, then extracts the final score, map
    and game mode, and - when the scoreboard sort mode can be read - the
    per-player scoreboard stats.
    """

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    SCOREBOARD_REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions",
                     "scoreboard", "16_9.zip"))
    # Grayscale result-banner templates keyed by outcome
    RESULTS = {
        "victory":
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "victory.png"), 0),
        "defeat":
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "defeat.png"), 0),
    }
    RESULT_TEMPLATE_REQUIRED_MATCH = 0.8

    # Normalised text of the two recognised scoreboard sort modes
    SCOREBOARD_SORT_MODES = [
        textops.strip_string("Individually Sorted").upper(),
        textops.strip_string("Grouped By Team").upper(),
    ]

    AGENT_TEMPLATES = {
        name: load_agent_template(
            os.path.join(os.path.dirname(__file__), "data", "agents",
                         name.lower() + ".png"))
        for name in agents
    }
    # Matched with TM_SQDIFF (lower is better): maximum acceptable distance
    AGENT_TEMPLATE_REQUIRED_MATCH = 50

    def process(self, frame: Frame) -> bool:
        """Populate frame.valorant.postgame (and .scoreboard when readable).

        Returns True iff the postgame result banner was matched.
        """
        result_y = self.REGIONS["result"].extract_one(frame.image_yuv[:, :, 0])
        _, result_thresh = cv2.threshold(result_y, 220, 255, cv2.THRESH_BINARY)
        match, result = imageops.match_templates(
            result_thresh,
            self.RESULTS,
            cv2.TM_CCORR_NORMED,
            required_match=self.RESULT_TEMPLATE_REQUIRED_MATCH,
            previous_match_context=(self.__class__.__name__, "result"),
        )

        if match <= self.RESULT_TEMPLATE_REQUIRED_MATCH:
            return False

        logger.debug(f"Round result is {result} with match={match}")

        score_ims = self.REGIONS["scores"].extract(frame.image)
        score_gray = [
            imageops.normalise(np.max(im, axis=2)) for im in score_ims
        ]
        scores = imageops.tesser_ocr_all(
            score_gray,
            expected_type=int,
            engine=din_next_regular_digits,
            invert=True,
        )
        logger.debug(f"Round score is {scores}")

        frame.valorant.postgame = Postgame(
            victory=result == "victory",
            score=(scores[0], scores[1]),
            map=imageops.ocr_region(frame, self.REGIONS, "map"),
            game_mode=imageops.ocr_region(frame, self.REGIONS,
                                          "game_mode"),
            image=lazy_upload("postgame",
                              self.REGIONS.blank_out(frame.image),
                              frame.timestamp),
        )
        draw_postgame(frame.debug_image, frame.valorant.postgame)

        # Only parse the scoreboard when its sort mode can be recognised
        sort_mode_gray = np.min(
            self.SCOREBOARD_REGIONS["scoreboard_sort_mode"].extract_one(
                frame.image),
            axis=2)
        sort_mode_filt = 255 - imageops.normalise(sort_mode_gray,
                                                  bottom=75)
        sort_mode = imageops.tesser_ocr(sort_mode_filt,
                                        engine=imageops.tesseract_lstm)

        sort_mode_match = max(
            levenshtein.ratio(
                textops.strip_string(sort_mode).upper(), expected)
            for expected in self.SCOREBOARD_SORT_MODES)
        logger.debug(
            f"Got scoreboard sort mode: {sort_mode!r} match={sort_mode_match:.2f}"
        )

        if sort_mode_match > 0.75:
            frame.valorant.scoreboard = self._parse_scoreboard(frame)
            draw_scoreboard(frame.debug_image, frame.valorant.scoreboard)

        return True

    def _parse_scoreboard(self, frame: Frame) -> Scoreboard:
        """Extract per-player rows: agent, name, team and the 8 stat cells."""
        agent_images = self.SCOREBOARD_REGIONS["agents"].extract(frame.image)

        name_images = self.SCOREBOARD_REGIONS["names"].extract(frame.image)

        stat_images = self.SCOREBOARD_REGIONS["stats"].extract(frame.image)
        stat_images_filt = [
            self._filter_statrow_image(im) for im in stat_images
        ]
        # 10 player rows of 8 stat cells each
        stat_image_rows = [
            stat_images_filt[r * 8:(r + 1) * 8] for r in range(10)
        ]

        stats = []
        for i, (agent_im, name_im, stat_row) in enumerate(
                zip(agent_images, name_images, stat_image_rows)):
            agent_match, agent = imageops.match_templates(
                agent_im,
                self.AGENT_TEMPLATES,
                method=cv2.TM_SQDIFF,
                required_match=self.AGENT_TEMPLATE_REQUIRED_MATCH,
                use_masks=True,
                previous_match_context=(self.__class__.__name__, "scoreboard",
                                        "agent", i),
            )
            # TM_SQDIFF: a larger value is a worse match
            if agent_match > self.AGENT_TEMPLATE_REQUIRED_MATCH:
                agent = None

            # Median colour of the row's dark background pixels.
            # NOTE(review): row_color[0] > row_color[2] compares the blue vs
            # red BGR channels - presumably blue rows mark the player's own
            # team; confirm against PlayerStats.
            row_bg = name_im[np.max(name_im, axis=2) < 200]
            # np.int was removed in NumPy 1.24 - astype(int) is equivalent
            row_color = np.median(row_bg, axis=0).astype(int)

            stat = PlayerStats(
                agent,
                imageops.tesser_ocr(
                    self._filter_statrow_image(name_im),
                    engine=imageops.tesseract_lstm,
                ),
                row_color[0] > row_color[2],
                *imageops.tesser_ocr_all(
                    stat_row,
                    expected_type=int,
                    engine=din_next_regular_digits,
                ),
            )
            stats.append(stat)
            logger.debug(
                f"Got player stats: {stat} - agent match={agent_match:.2f}, row colour={tuple(row_color)}"
            )

        return Scoreboard(
            stats,
            image=lazy_upload("scoreboard",
                              self.SCOREBOARD_REGIONS.blank_out(frame.image),
                              frame.timestamp),
        )

    def _filter_statrow_image(self, im):
        """Normalise a stat-cell crop to inverted grayscale (dark text on
        a light background) for OCR.

        NOTE(review): np.max(im_norm) can be 0 for a completely flat crop,
        which would divide by zero - appears not to occur for real
        scoreboard crops; confirm.
        """
        # np.float was removed in NumPy 1.24 - use np.float64
        im_gray = np.min(im, axis=2).astype(np.float64)
        # Background level estimated as the 90th-percentile intensity
        bgcol = np.percentile(im_gray, 90)
        im_norm = im_gray - bgcol
        im_norm = im_norm / np.max(im_norm)
        im = 255 - np.clip(im_norm * 255, 0, 255).astype(np.uint8)
        return im
コード例 #20
0
class TimerProcessor(Processor):
    """Parses the Valorant in-round timer area: the spike-planted
    indicator, the buy-phase banner, and the countdown text."""

    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))
    SPIKE_PLANTED_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "spike_planted.png"), 0
    )
    SPIKE_PLANTED_REQUIRED_MATCH = 0.5
    BUY_PHASE_TEMPLATE = imageops.imread(os.path.join(os.path.dirname(__file__), "data", "buy_phase.png"), 0)

    def process(self, frame: Frame) -> bool:
        """Populate frame.valorant.timer; returns whether the parsed timer
        state is valid."""
        # The spike icon is close to pure red - threshold in BGR space
        spike_planted_im = self.REGIONS["spike_planted"].extract_one(frame.image)
        spike_planted_thresh = cv2.inRange(
            spike_planted_im,
            (0, 0, 130),
            (10, 10, 250),
        )
        spike_planted_match = np.max(
            cv2.matchTemplate(
                spike_planted_thresh,
                self.SPIKE_PLANTED_TEMPLATE,
                cv2.TM_CCORR_NORMED,
            )
        )
        logger.debug(f"Spike planted match: {spike_planted_match:.2f}")
        spike_planted = bool(spike_planted_match > self.SPIKE_PLANTED_REQUIRED_MATCH)

        if spike_planted:
            # Spike planted implies the round is live - skip the buy check
            buy_phase = False
        else:
            buy_phase_gray = np.min(self.REGIONS["buy_phase"].extract_one(frame.image), axis=2)
            buy_phase_norm = imageops.normalise(buy_phase_gray, bottom=80)
            buy_phase_match = np.max(
                cv2.matchTemplate(buy_phase_norm, self.BUY_PHASE_TEMPLATE, cv2.TM_CCORR_NORMED)
            )
            logger.debug(f"Buy phase match: {buy_phase_match}")
            buy_phase = buy_phase_match > 0.9

        countdown_text = None
        if not spike_planted:
            # The countdown is hidden once the spike is planted
            countdown_gray = np.min(self.REGIONS["timer"].extract_one(frame.image), axis=2)
            countdown_norm = 255 - imageops.normalise(countdown_gray, bottom=80)
            countdown_text = imageops.tesser_ocr(
                countdown_norm,
                engine=imageops.tesseract_only,
            )

            # OCR may fail (returning None - see how tesser_ocr results are
            # None-checked elsewhere in this file) or produce garbage;
            # previously a None result crashed the len() check below.
            if countdown_text is not None and len(countdown_text) > 6:
                countdown_text = None

        frame.valorant.timer = Timer(
            spike_planted=spike_planted,
            buy_phase=buy_phase,
            countdown=countdown_text,
        )
        draw_timer(frame.debug_image, frame.valorant.timer)
        return frame.valorant.timer.valid
コード例 #21
0
class ScoreProcessor(Processor):
    """Detects and parses the Overwatch round-complete and final-score
    screens, recording the parsed results on the frame."""

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    RESULTS = ["VICTORY", "DEFEAT", "DRAW"]

    ROUND_N_COMPLETE = re.compile(r"R[DO]UN[DO]([0-9O]{1,2})COMPLETE")

    COMPLETE_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data",
                     "complete_template.png"), 0)
    COMPLETE_TEMPLATE_THRESH = 0.6

    FINAL_SCORE_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data",
                     "final_score_template.png"), 0)
    FINAL_SCORE_TEMPLATE_THRESH = 0.6

    def process(self, frame: Frame) -> bool:
        """Try the round-complete screen first, then the final-score screen."""
        if self.detect_score_screen(frame):
            logger.debug(
                f"Matched score screen with match={frame.overwatch.score_screen_match}"
            )

            parsed = self.parse_score_screen(frame)
            if parsed:
                frame.overwatch.score_screen = parsed

            return True

        if self.detect_final_score(frame):
            logger.debug(
                f"Matched final score with match={frame.overwatch.final_score_match}"
            )

            parsed = self.parse_final_score(frame)
            if parsed:
                frame.overwatch.final_score = parsed

            return True

        return False

    def parse_score_screen(self, frame: Frame) -> Optional[ScoreScreen]:
        """Read the blue/red round scores and the round number, returning
        None if the scores cannot be OCRed."""
        try:
            blue_score, red_score = big_noodle.ocr_all_int(
                self.REGIONS["score"].extract(frame.image), height=212)
        except ValueError as ve:
            logger.warning(f"Failed to parse scores: {ve}")
            return None

        logger.debug(f"Got score {blue_score} / {red_score}")

        round_image = self.REGIONS["round_text"].extract_one(frame.image)
        round_text = big_noodle.ocr(round_image,
                                    channel="min",
                                    threshold="otsu_above_mean",
                                    height=72,
                                    debug=False)

        round_number = None
        parsed = self.ROUND_N_COMPLETE.match(round_text)
        if parsed:
            # OCR commonly reads "0" as "O" - undo that before int()
            round_number = int(parsed.group(1).replace("O", "0"))
            logger.debug(
                f"Got round {round_number} from round string {round_text!r}")
        else:
            logger.warning(
                f"Could not parse round from round string {round_text!r}")

        return ScoreScreen(blue_score, red_score, round_number)

    def parse_final_score(self, frame: Frame) -> Optional[FinalScore]:
        """Read the final blue/red scores and the VICTORY/DEFEAT/DRAW text,
        inferring the result from the scores if the text is unreadable."""
        thresholded = [
            imageops.otsu_thresh_lb_fraction(im, 1.4)
            for im in self.REGIONS["final_score"].extract(frame.image)
        ]
        try:
            blue_score, red_score = big_noodle.ocr_all_int(thresholded,
                                                           channel=None,
                                                           threshold=None,
                                                           height=127)
        except ValueError as ve:
            logger.warning(f"Failed to parse final score: {ve}")
            return None

        logger.debug(f"Got final score {blue_score} / {red_score}")

        result_im = cv2.cvtColor(
            self.REGIONS["final_result_text"].extract_one(frame.image),
            cv2.COLOR_BGR2HSV_FULL)
        result_thresh = cv2.inRange(result_im, (0, 0, 240), (255, 255, 255))
        result_text = big_noodle.ocr(result_thresh,
                                     channel=None,
                                     threshold=None,
                                     height=120,
                                     debug=False)
        matches = textops.matches(result_text, self.RESULTS)
        result: Optional[str]
        if np.min(matches) <= 2:
            # The OCRed text is close enough to one of the known results
            result = self.RESULTS[arrayops.argmin(matches)]
            logger.debug(f"Got result {result} from {result_text!r}")
        elif blue_score is not None and red_score is not None:
            # Text unreadable - fall back to comparing the scores
            if blue_score > red_score:
                result = "VICTORY"
            elif red_score > blue_score:
                result = "DEFEAT"
            else:
                result = "DRAW"
            logger.warning(
                f"Could not identify result from {result_text!r} (match={np.min(matches)}) - "
                f"using score {blue_score}, {red_score} to infer result={result}"
            )
        else:
            logger.warning(
                f"Could not identify result from {result_text!r} (match={np.min(matches)}) and did not parse scores"
            )
            result = None

        return FinalScore(blue_score, red_score, result)

    def detect_score_screen(self, frame: Frame) -> bool:
        """Template-match the round-complete text; stores the match quality
        on the frame and returns whether it clears the threshold."""
        region = cv2.resize(
            self.REGIONS["complete_text"].extract_one(frame.image), (0, 0),
            fx=0.5,
            fy=0.5)
        _, binary = cv2.threshold(np.min(region, 2), 200, 255,
                                  cv2.THRESH_BINARY)
        sqdiff = np.min(
            cv2.matchTemplate(binary, self.COMPLETE_TEMPLATE,
                              cv2.TM_SQDIFF_NORMED))
        frame.overwatch.score_screen_match = round(1 - float(sqdiff), 5)
        return frame.overwatch.score_screen_match > self.COMPLETE_TEMPLATE_THRESH

    def detect_final_score(self, frame: Frame) -> bool:
        """Template-match the final-score header; stores the match quality
        on the frame and returns whether it clears the threshold."""
        region = cv2.resize(
            self.REGIONS["final_score_text"].extract_one(frame.image), (0, 0),
            fx=0.75,
            fy=0.75)
        binary = imageops.otsu_thresh_lb_fraction(region, 0.8)
        sqdiff = np.min(
            cv2.matchTemplate(binary, self.FINAL_SCORE_TEMPLATE,
                              cv2.TM_SQDIFF_NORMED))
        frame.overwatch.final_score_match = round(1 - float(sqdiff), 5)
        return frame.overwatch.final_score_match > self.FINAL_SCORE_TEMPLATE_THRESH
コード例 #22
0
class MatchStatusProcessor(Processor):
    # Screen-region definitions (16:9) loaded from the shared data dir
    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "..", "..", "data", "regions",
                     "16_9.zip"))
    # Grayscale "head" icon that marks the players-alive HUD element
    HEAD_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "head.png"), 0)
    # Grayscale skull icon - presumably used by the kill counter
    # (its consumer, _get_kills, is not fully visible in this chunk)
    SKULL_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "skull.png"), 0)
    # (rank name, BGR badge template, BGR mask built from the PNG's alpha
    # channel) for every ranked badge
    RANK_TEMPLATES = [(
        rank,
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "ranks",
                         rank + ".png")),
        cv2.cvtColor(
            imageops.imread(
                os.path.join(os.path.dirname(__file__), "data", "ranks",
                             rank + ".png"),
                -1,
            )[:, :, 3],
            cv2.COLOR_GRAY2BGR,
        ),
    ) for rank in data.ranks]
    # Same (name, template, alpha-mask) triples for game-mode badges
    MODE_TEMPLATES = [(
        mode,
        imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "modes",
                         mode + ".png")),
        cv2.cvtColor(
            imageops.imread(
                os.path.join(os.path.dirname(__file__), "data", "modes",
                             mode + ".png"),
                -1,
            )[:, :, 3],
            cv2.COLOR_GRAY2BGR,
        ),
    ) for mode in ["duos"]]
    # Two-character OCR substitutions (misread char -> digit) applied to
    # the squads-left number in _get_squads_left, e.g. "O" -> "0"
    SUBS = [
        "?2",
        "O0",
        "L1",
        "I1",
        "B6",
    ]

    def __init__(self) -> None:
        super().__init__()
        # Index of the most recently matched rank template - presumably a
        # hint used by _parse_badge (not visible in this chunk); confirm
        self.last_rank_template = 0

    def eager_load(self) -> None:
        """Load the extraction regions eagerly rather than on first use."""
        self.REGIONS.eager_load()

    def process(self, frame: Frame) -> bool:
        """Parse the in-match status HUD: squads/players left, kills and
        (for ranked) the rank badge, tier text and RP text.

        Returns True when the HUD "head" icon is found and
        frame.apex.match_status has been populated; False otherwise.
        """
        y = cv2.cvtColor(frame.image, cv2.COLOR_BGR2YUV)[:, :, 0]

        # The text moves depending on normal or elite queue
        # Look for the "head" template showing players alive
        head_region = np.max(self.REGIONS["head_region"].extract_one(
            frame.image),
                             axis=2)
        _, head_thresh = cv2.threshold(head_region, 200, 255,
                                       cv2.THRESH_BINARY)
        head_match = cv2.matchTemplate(head_thresh, self.HEAD_TEMPLATE,
                                       cv2.TM_CCORR_NORMED)
        mnv, mxv, mnl, mxl = cv2.minMaxLoc(head_match)
        frame.apex.match_status_match = round(float(mxv), 2)
        if mxv < 0.9:
            # Head icon not found - match status HUD is not on screen
            return False

        badge_image = self.REGIONS["rank_badge"].extract_one(frame.image)
        # cv2.imshow('rank_badge_image', badge_image)
        # print(rank_badge_matches)

        # 90 for unranked, 15 for ranked
        has_badge = mxl[0] < 30

        mode = None
        if has_badge:
            # Check the mode badges (duos) before assuming ranked.
            # NOTE(review): the 750 distance cutoff is empirical - confirm
            # against _parse_badge (not visible in this chunk).
            mode_badge_matches = self._parse_badge(badge_image,
                                                   self.MODE_TEMPLATES)
            if mode_badge_matches[0] < 750:
                mode = "duos"

        squads_left_text = self._parse_squads_left_text(y, has_badge)
        squads_left = self._get_squads_left(squads_left_text, mode)
        if not squads_left:
            # No "SQUADS LEFT" parsed - retry the same text as solos
            # ("PLAYERS LEFT")
            mode = "solos"
            solos_players_left = self._get_squads_left(squads_left_text, mode)
        else:
            solos_players_left = None

        # A badge that matched no mode badge implies a ranked badge
        if not mode and has_badge:
            mode = "ranked"

        if mode == "ranked":
            # Identify the rank badge and OCR the tier (roman numerals)
            # and RP delta text
            rank_badge_matches = self._parse_badge(badge_image,
                                                   self.RANK_TEMPLATES)
            rank_text_image = self.REGIONS["rank_text"].extract_one(
                frame.image_yuv[:, :, 0])
            rank_text = imageops.tesser_ocr(
                rank_text_image,
                whitelist="IV",
                scale=3,
                invert=True,
                engine=imageops.tesseract_only,
            )
            rp_text_image = self.REGIONS["ranked_rp"].extract_one(
                frame.image_yuv[:, :, 0])
            rp_text = imageops.tesser_ocr(
                rp_text_image,
                whitelist=string.digits + "+-RP",
                scale=3,
                invert=True,
                engine=imageops.tesseract_only,
            )
        else:
            rank_badge_matches = None
            rank_text = None
            rp_text = None

        frame.apex.match_status = MatchStatus(
            squads_left=squads_left,
            # players-alive count is only shown for larger squad counts
            players_alive=self._get_players_alive(y, has_badge)
            if squads_left and squads_left > 4 else None,
            kills=self._get_kills(y, mode),
            ranked=mode == "ranked",
            rank_badge_matches=rank_badge_matches,
            rank_text=rank_text,
            rp_text=rp_text,
            solos_players_left=solos_players_left,
            mode=mode,
        )
        self.REGIONS.draw(frame.debug_image)
        _draw_status(frame.debug_image, frame.apex.match_status)
        return True

    def _parse_squads_left_text(self, luma: np.ndarray,
                                has_badge: bool) -> str:
        """OCR and clean the "squads left" counter text.

        Uses the ranked-offset region when a badge is present. The result
        is uppercased, restricted to A-Z/0-9/space, stripped, and has the
        common "B" -> "6" OCR confusion corrected.
        """
        region_name = ("ranked_" if has_badge else "") + "squads_left"
        raw_text = imageops.tesser_ocr(
            self.REGIONS[region_name].extract_one(luma),
            engine=imageops.tesseract_lstm,
            scale=2,
            invert=True,
        ).upper()
        allowed = string.ascii_uppercase + string.digits + " "
        cleaned = "".join(c for c in raw_text if c in allowed)
        return cleaned.strip().replace("B", "6")

    def _get_squads_left(self,
                         squads_left_text: str,
                         mode: Optional[str] = None) -> Optional[int]:
        """Parse the squad/player count out of the OCRed counter text.

        Validates that the text after the number fuzzy-matches the
        expected label for the mode, applies the SUBS digit corrections,
        and range-checks the parsed count. Returns None on any failure.
        """
        if mode == "solos":
            expected_text = "PLAYERSLEFT"
            expected_max_squads = 60
        else:
            # duos and the default both use the squads label and max of 30
            expected_text = "SQUADSLEFT"
            expected_max_squads = 30

        # Skip the (up to 2 digit) number, drop spaces, and fuzzy-compare
        # the remaining label against the expected one
        text_match = levenshtein.ratio(squads_left_text[2:].replace(" ", ""),
                                       expected_text)
        if text_match <= 0.6:
            return None
        if text_match <= 0.8:
            logger.warning(
                f'Refusing to parse "{squads_left_text} as squads left - match={text_match}'
            )
            return None

        number_text = squads_left_text[:3].split(" ", 1)[0]
        for misread, digit in self.SUBS:
            number_text = number_text.replace(misread, digit)
        try:
            squads_left = int(number_text)
        except ValueError:
            logger.warning(
                f"Failed to parse {number_text!r} as int - extracted from {squads_left_text!r}"
            )
            return None

        if 2 <= squads_left <= expected_max_squads:
            return squads_left
        logger.warning(
            f"Got squads_left={squads_left} - rejecting. Extracted from {squads_left_text!r}"
        )
        return None

    def _get_players_alive(self, luma: np.ndarray,
                           has_badge: bool) -> Optional[int]:
        """OCR the "players alive" counter; None if unreadable or implausible.

        Uses the "ranked_" region variant when a rank badge shifts the HUD.
        """
        region_key = ("ranked_" if has_badge else "") + "alive"
        count = imageops.tesser_ocr(
            self.REGIONS[region_key].extract_one(luma),
            engine=ocr.tesseract_ttlakes_digits,
            scale=4,
            expected_type=int,
        )
        # the HUD shows a '?' below 10 players, so valid values are 10..60
        if count and 10 <= count <= 60:
            return count
        logger.warning(f"Rejecting players_alive={count}")
        return None

    def _get_kills(self, luma: np.ndarray, mode: str) -> Optional[int]:
        """Locate the skull icon in the kill counter and OCR the number after it.

        Returns the kill count, or None when the skull icon is not found or
        the OCRed value cannot be parsed / is out of the plausible range.
        """
        # prefer a mode-specific region (e.g. "ranked_kills") when one exists
        prefix = (mode + "_") if mode else ""
        key = prefix + "kills"
        if key not in self.REGIONS.regions:
            key = "kills"
        region = self.REGIONS[key].extract_one(luma)
        # Otsu-threshold to binarise, then pad so the skull template can match
        # even when the icon sits at the region edge
        _, kills_thresh = cv2.threshold(region, 0, 255,
                                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        kills_thresh = cv2.copyMakeBorder(kills_thresh,
                                          5,
                                          5,
                                          0,
                                          5,
                                          cv2.BORDER_CONSTANT,
                                          value=0)
        match = cv2.matchTemplate(kills_thresh, self.SKULL_TEMPLATE,
                                  cv2.TM_CCORR_NORMED)
        mn, mx, mnloc, mxloc = cv2.minMaxLoc(match)
        if mx > 0.9:
            # OCR only the digits to the right of the matched skull icon
            kills_image = region[:, mxloc[0] + self.SKULL_TEMPLATE.shape[1]:]
            # cv2.imshow('kills', cv2.resize(kills_image, (100, 100)))

            kills_text = (imageops.tesser_ocr(kills_image,
                                              engine=imageops.tesseract_lstm,
                                              scale=2,
                                              invert=True).upper().strip())
            # apply common OCR misread substitutions (e.g. O->0) before parsing
            for s1, s2 in self.SUBS:
                kills_text = kills_text.replace(s1, s2)
            try:
                kills = int(kills_text)
                if 0 < kills <= 50:
                    return kills
                else:
                    logger.warning(f"Rejecting kills={kills}")
                    return None
            except ValueError:
                logger.warning(f"Cannot parse kills={kills_text!r} as int")
                return None
        else:
            return None

    def _parse_badge(
            self, badge_image: np.ndarray,
            badges: List[Tuple[str, np.ndarray,
                               np.ndarray]]) -> Tuple[float, ...]:
        """Match *badge_image* against each (rank, template, mask) badge.

        Returns the per-badge best TM_SQDIFF score (lower is better), in the
        same order as *badges*.
        """
        matches = []
        for rank, template, mask in badges:
            # use cv2.matchTemplate explicitly - every other call site in this
            # file qualifies the name; the bare call relied on an implicit
            # import and risks a NameError
            match = np.min(
                cv2.matchTemplate(badge_image, template, cv2.TM_SQDIFF,
                                  mask=mask))
            matches.append(round(match, 1))
        return tuple(matches)
Code example #23
0
class KillfeedProcessor(Processor):
    """Parses the Valorant killfeed from a frame.

    For each kill row found in the killfeed region, resolves the killer and
    killed agents (portrait template matching), both player names (OCR), the
    weapon or ability used (contour + template matching), and the
    headshot/wallbang kill modifiers.
    """

    REGIONS = ExtractionRegionsCollection(
        os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # Row-background templates matched against the shifted hue channel to
    # locate kill rows and classify them as friendly or enemy kills.
    FRIENDLY_KILL_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "friendly_kill.png"),
        0)
    ENEMY_KILL_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "enemy_kill.png"), 0)
    KILL_THRESHOLD = 0.95

    # Portraits of the killed agent (right-hand side of a row); killer
    # portraits are the same images mirrored horizontally.
    AGENT_DEATH_TEMPLATES: Dict[AgentName, Tuple[np.ndarray, np.ndarray]] = {
        name: load_agent_template(name)
        for name in agents
    }
    AGENT_KILLER_TEMPLATES: Dict[AgentName, Tuple[np.ndarray, np.ndarray]] = {
        n: (a[0][:, ::-1], a[1][:, ::-1])
        for n, a in AGENT_DEATH_TEMPLATES.items()
    }
    # TM_SQDIFF_NORMED score an agent match must be BELOW to be accepted
    AGENT_THRESHOLD = 0.1

    # Weapon and ability icon names; abilities are namespaced "<agent>.<name>"
    # so they can be filtered by the killer's agent.
    WEAPON_NAMES = [
        "classic",
        "shorty",
        "frenzy",
        "ghost",
        "sheriff",
        "stinger",
        "spectre",
        "bucky",
        "judge",
        "bulldog",
        "guardian",
        "phantom",
        "vandal",
        "marshal",
        "operator",
        "ares",
        "odin",
        "knife",
        "brimstone.incendiary",
        "brimstone.orbital_strike",
        "jett.blade_storm",
        "phoenix.blaze",
        "phoenix.hot_hands",
        "raze.blast_pack",
        "raze.boom_bot",
        "raze.paint_shells",
        "raze.showstopper",
        "sova.hunters_fury",
        "sova.shock_dart",
        "breach.aftershock",
        "viper.snake_bite",
    ]
    WEAPON_IMAGES = {
        n: imageops.imread(
            os.path.join(os.path.dirname(__file__), "data", "weapons",
                         n + ".png"), 0)
        for n in WEAPON_NAMES
    }
    # Templates are padded onto a fixed-size canvas below, so each source
    # image must fit within it.
    for n, im in WEAPON_IMAGES.items():
        assert im.shape[
            1] <= 145, f"{n} image dimensions too high: {im.shape[1]}"
    # Pad each icon to a uniform 35x150 canvas, dilate, and slightly blur so
    # matching tolerates small rasterisation differences.
    WEAPON_TEMPLATES = {
        w: cv2.GaussianBlur(
            cv2.dilate(
                cv2.copyMakeBorder(image, 5, 35 - image.shape[0], 5,
                                   145 - image.shape[1], cv2.BORDER_CONSTANT),
                None,
            ),
            (0, 0),
            0.5,
        )
        for w, image in WEAPON_IMAGES.items()
    }
    WEAPON_THRESHOLD = 0.85

    WALLBANG_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "kill_modifiers",
                     "wallbang.png"), 0)
    HEADSHOT_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "kill_modifiers",
                     "headshot.png"), 0)
    KILL_MODIFIER_THRESHOLD = 0.75

    def process(self, frame: Frame) -> bool:
        """Detect kill rows and parse each into a Kill.

        Returns True and sets ``frame.valorant.killfeed`` when at least one
        kill was fully parsed.
        """
        x, y, w, h = self.REGIONS["killfeed"].regions[0]
        region = self.REGIONS["killfeed"].extract_one(frame.image)

        # Shift hue so the friendly/enemy row background colours land on
        # stable values for template matching.
        h, s, v = cv2.split(cv2.cvtColor(region, cv2.COLOR_BGR2HSV_FULL))
        h -= 50

        friendly_kill_match = cv2.matchTemplate(h, self.FRIENDLY_KILL_TEMPLATE,
                                                cv2.TM_CCORR_NORMED)
        enemy_kill_match = cv2.matchTemplate(h, self.ENEMY_KILL_TEMPLATE,
                                             cv2.TM_CCORR_NORMED)
        kill_match = np.max(
            np.stack((friendly_kill_match, enemy_kill_match), axis=-1),
            axis=2,
        )

        kill_rows = []

        # Greedily take up to 9 peaks, blanking each peak's neighbourhood so
        # the same row cannot be detected twice.
        for i in range(9):
            mnv, mxv, mnl, mxl = cv2.minMaxLoc(kill_match)
            if mxv < self.KILL_THRESHOLD:
                break

            kill_match[
                max(0, mxl[1] - self.FRIENDLY_KILL_TEMPLATE.shape[0] //
                    2):min(mxl[1] + self.FRIENDLY_KILL_TEMPLATE.shape[0] //
                           2, kill_match.shape[0]),
                max(0, mxl[0] - self.FRIENDLY_KILL_TEMPLATE.shape[1] //
                    2):min(mxl[0] + self.FRIENDLY_KILL_TEMPLATE.shape[1] //
                           2, kill_match.shape[1]), ] = 0

            # Convert the match position back into full-frame coordinates.
            center = (
                int(mxl[0] + x + 20),
                int(mxl[1] + y + self.FRIENDLY_KILL_TEMPLATE.shape[0] // 2),
            )
            friendly_kill_v = friendly_kill_match[mxl[1], mxl[0]]
            enemy_kill_v = enemy_kill_match[mxl[1], mxl[0]]
            logger.debug(
                f"Found kill match at {center}: friendly_kill_v={friendly_kill_v:.4f}, enemy_kill_v={enemy_kill_v:.4f}"
            )

            kill_rows.append(
                KillRowPosition(
                    index=i,
                    match=round(float(mxv), 4),
                    center=center,
                    friendly=bool(friendly_kill_v > enemy_kill_v),
                ))

        # Process rows top-to-bottom.
        kill_rows.sort(key=lambda r: r.center[1])
        if len(kill_rows):
            kills = []

            for row in kill_rows:
                killed_agent, killed_agent_match, killed_agent_x = self._parse_agent(
                    frame, row, True)
                if killed_agent_match > self.AGENT_THRESHOLD * 2:
                    continue

                killer_agent, killer_agent_match, killer_agent_x = self._parse_agent(
                    frame, row, False)
                if killer_agent_match > self.AGENT_THRESHOLD * 2:
                    continue

                if killed_agent_match > self.AGENT_THRESHOLD and killer_agent_match > self.AGENT_THRESHOLD:
                    # both invalid - dont bother logging
                    continue
                elif killed_agent_match > self.AGENT_THRESHOLD:
                    logger.warning(
                        f"Ignoring kill {row} - killed_agent_match={killed_agent_match:.1f} ({killed_agent})"
                    )
                    continue
                elif killer_agent_match > self.AGENT_THRESHOLD:
                    logger.warning(
                        f"Ignoring kill {row} - killer_agent_match={killer_agent_match:.1f} ({killer_agent})"
                    )
                    continue

                killed_name = self._parse_killed_name(frame, row,
                                                      killed_agent_x)
                if killed_name is None:
                    logger.warning(
                        f"Ignoring kill {row} - killed name failed to parse")
                    continue

                weapon, weapon_match, wallbang_match, headshot_match, weapon_x = self._parse_weapon(
                    frame, row, killer_agent_x, killer_agent)

                killer_name = self._parse_killer_name(frame, row,
                                                      killer_agent_x, weapon_x)
                if killer_name is None:
                    logger.warning(
                        f"Ignoring kill {row} - killer name failed to parse")
                    continue

                kill = Kill(
                    y=int(row.center[1]),
                    row_match=round(float(row.match), 4),
                    killer_friendly=row.friendly,
                    killer=KillfeedPlayer(
                        agent=killer_agent,
                        agent_match=round(killer_agent_match, 4),
                        name=killer_name,
                    ),
                    killed=KillfeedPlayer(
                        agent=killed_agent,
                        agent_match=round(killed_agent_match, 4),
                        name=killed_name,
                    ),
                    weapon=weapon,
                    weapon_match=round(weapon_match, 2),
                    wallbang=wallbang_match > self.KILL_MODIFIER_THRESHOLD,
                    wallbang_match=round(wallbang_match, 4),
                    headshot=headshot_match > self.KILL_MODIFIER_THRESHOLD,
                    headshot_match=round(headshot_match, 4),
                )
                kills.append(kill)
                logger.debug(f"Got kill: {kill}")

                if frame.debug_image is not None:
                    s = (
                        f"{row.match:.2f} | "
                        f"{killer_agent} ({killer_agent_match:.4f}) {killer_name!r} >"
                        f' {weapon} {"* " if kill.headshot else ""}{"- " if kill.wallbang else ""}> '
                        f"{killed_agent} ({killed_agent_match:.4f}) {killed_name!r}"
                    )
                    (w, _), _ = cv2.getTextSize(s, cv2.FONT_HERSHEY_SIMPLEX,
                                                0.6, 1)
                    # outlined text: black backing stroke then coloured text
                    for c, t in ((0, 0, 0), 3), ((0, 255, 128), 1):
                        cv2.putText(
                            frame.debug_image,
                            s,
                            (killer_agent_x - (w + 35), row.center[1] + 5),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.6,
                            c,
                            t,
                        )

            if len(kills):
                frame.valorant.killfeed = Killfeed(kills=kills, )

                draw_weapon_templates(frame.debug_image, self.WEAPON_TEMPLATES)

                return True

        return False

    # Tracks which debug labels have already been drawn on the current debug
    # image, so each label is only rendered once per frame.
    _last_image_id = None
    _last_image_names = set()

    def _get_region(self,
                    image,
                    y1,
                    y2,
                    x1,
                    x2,
                    c=None,
                    debug_name=None,
                    debug_image=None):
        """Slice ``image[y1:y2, x1:x2]`` (channel *c* if given).

        Negative coordinates are interpreted as offsets from the image edge.
        When *debug_image* is provided, the region and (once per image) its
        label are drawn onto it.
        """
        if y1 < 0:
            y1 = image.shape[0] + y1
        if y2 < 0:
            y2 = image.shape[0] + y2
        if x1 < 0:
            x1 = image.shape[1] + x1
        if x2 < 0:
            x2 = image.shape[1] + x2
        if debug_image is not None:
            co = str2col(debug_name)
            cv2.rectangle(
                debug_image,
                (x1, y1),
                (x2, y2),
                co,
            )
            if id(debug_image) != self._last_image_id:
                # new debug image - labels may be drawn again
                self._last_image_names.clear()
                self._last_image_id = id(debug_image)
            if debug_name and debug_name not in self._last_image_names:
                self._last_image_names.add(debug_name)
                for col, th in ((0, 0, 0), 3), (co, 1):
                    cv2.putText(
                        debug_image,
                        debug_name,
                        (x1, y1 - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        col,
                        th,
                    )
        region = image[y1:y2, x1:x2, ]
        if c is not None:
            region = region[:, :, c]
        return region

    def _parse_agent(self, frame: Frame, row: KillRowPosition,
                     agent_death: bool) -> Tuple[AgentName, float, int]:
        """Identify the agent portrait on one side of a kill row.

        When *agent_death* is true, examines the killed agent's portrait at
        the far right of the row; otherwise the killer's portrait to the left
        of the kill icon.

        Returns (agent name, TM_SQDIFF_NORMED score - lower is better, x
        position of the match in the full frame).
        """
        if agent_death:
            region_x = frame.image.shape[1] - 120
            agent_im = self._get_region(
                frame.image,
                row.center[1] - 20,
                row.center[1] + 20,
                -120,
                -35,
                debug_name="killed_agent",
                debug_image=frame.debug_image,
            )
        else:
            region_x = frame.image.shape[1] - 600
            agent_im = self._get_region(
                frame.image,
                row.center[1] - 20,
                row.center[1] + 20,
                -600,
                row.center[0] - 60,
                debug_name="killer_agent",
                debug_image=frame.debug_image,
            )

        # NOTE: removed leftover matplotlib debug plotting here - it imported
        # pyplot and called plt.show(), which blocks processing entirely.
        templates = (self.AGENT_DEATH_TEMPLATES
                     if agent_death else self.AGENT_KILLER_TEMPLATES)
        agent_matches = {}
        agent_match_m = []
        for a, t in templates.items():
            match = cv2.matchTemplate(agent_im,
                                      t[0],
                                      cv2.TM_SQDIFF_NORMED,
                                      mask=t[1])
            agent_matches[a] = match
            agent_match_m.append(match)

        # Find the single best-matching location across all agents, then pick
        # the agent whose score at that location is lowest.
        agent_match_m = np.min(np.stack(agent_match_m, axis=-1), axis=2)
        mnv, mxv, mnl, mxl = cv2.minMaxLoc(agent_match_m)

        agent, agent_match = None, float("inf")
        for a, m in agent_matches.items():
            v = m[mnl[1], mnl[0]]
            if v < agent_match:
                agent_match = v
                agent = a

        return agent, float(agent_match), int(region_x + mnl[0])

    def _parse_killed_name(self, frame, row, killed_agent_x) -> Optional[str]:
        """OCR the killed player's name (between kill icon and their agent).

        Returns None when the region is empty (name could not be located).
        """
        killed_name_gray = self._get_region(
            frame.image_yuv,
            row.center[1] - 10,
            row.center[1] + 10,
            row.center[0] + 10,
            killed_agent_x - 10,
            0,
            debug_name="killed_name",
            debug_image=frame.debug_image,
        )
        if killed_name_gray.shape[1] == 0:
            return None
        # normalise then invert so the text is dark-on-light for tesseract
        killed_name_norm = 255 - imageops.normalise(killed_name_gray, min=170)
        return textops.strip_string(
            imageops.tesser_ocr(killed_name_norm,
                                engine=imageops.tesseract_lstm).upper(),
            alphabet=string.ascii_uppercase + string.digits + "# ",
        )

    def _parse_weapon(
            self, frame, row, killer_agent_x,
            killer_agent) -> Tuple[Optional[str], float, float, float, int]:
        """Identify the weapon/ability icon and kill modifiers for a row.

        Returns (weapon name or None, weapon match, wallbang match, headshot
        match, x position of the weapon's left edge in the full frame). On
        failure the x position falls back to the region's right edge.
        """
        weapon_region_left = killer_agent_x + 60
        weapon_region_right = row.center[0] - 20
        weapon_gray = self._get_region(
            frame.image_yuv,
            row.center[1] - 15,
            row.center[1] + 17,
            weapon_region_left,
            weapon_region_right,
            0,
            debug_name="weapon",
            debug_image=frame.debug_image,
        )
        if weapon_gray.shape[1] == 0:
            return None, 0, 0, 0, weapon_region_right
        # Adaptive per-column threshold: 10th percentile brightness, smoothed,
        # clamped to [160, 200] - picks out the bright icon over the backdrop.
        weapon_adapt_thresh = np.clip(
            np.convolve(np.percentile(weapon_gray, 10, axis=0),
                        [0.2, 0.6, 0.2],
                        mode="same"),
            160,
            200,
        )
        weapon_thresh = ((weapon_gray - weapon_adapt_thresh > 30) *
                         255).astype(np.uint8)

        # Kill modifier icons (wallbang/headshot) appear in the rightmost 75px.
        kill_modifiers_thresh = weapon_thresh[:, -75:]
        _, wallbang_match, _, wallbang_loc = cv2.minMaxLoc(
            cv2.matchTemplate(kill_modifiers_thresh, self.WALLBANG_TEMPLATE,
                              cv2.TM_CCORR_NORMED))
        _, headshot_match, _, headshot_loc = cv2.minMaxLoc(
            cv2.matchTemplate(kill_modifiers_thresh, self.HEADSHOT_TEMPLATE,
                              cv2.TM_CCORR_NORMED))
        wallbang_match, headshot_match = float(wallbang_match), float(
            headshot_match)
        logger.debug(
            f"wallbang_match={wallbang_match:.2f}, headshot_match={headshot_match:.2f}"
        )

        # Clip detected modifier icons off the right edge so they are not
        # mistaken for part of the weapon.
        right = weapon_thresh.shape[1] - 1
        if wallbang_match > self.KILL_MODIFIER_THRESHOLD:
            right = min(right, (weapon_thresh.shape[1] - 75) + wallbang_loc[0])
        if headshot_match > self.KILL_MODIFIER_THRESHOLD:
            right = min(right, (weapon_thresh.shape[1] - 75) + headshot_loc[0])
        if right != weapon_thresh.shape[1] - 1:
            logger.debug(f"Using right={right} (clipping kill modifier)")
            weapon_thresh = weapon_thresh[:, :right]

        weapon_image = cv2.dilate(
            cv2.copyMakeBorder(
                weapon_thresh,
                5,
                5,
                5,
                5,
                cv2.BORDER_CONSTANT,
            ),
            np.ones((2, 2)),
        )
        contours, hierarchy = imageops.findContours(weapon_image,
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        contours_xywh = [(cnt, cv2.boundingRect(cnt)) for cnt in contours]

        best_weap_match, best_weap = 0, None

        # The weapon icon is the rightmost plausible contour; scan right-to-left.
        for cnt, (x1, y1, w, h) in sorted(contours_xywh,
                                          key=lambda cnt_xywh: cnt_xywh[1][0],
                                          reverse=True):
            x2, y2 = x1 + w, y1 + h
            a = cv2.contourArea(cnt)

            fromright = weapon_image.shape[1] - x2

            ignore = False
            if w > 145:
                logger.warning(f"Ignoring weapon contour with w={w}")
                ignore = True
            if fromright < 30:
                # contour is far right - could be small agent ability, so be less strict
                if a < 100 or h < 10:
                    logger.debug(
                        f"Ignoring right weapon contour {cv2.boundingRect(cnt)}, fromright={fromright}, a={a}"
                    )
                    ignore = True
                else:
                    logger.debug(
                        f"Allowing potential ability contour {cv2.boundingRect(cnt)}, fromright={fromright}, a={a}"
                    )
            elif a < 200 or h < 16:
                logger.debug(
                    f"Ignoring weapon contour {cv2.boundingRect(cnt)}, fromright={fromright}, a={a}"
                )
                ignore = True

            if ignore:
                if frame.debug_image is not None and a > 1:
                    cv2.drawContours(
                        frame.debug_image,
                        [cnt],
                        -1,
                        (0, 128, 255),
                        1,
                        offset=(
                            weapon_region_left - 5,
                            row.center[1] - 20,
                        ),
                    )
                continue

            # Draw contour to image, padding l=5, r=10, t=2, b=2
            # The extra width padding prevents abilities matching small parts of large guns
            weapon_im = np.zeros((h + 4, w + 15), dtype=np.uint8)
            cv2.drawContours(
                weapon_im,
                [cnt],
                -1,
                255,
                -1,
                offset=(
                    -x1 + 5,
                    -y1 + 2,
                ),
            )
            if weapon_im.shape[1] > 150:
                weapon_im = weapon_im[:, :150]
            # Only consider ability icons belonging to the killer's agent.
            weapon_match, weapon = imageops.match_templates(
                weapon_im,
                {
                    w: t
                    for w, t in self.WEAPON_TEMPLATES.items() if "." not in w
                    or w.lower().startswith(killer_agent.lower() + ".")
                },
                cv2.TM_CCORR_NORMED,
                template_in_image=False,
                required_match=0.96,
                verbose=False,
            )
            if best_weap_match < weapon_match:
                best_weap_match, best_weap = weapon_match, weapon

            valid = weapon_match > self.WEAPON_THRESHOLD

            if frame.debug_image is not None and a > 1:
                cv2.drawContours(
                    frame.debug_image,
                    [cnt],
                    -1,
                    (128, 255, 0) if valid else (0, 0, 255),
                    1,
                    offset=(
                        weapon_region_left - 5,
                        row.center[1] - 20,
                    ),
                )

            if valid:
                if frame.debug_image is not None:
                    x, y = 600, row.center[1] - 15
                    frame.debug_image[y:y + weapon_thresh.shape[0], x:x +
                                      weapon_thresh.shape[1]] = cv2.cvtColor(
                                          weapon_thresh, cv2.COLOR_GRAY2BGR)
                    x -= weapon_im.shape[1] + 10
                    frame.debug_image[y:y + weapon_im.shape[0],
                                      x:x + weapon_im.shape[1]] = cv2.cvtColor(
                                          weapon_im, cv2.COLOR_GRAY2BGR)

                    cv2.line(
                        frame.debug_image,
                        (x, y + weapon_im.shape[0] // 2),
                        (450, self.WEAPON_NAMES.index(weapon) * 40 + 120),
                        (0, 255, 0),
                        2,
                        cv2.LINE_AA,
                    )

                return (
                    weapon,
                    float(weapon_match),
                    float(wallbang_match),
                    float(headshot_match),
                    int(weapon_region_left + x1),
                )

        logger.warning(
            f"Unable to find weapon - best match was {best_weap!r} match={best_weap_match:.2f}"
        )
        return None, 0, 0, 0, weapon_region_right

    def _parse_killer_name(self, frame, row, killer_agent_x,
                           weapon_x) -> Optional[str]:
        """OCR the killer's name (between their agent portrait and the weapon).

        Returns None when the region is empty (name could not be located).
        """
        killer_name_gray = self._get_region(
            frame.image_yuv,
            row.center[1] - 10,
            row.center[1] + 10,
            killer_agent_x + 35,
            weapon_x - 10,
            0,
            debug_name="killer_name",
            debug_image=frame.debug_image,
        )
        if killer_name_gray.shape[1] == 0:
            return None
        # normalise then invert so the text is dark-on-light for tesseract
        killer_name_norm = 255 - imageops.normalise(killer_name_gray, min=170)
        killer_name = textops.strip_string(
            imageops.tesser_ocr(killer_name_norm,
                                engine=imageops.tesseract_lstm).upper(),
            alphabet=string.ascii_uppercase + string.digits + "#",
        )
        return killer_name
Code example #24
0
class MenuProcessor(Processor):
    """Detects the Overwatch main menu and play (competitive) menu in a frame."""

    # Screen-region definitions for 16:9 resolutions, loaded from a packaged zip.
    REGIONS = ExtractionRegionsCollection(os.path.join(os.path.dirname(__file__), "data", "regions", "16_9.zip"))

    # Grayscale template of the "placement matches" text.
    # NOTE(review): only referenced from commented-out code below — presumably
    # kept for a disabled placement-detection feature; confirm before removing.
    PLACEMENT_MATCHES_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "placement_matches.png"), 0
    )
    # Minimum template-match score to treat the placement text as present.
    PLACEMENT_MATCHES_TEMPLATE_THRESHOLD = 0.6

    def process(self, frame: Frame) -> bool:
        """Parse menu state out of *frame*.

        Returns True when the frame shows either the main menu or the play
        (competitive) menu. On a main-menu frame, OCRs the game version string
        and stores it in ``frame.overwatch.main_menu``.

        NOTE(review): the play-menu branch returns True WITHOUT setting
        ``frame.overwatch.play_menu`` (its parsing code was disabled and has
        been removed as dead code), so the early-exit guard below never
        short-circuits for play-menu frames — confirm this is intended.
        """
        if frame.overwatch.main_menu or frame.overwatch.play_menu:
            # Menu state already resolved for this frame.
            return True

        self.REGIONS.draw(frame.debug_image)
        if self.detect_main_menu(frame):
            # OCR the version string (digits, dots, dashes) from the menu.
            version_region = self.REGIONS["version"].extract_one(frame.image)
            thresh = imageops.otsu_thresh_lb_fraction(version_region, 0.75)
            version = imageops.tesser_ocr(thresh, whitelist=string.digits + ".-", invert=True, scale=4, blur=2)

            frame.overwatch.main_menu = MainMenu(version=version)

            _draw_main_menu(frame.debug_image, frame.overwatch.main_menu)

            return True

        if self.detect_play_menu(frame):
            # Placement-match / SR parsing for this branch was disabled
            # (dead commented-out code removed); only detection remains.
            return True

        return False

    # Grayscale template of the "OVERWATCH" title text shown on the main menu.
    OVERWATCH_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "overwatch_template.png"), 0
    )
    # Minimum (1 - normalised squared difference) score to accept the match.
    OVERWATCH_TEMPLATE_THRESH = 0.6

    def detect_main_menu(self, frame: Frame) -> bool:
        """Template-match the OVERWATCH title text, record the match score on
        the frame, and return whether it clears the threshold."""
        region = self.REGIONS["overwatch_text"].extract_one(frame.image)
        region = cv2.resize(region, (0, 0), fx=0.5, fy=0.5)

        binarised = imageops.otsu_thresh_lb_fraction(region, 1.1)
        # TM_SQDIFF_NORMED: lower is better, so invert into a 0..1 score.
        sqdiff = cv2.matchTemplate(binarised, self.OVERWATCH_TEMPLATE, cv2.TM_SQDIFF_NORMED)
        score = 1 - float(np.min(sqdiff))
        frame.overwatch.main_menu_match = round(score, 5)
        return frame.overwatch.main_menu_match > self.OVERWATCH_TEMPLATE_THRESH

    # Grayscale templates of the competitive-play button text at two sizes.
    COMPETITIVE_TEMPLATE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "competitive_play.png"), 0
    )
    COMPETITIVE_TEMPLATE_LARGE = imageops.imread(
        os.path.join(os.path.dirname(__file__), "data", "competitive_play_large.png"), 0
    )
    # Minimum (1 - normalised squared difference) score to accept the match.
    COMPETITIVE_TEMPLATE_THRESH = 0.6

    def detect_play_menu(self, frame: Frame) -> bool:
        """Template-match the competitive-play button text at both template
        sizes, record the best score on the frame, and return whether it
        clears the threshold."""
        region = self.REGIONS["competitive_play"].extract_one(frame.image)
        region = cv2.resize(region, (0, 0), fx=0.5, fy=0.5)

        # Take the darkest channel per pixel, then Otsu-binarise (inverted).
        gray = np.min(region, axis=2)
        _, binarised = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

        best = 0.0
        for template in (self.COMPETITIVE_TEMPLATE, self.COMPETITIVE_TEMPLATE_LARGE):
            # TM_SQDIFF_NORMED: lower is better, so invert into a 0..1 score.
            sqdiff = cv2.matchTemplate(binarised, template, cv2.TM_SQDIFF_NORMED)
            best = max(best, round(1 - float(np.min(sqdiff)), 5))

        frame.overwatch.play_menu_match = best
        return frame.overwatch.play_menu_match > self.COMPETITIVE_TEMPLATE_THRESH