Code example #1
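All of the snippets below appear to rely on the same preamble. A sketch of what they assume; common, ocr, imgops, resources, Rect and the PIL-like Image wrapper (it exposes .crop and .array) are project-local modules, so their exact import paths are assumptions about the surrounding codebase:

from typing import Optional, Tuple

import cv2
import numpy as np
import textdistance  # only used by code example #13
# common, ocr, imgops, resources, Image, Rect: project-local helpers (paths assumed)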
def get_output_room_type(img: Image) -> Optional[str]:
    vw, vh = common.get_vwvh(img)
    roi = img.crop(
        (52.333 * vh, 1.667 * vh, 88.556 * vh, 8.222 * vh)).array[:, :, :3]
    roi_size = roi.shape[0] * roi.shape[1]

    # fraction of ROI pixels in the trading post accent color range
    low = np.array([30, 182, 245])
    high = np.array([40, 192, 255])
    mask = cv2.inRange(roi, low, high)
    trade_point = np.count_nonzero(mask) / roi_size

    # fraction of ROI pixels in the factory accent color range
    low = np.array([245, 210, 0])
    high = np.array([255, 220, 10])
    mask = cv2.inRange(roi, low, high)
    manu_point = np.count_nonzero(mask) / roi_size

    # fraction of ROI pixels in the power plant accent color range
    low = np.array([195, 230, 65])
    high = np.array([205, 240, 75])
    mask = cv2.inRange(roi, low, high)
    power_point = np.count_nonzero(mask) / roi_size

    # whichever accent color clearly dominates decides the room type
    if trade_point > manu_point + power_point:
        return 'trade'
    elif manu_point > trade_point + power_point:
        return 'manu'
    elif power_point > trade_point + manu_point:
        return 'power'
    else:
        return None
Code example #2
def is_building_task_product_button(
        img: Image, rect: Tuple[float, float, float, float]) -> bool:
    roi = img.crop(rect)
    # invert-threshold channel 0 (red): pixels darker than 200 become nonzero
    ret, thresh = cv2.threshold(roi.array[:, :, 0], 200, 255,
                                cv2.THRESH_BINARY_INV)
    roi_size = thresh.shape[0] * thresh.shape[1]
    return (np.count_nonzero(thresh) / roi_size) > 0.5
Code example #3
def is_need_change_char_reconfirm(
        img: Image, rect: Tuple[float, float, float, float]) -> bool:
    roi = img.crop(rect)
    roi_array = roi.array
    roi_size = roi_array.shape[0] * roi_array.shape[1]
    # cast to int before subtracting: uint8 arithmetic wraps modulo 256
    tmp_array = (roi_array[:, :, 2].astype(int) -
                 roi_array[:, :, 0].astype(int)) > 100
    return (np.count_nonzero(tmp_array) / roi_size) > 0.5
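The cast above matters: NumPy uint8 subtraction wraps modulo 256, so a pixel whose red channel exceeds its blue channel can still yield a large "difference" and be miscounted. A two-line demonstration:

b = np.array([10], dtype=np.uint8)
r = np.array([200], dtype=np.uint8)
print(b - r)                            # [66] -- wrapped, not -190
print(b.astype(int) - r.astype(int))    # [-190]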
Code example #4
def is_dormitory_exist(img: Image, rect: Tuple[float, float, float,
                                               float]) -> bool:
    roi = img.crop(rect)
    # count dark pixels: red channel below 55 becomes nonzero after the inverted threshold
    ret, thresh = cv2.threshold(roi.array[:, :, 0], 55, 255,
                                cv2.THRESH_BINARY_INV)
    roi_size = thresh.shape[0] * thresh.shape[1]
    return (np.count_nonzero(thresh) / roi_size) > 0.5
Code example #5
def get_img_roi(img: Image, rect: Tuple) -> Image:
    # truncate any float coordinates to int, then crop with the converted rect
    rect_int = tuple(int(i) if isinstance(i, float) else i for i in rect)
    return img.crop(rect_int)
Code example #6
def is_char_select(img: Image, rect: Tuple[float, float, float,
                                           float]) -> bool:
    roi = img.crop(rect)
    roi_array = roi.array[:, :, :3]
    roi_size = roi_array.shape[0] * roi_array.shape[1]
    # count pixels where green clearly dominates both red and blue
    tmp_array = ((roi_array[:, :, 1] > roi_array[:, :, 0] * 1.2) &
                 (roi_array[:, :, 1] > roi_array[:, :, 2] * 1.2))
    return (np.count_nonzero(tmp_array) / roi_size) > 0.1
Code example #7
def is_can_start_clue_party(img: Image, rect: Tuple[float, float, float,
                                                    float]) -> bool:
    roi = img.crop(rect)
    roi_array = roi.array[:, :, :3]
    # count pure white pixels only (low == high == 255 on all channels)
    low = np.array([255, 255, 255])
    high = np.array([255, 255, 255])
    mask = cv2.inRange(roi_array, low, high)
    roi_size = roi_array.shape[0] * roi_array.shape[1]
    return (np.count_nonzero(mask) / roi_size) > 0.5
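Code examples #2, #4, #7 and #11 all share one shape: crop an ROI, build a binary mask, and compare the fraction of set pixels against a threshold. A hypothetical helper sketching the cv2.inRange variant of that pattern (the name color_ratio and its signature are illustrative, not part of the source):

def color_ratio(img: Image, rect: Tuple[float, float, float, float],
                low: np.ndarray, high: np.ndarray) -> float:
    # fraction of ROI pixels whose RGB value lies within [low, high]
    roi = img.crop(rect).array[:, :, :3]
    mask = cv2.inRange(roi, low, high)
    return np.count_nonzero(mask) / (roi.shape[0] * roi.shape[1])

With such a helper, is_can_start_clue_party above would reduce to color_ratio(img, rect, np.array([255, 255, 255]), np.array([255, 255, 255])) > 0.5.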
Code example #8
def get_manu_product_name(img: Image) -> str:
    vw, vh = common.get_vwvh(img)
    roi = img.crop((21.778 * vh, 85.778 * vh, 43.111 * vh, 90.222 * vh))
    eng = ocr.acquire_engine_global_cached('zh-cn')
    result = eng.recognize(roi,
                           70,
                           hints=[ocr.OcrHint.SINGLE_LINE],
                           char_whitelist=known_product_chars)
    return result.text.replace(' ', '')
Code example #9
def is_trade_manu_need_change_char(
        img: Image, rect: Tuple[float, float, float, float]) -> Optional[bool]:
    roi = img.crop(rect).array[:, :, :3]
    roi_size = roi.shape[0] * roi.shape[1]
    hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # low saturation across most of the ROI: the indicator is not shown at all
    if np.count_nonzero(hsv[:, :, 1] > 10) / roi_size < 0.5:
        return None
    # a mostly red ROI means a character change is requested
    return np.count_nonzero((roi[:, :, 0] > roi[:, :, 1] * 1.2) &
                            (roi[:, :, 0] > roi[:, :, 2] * 1.2)) / roi_size > 0.6
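Because the return type is Optional[bool], a caller has to separate None (the saturation gate failed, so the indicator is presumably not on screen) from False with an explicit is None test; a bare truthiness check would conflate the two. A hypothetical call site:

state = is_trade_manu_need_change_char(img, rect)
if state is None:
    pass  # indicator not visible; nothing to decide for this slot
elif state:
    pass  # red indicator dominates: a character change is needed
else:
    pass  # indicator visible but not red: no change needed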
Code example #10
def is_clue_get_by_from_friend(
        img: Image, rect: Tuple[float, float, float, float]) -> Optional[bool]:
    roi = img.crop(rect)
    roi_array = roi.array
    hsv = cv2.cvtColor(roi_array, cv2.COLOR_RGB2HSV)
    hsv_size = hsv.shape[0] * hsv.shape[1]
    # almost no saturated pixels anywhere: the clue slot is empty
    if (np.count_nonzero(hsv[:, :, 1] > 0) / hsv_size) < 0.1:
        return None
    # sample the upper right corner of the clue card
    h, w = roi_array.shape[:2]
    corner = roi_array[h // 10:h * 4 // 10, w * 4 // 5:, :3]
    low = np.array([124, 0, 33])
    high = np.array([126, 2, 35])
    mask = cv2.inRange(corner, low, high)
    corner_size = corner.shape[0] * corner.shape[1]
    # the marker color being (almost) absent means the clue came from a friend
    return (np.count_nonzero(mask) / corner_size) < 0.05
Code example #11
def is_trade_gold(img: Image) -> bool:
    vw, vh = common.get_vwvh(img)
    roi = img.crop(
        (133.222 * vh, 82.556 * vh, 159.444 * vh, 94.333 * vh)).array[:, :, :3]
    roi_size = roi.shape[0] * roi.shape[1]
    # fraction of pixels matching the gold-order accent color
    low = np.array([0, 112, 163])
    high = np.array([10, 122, 173])
    mask = cv2.inRange(roi, low, high)
    gold_point = np.count_nonzero(mask) / roi_size

    # fraction of pixels matching the non-gold-order accent color
    low = np.array([0, 0, 96])
    high = np.array([10, 10, 106])
    mask = cv2.inRange(roi, low, high)
    not_gold_point = np.count_nonzero(mask) / roi_size
    # whichever color dominates decides the order type
    return gold_point > not_gold_point
Code example #12
def is_clue_on(img: Image, rect: Tuple[float, float, float, float]) -> bool:
    # a clue is present when enough of the slot is saturated (non-grey)
    hsv = cv2.cvtColor(img.crop(rect).array, cv2.COLOR_RGB2HSV)
    roi_size = hsv.shape[0] * hsv.shape[1]
    return (np.count_nonzero(hsv[:, :, 1] > 20) / roi_size) > 0.15
Code example #13
    def recognize_operator_box(self,
                               img: Image.Image,
                               recognize_skill=False,
                               skill_facility_hint=None) -> OperatorBox:
        name_img = img.subview(
            (0, 375, img.width, img.height - 2)).convert('L')
        name_img = imgops.enhance_contrast(name_img, 90, 220)
        name_img = imgops.crop_blackedge2(name_img)
        name_img = Image.fromarray(
            cv2.copyMakeBorder(255 - name_img.array,
                               8,
                               8,
                               8,
                               8,
                               cv2.BORDER_CONSTANT,
                               value=[255, 255, 255]))
        # save image for training ocr
        # name_img.save(os.path.join(config.SCREEN_SHOOT_SAVE_PATH, '%d-%04d.png' % (self.tag, self.seq)))
        self.seq += 1
        # OcrHint.SINGLE_LINE (PSM 7) will ignore some operator names, so use the raw-line LSTM mode (PSM 13) here
        # the effect of user-words is questionable; it seldom produces improved output (maybe we need an LSTM word-dawg instead)
        ocrresult = self.ocr.recognize(name_img,
                                       ppi=240,
                                       tessedit_pageseg_mode='13',
                                       user_words_file='operators')
        name = ocrresult.text.replace(' ', '')
        if name not in operator_set:
            comparisons = [(n, textdistance.levenshtein(name, n))
                           for n in operator_set]
            comparisons.sort(key=lambda x: x[1])
            self.logger.debug('%s not in operator set, closest match: %s' %
                              (name, comparisons[0][0]))
            if comparisons[0][1] == comparisons[1][1]:
                self.logger.warning('multiple fixes available for %r',
                                    ocrresult)
            name = comparisons[0][0]
        mood_img = img.subview(Rect.from_xywh(44, 358, 127,
                                              3)).convert('L').array
        mood_img = np.max(mood_img, axis=0)
        # mood bar: the lit fraction of the bar, scaled to the 0-24 mood range
        mask = (mood_img >= 200).astype(np.uint8)
        mood = np.count_nonzero(mask) / mask.shape[0] * 24

        tagimg = img.subview((35, 209, 155, 262))
        on_shift = resources.load_image_cached('riic/on_shift.png', 'RGB')
        distracted = resources.load_image_cached('riic/distracted.png', 'RGB')
        rest = resources.load_image_cached('riic/rest.png', 'RGB')
        tag = None
        # match the status tag against reference images; 3251 is an empirical MSE cutoff
        if imgops.compare_mse(tagimg, on_shift) < 3251:
            tag = 'on_shift'
        elif imgops.compare_mse(tagimg, distracted) < 3251:
            tag = 'distracted'
        elif imgops.compare_mse(tagimg, rest) < 3251:
            tag = 'rest'

        # if this strip is close to flat grey (level 50), a room label is present
        has_room_check = img.subview(Rect.from_xywh(45, 2, 62, 6)).convert('L')
        mse = np.mean(np.power(
            has_room_check.array.astype(np.float32) - 50, 2))
        self.richlogger.logtext(f'has_room_check mse={mse}')
        if mse < 200:
            room_img = img.subview(Rect.from_xywh(42, 6, 74, 30)).array
            room_img = imgops.enhance_contrast(
                Image.fromarray(np.max(room_img, axis=2)), 64, 220)
            room_img = Image.fromarray(255 - room_img.array)
            self.richlogger.logimage(room_img)
            room = self.ocr.recognize(
                room_img,
                ppi=240,
                hints=[ocr.OcrHint.SINGLE_LINE],
                char_whitelist='0123456789FB').text.replace(' ', '')
        else:
            room = None

        if recognize_skill:
            skill1_icon = img.subview(Rect.from_xywh(4, 285, 54, 54))
            skill2_icon = img.subview(Rect.from_xywh(67, 285, 54, 54))
            skill1, score1 = self.recognize_skill(skill1_icon,
                                                  skill_facility_hint)
            skill2, score2 = self.recognize_skill(skill2_icon,
                                                  skill_facility_hint)
        else:
            skill1 = None
            skill2 = None

        skill_icons = []
        if skill1 is not None:
            skill_icons.append(skill1)
        if skill2 is not None:
            skill_icons.append(skill2)
        self.richlogger.logimage(name_img)
        self.richlogger.logtext(repr(ocrresult))
        result = OperatorBox(None,
                             name,
                             mood,
                             tag,
                             room,
                             skill_icons=skill_icons)
        self.richlogger.logtext(repr(result))
        return result