def _logged_ocr(image, *args, **kwargs):
    """Run OCR on *image* with the configured engine, logging input and output.

    The image and the repr of the recognized text are appended to the rich
    HTML OCR log; the engine's result object is returned unchanged.
    """
    html_log = richlog.get_logger(
        os.path.join(config.SCREEN_SHOOT_SAVE_PATH, 'ocrlog.html'))
    html_log.loghtml('<hr/>')
    html_log.logimage(image)
    result = ocr.engine.recognize(image, *args, **kwargs)
    html_log.logtext(repr(result.text))
    return result
def tell_group(
        groupimg,
        session,
        bartop,
        barbottom,
):
    """Recognize one drop group: its label and the items it contains.

    The label strip below the colored bar is matched against the group
    templates (excluding groups already seen in ``session.recognized_groups``);
    a best score under 0.6 marks the session low-confidence.

    :return: ``(group_name, [(item_name, amount), ...])``
    """
    logger = get_logger(LOGFILE)
    logger.logimage(groupimg)
    # Label text sits below the bar; binarize and trim black borders.
    label_img = groupimg.crop((0, barbottom, groupimg.width, groupimg.height))
    label_img = imgops.enhance_contrast(label_img.convert('L'), 60)
    label_img = imgops.crop_blackedge(label_img)
    logger.logimage(label_img)
    # NOTE: an earlier OCR-based label heuristic was replaced by this
    # template-matching approach.
    candidates = [
        (entry[0], imgops.compare_ccoeff(*imgops.uniform_size(label_img, entry[1])))
        for entry in grouptemplates
        if entry[0] not in session.recognized_groups
    ]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    logger.logtext(repr(candidates))
    groupname, best_score = candidates[0]
    if best_score < 0.6:
        session.low_confidence = True
    if groupname == '幸运掉落':
        # Lucky drop is always one piece of furniture; skip item recognition.
        return (groupname, [('(家具)', 1)])
    vw, vh = session.vw, session.vh
    cell_count = roundint(groupimg.width / (20.370 * vh))
    recognized_items = []
    for idx in range(cell_count):
        # NOTE(review): the right edge below is the constant 40.741*vh, so the
        # box degenerates for idx >= 2 — looks suspicious, confirm intent.
        cell = groupimg.crop(
            (20.370 * vh * idx, 0.000 * vh, 40.741 * vh, 18.981 * vh))
        cell = cell.crop((0.093 * vh, 0, 19.074 * vh, cell.height))
        recognized_items.append(item.tell_item(cell, session))
    return (groupname, recognized_items)
def recognize(img):
    """Recognize the operation-start screen.

    Reads the AP counter, operation id, auto-deploy toggle and sanity cost
    from fixed screen-relative regions.

    :return: dict with keys ``'AP'``, ``'operation'``, ``'delegated'``,
        ``'consume'`` (``consume`` is ``None`` when no digits were read).
    """
    logger = get_logger(LOGFILE)
    vw, vh = util.get_vwvh(img.size)
    # AP counter, top-right corner.
    ap_img = img.crop(
        (100 * vw - 22.917 * vh, 2.917 * vh, 100 * vw, 8.194 * vh)).convert('L')
    reco_Noto, reco_Novecento = load_data()
    ap_img = imgops.enhance_contrast(ap_img, 80, 255)
    logger.logimage(ap_img)
    ap_text = reco_Noto.recognize(ap_img)
    logger.logtext(ap_text)
    # Operation id badge.
    opid_img = img.crop((100 * vw - 55.694 * vh, 11.667 * vh,
                         100 * vw - 44.028 * vh, 15.139 * vh)).convert('L')
    opid_img = imgops.enhance_contrast(opid_img, 80, 255)
    logger.logimage(opid_img)
    opid_text = reco_Novecento.recognize(opid_img)
    if opid_text.endswith('-'):
        opid_text = opid_text[:-1]
    opid_text = opid_text.upper()
    logger.logtext(opid_text)
    # Auto-deploy toggle: enabled when the region is mostly bright pixels.
    delegate_img = img.crop((100 * vw - 32.778 * vh, 79.444 * vh,
                             100 * vw - 4.861 * vh, 85.417 * vh)).convert('L')
    logger.logimage(delegate_img)
    bright_ratio = np.count_nonzero(np.asarray(delegate_img) > 127) / (
        delegate_img.width * delegate_img.height)
    delegated = bright_ratio > 0.5
    # Sanity (AP) cost of the operation, bottom-right.
    consume_img = img.crop((100 * vw - 14.306 * vh, 94.028 * vh,
                            100 * vw - 7.222 * vh, 97.361 * vh)).convert('L')
    consume_img = imgops.enhance_contrast(consume_img, 80, 255)
    logger.logimage(consume_img)
    consume_text = reco_Noto.recognize(consume_img)
    consume_text = ''.join(filter('0123456789'.__contains__, consume_text))
    logger.logtext(consume_text)
    return {
        'AP': ap_text,
        'operation': opid_text,
        'delegated': delegated,
        'consume': int(consume_text) if consume_text.isdigit() else None
    }
def find_jumping(ary, threshold):
    """Locate jump points in a 1-D sequence.

    A "jump" is a first-difference whose magnitude is at least *threshold*.
    Consecutive jumps sharing the same sign are merged into one group, and
    each group is reduced to a single index by a magnitude-weighted average
    of its positions (shifted by +1 to point at the element after the jump).

    :param ary: 1-D sequence of small integers (e.g. 8-bit pixel values;
        cast to int16 so differences don't wrap).
    :param threshold: minimum absolute difference that counts as a jump.
    :return: list of int indices into *ary*; empty when nothing jumps.
    """
    logger = get_logger(LOGFILE)
    ary = np.array(ary, dtype=np.int16)
    diffs = np.diff(ary)
    jumps = [x for x in enumerate(diffs) if abs(x[1]) >= threshold]
    if not jumps:
        # FIX: previously indexed jumps[0] unconditionally, raising
        # IndexError on flat/empty input.
        return []
    groups = [[jumps[0]]]
    for x in jumps[1:]:
        lastgroup = groups[-1]
        # Same-sign jumps belong to the same edge; a sign flip starts a new one.
        if np.sign(x[1]) == np.sign(lastgroup[-1][1]):
            lastgroup.append(x)
        else:
            groups.append([x])
    logger.logtext(repr(groups))
    pts = []
    for group in groups:
        positions = tuple(idx for idx, _ in group)
        magnitudes = tuple(abs(diff) for _, diff in group)
        pts.append(int(np.average(positions, weights=magnitudes)) + 1)
    return pts
def tell_item(itemimg, session):
    """Identify a single drop-item cell.

    The amount is OCR'd from a fixed sub-rectangle; the item itself is found
    by minimum MSE against the 48x48 item templates.  A weak match (best MSE
    >= 800, or no clear gap to the runner-up) marks the session
    low-confidence and yields ``None`` for the name.

    :return: ``(item_name_or_None, amount_or_None)``
    """
    logger = get_logger(LOGFILE)
    logger.logimage(itemimg)
    # Amount digits live in a fixed relative region of the cell.
    numimg = imgops.scalecrop(itemimg, 0.39, 0.71, 0.82, 0.84).convert('L')
    numimg = imgops.crop_blackedge2(numimg, 220)
    recodata, textreco = load_data()
    amount = None
    if numimg is not None:
        numimg = imgops.enhance_contrast(numimg)
        logger.logimage(numimg)
        numtext: str = textreco.recognize(numimg)
        logger.logtext('amount: ' + numtext)
        if numtext.isdigit():
            amount = int(numtext)
    # Normalize the cell to 48x48 RGB and blank out the masked pixels.
    img4reco = np.array(
        itemimg.resize((48, 48), Image.BILINEAR).convert('RGB'))
    img4reco[itemmask] = 0
    ranked = sorted(
        ((cand,
          imgops.compare_mse(
              img4reco,
              tpl if tpl.size == (48, 48)
              else tpl.resize((48, 48), Image.BILINEAR)))
         for cand, tpl in recodata.items()),
        key=lambda pair: pair[1])
    best_name, best_score = ranked[0]
    logger.logtext(repr(ranked[:5]))
    gaps = np.diff([score for _, score in ranked])
    if best_score < 800 and np.any(gaps > 600):
        logger.logtext('matched %s with mse %f' % (best_name, best_score))
        matched = best_name
    else:
        logger.logtext('no match')
        session.low_confidence = True
        matched = None
    return (matched, amount)
def recognize(im):
    """Recognize the end-of-operation screen.

    Extracts the operation id, star rating and the drop-item groups from the
    lower part of the screenshot.

    :return: dict with keys ``'operation'``, ``'stars'``, ``'items'``,
        ``'low_confidence'``.
    """
    import time
    start = time.monotonic()
    vw, vh = util.get_vwvh(im.size)
    logger = get_logger(LOGFILE)
    # Everything of interest sits in the lower part of the screen.
    lower = im.crop((0, 61.111 * vh, 100 * vw, 100 * vh))
    logger.logimage(lower)
    opid_img = lower.crop(
        (0, 4.444 * vh, 23.611 * vh, 11.388 * vh)).convert('L')
    logger.logimage(opid_img)
    opid_img = imgops.enhance_contrast(
        imgops.crop_blackedge(opid_img), 80, 220)
    logger.logimage(opid_img)
    operation_id_str = reco_novecento_bold.recognize(opid_img).upper()
    # FIXME: recognizer can't tell [0o] / [-i] apart (the game renders
    # sᴍᴀʟʟ ᴄᴀᴘs in the ASCII range); recognizer data keeps only '0' and '-'
    # since they occur more frequently.
    stars_img = lower.crop((23.611 * vh, 6.759 * vh, 53.241 * vh, 16.944 * vh))
    logger.logimage(stars_img)
    stars_status = tell_stars(stars_img)
    # Level and EXP regions are logged for diagnostics but not recognized.
    level_img = lower.crop((63.148 * vh, 4.444 * vh, 73.333 * vh, 8.611 * vh))
    logger.logimage(level_img)
    exp_img = lower.crop((76.852 * vh, 5.556 * vh, 94.074 * vh, 7.963 * vh))
    logger.logimage(exp_img)
    items_strip = lower.crop((68.241 * vh, 10.926 * vh, lower.width, 35.000 * vh))
    logger.logimage(items_strip)
    # Probe a 1-px vertical slice to find the colored bar's top/bottom edges.
    probe_x, probe_y = 6.667 * vh, 18.519 * vh
    vslice = items_strip.crop(
        (probe_x, probe_y, probe_x + 1, items_strip.height)).convert('L')
    column = np.asarray(vslice)
    bar_top, bar_bottom, *_ = find_jumping(column.reshape(vslice.height), 32)
    bar_top += probe_y
    bar_bottom += probe_y
    # Collapse the bar to a single row; brightness jumps along it mark the
    # boundaries between drop groups.
    bar = items_strip.crop((0, bar_top, items_strip.width, bar_bottom))
    bar = bar.resize((bar.width, 1), Image.BILINEAR).convert('L')
    logger.logimage(bar.resize((bar.width, 16)))
    row = np.array(bar, dtype=np.int16)[0]
    points = [0, *find_jumping(row, 32)]
    assert (len(points) % 2 == 0)
    # Pair up consecutive boundary points: (start, end) per group.
    group_spans = list(zip(points[::2], points[1::2]))
    logger.logtext(repr(group_spans))
    group_imgs = [
        items_strip.crop((left, 0, right, items_strip.height))
        for left, right in group_spans
    ]
    session = RecognizeSession()
    session.vw = vw
    session.vh = vh
    results = []
    for group_img in group_imgs:
        recognized = tell_group(group_img, session, bar_top, bar_bottom)
        session.recognized_groups.append(recognized[0])
        results.append(recognized)
    end = time.monotonic()
    if session.low_confidence:
        logger.logtext('LOW CONFIDENCE')
    logger.logtext('time elapsed: %f' % (end - start))
    return {
        'operation': operation_id_str,
        'stars': stars_status,
        'items': results,
        'low_confidence': session.low_confidence
    }