def load_image(name, mode=None, imread_flags=None):
    """Load the named image resource.

    :param name: resource name, resolved to a file object via open_file()
    :param mode: optional target image mode; the image is converted when its
        mode differs
    :param imread_flags: optional backend flags forwarded to Image.open
    :return: the loaded (and possibly converted) image
    """
    if imread_flags is None:
        im = Image.open(open_file(name))
    else:
        im = Image.open(open_file(name), imread_flags)
    # FIX: previously a requested `mode` was silently ignored whenever
    # imread_flags was supplied; honor it in both branches.
    if mode is not None and im.mode != mode:
        im = im.convert(mode)
    return im
def screencap(self):
    """Grab a screenshot from the device via `screencap -p`.

    The raw PNG bytes are read from the exec stream, decoded, and rotated by
    ``self.screenshot_rotate`` degrees when a rotation is configured.
    """
    stream = self.device_session_factory().exec_stream('screencap -p')
    raw = recvall(stream, 4194304, True)
    image = Image.open(BytesIO(raw))
    if self.screenshot_rotate != 0:
        return image.rotate(self.screenshot_rotate)
    return image
def replay_custom_record(self, record_name, mode=None, back_to_main=None):
    """Replay a previously captured tap record on the current device.

    :param record_name: name of the record; resolved via get_record_path()
    :param mode: 'match_template' (locate each tap target by template
        matching) or 'point' (replay recorded coordinates); defaults to the
        record's 'prefer_mode' field
    :param back_to_main: whether to return to the main screen before
        replaying; defaults to the record's 'back_to_main' field (True)
    :raises RuntimeError: when the record does not exist, the mode is
        unsupported, or a template cannot be matched
    """
    from util import cvimage as Image
    record_dir = self.get_record_path(record_name)
    if record_dir is None:
        # message: "record not found: <record_name>"
        self.logger.error(f'未找到相应的记录: {record_name}')
        raise RuntimeError(f'未找到相应的记录: {record_name}')
    with open(record_dir.joinpath('record.json'), 'r', encoding='utf-8') as f:
        record_data = json.load(f)
    self.logger.info(
        f'record description: {record_data.get("description")}')
    records = record_data['records']
    if mode is None:
        mode = record_data.get('prefer_mode', 'match_template')
    if mode not in ('match_template', 'point'):
        # message: "unsupported mode: <mode>"
        self.logger.error(f'不支持的模式: {mode}')
        raise RuntimeError(f'不支持的模式: {mode}')
    if back_to_main is None:
        back_to_main = record_data.get('back_to_main', True)
    if back_to_main:
        self.addon(CommonAddon).back_to_main()
    record_height = record_data['screen_height']
    # scale factor from the current viewport height to the recorded height
    ratio = record_height / self.viewport[1]
    x, y = 0, 0
    for record in records:
        if record['type'] == 'tap':
            repeat = record.get('repeat', 1)
            raise_exception = record.get('raise_exception', True)
            threshold = record.get('threshold', 0.7)
            for _ in range(repeat):
                if mode == 'match_template':
                    screen = self.device.screenshot()
                    gray_screen = screen.convert('L')
                    if ratio != 1:
                        # resize the live screenshot to the recorded resolution
                        # so the stored templates match at native scale
                        gray_screen = gray_screen.resize(
                            (int(self.viewport[0] * ratio), record_height))
                    template = Image.open(
                        record_dir.joinpath(record['img'])).convert('L')
                    (x, y), r = imgreco.imgops.match_template(
                        gray_screen, template)
                    # map the matched point back to viewport coordinates
                    x = x // ratio
                    y = y // ratio
                    self.logger.info(
                        f'(x, y), r, record: {(x, y), r, record}')
                    if r < threshold:
                        if raise_exception:
                            # message: "unrecognized image: <img>"
                            self.logger.error('无法识别的图像: ' + record['img'])
                            raise RuntimeError('无法识别的图像: ' + record['img'])
                        break
                elif mode == 'point':
                    # in this mode the screen aspect ratio must match the
                    # recording exactly
                    assert record_data['screen_width'] == int(
                        self.viewport[0] * ratio)
                    x, y = record['point']
                    x = x // ratio
                    y = y // ratio
                self.device.touch_tap((x, y), offsets=(5, 5))
                if record.get('wait_seconds_after_touch'):
                    self.delay(record['wait_seconds_after_touch'])
""" for test purpose usage: python3 -m Arknights.ocr [language=zh-hans-cn] filename """ import importlib import sys import os from pprint import pprint ocr = importlib.import_module(__package__) from util import cvimage as Image if __name__ == '__main__': if 'OCR_IMPL' in os.environ: impl = ocr.get_impl(os.environ['OCR_IMPL']) else: impl = ocr.get_config_impl() print(impl.info) if 2 <= len(sys.argv) <= 3: lang = 'zh-hans-cn' if len(sys.argv) == 2 else sys.argv[1] filename = sys.argv[-1] result = impl.Engine(lang).recognize(Image.open(filename)) pprint(result, width=128) else: print('usage: %s [language=zh-hans-cn] filename' % sys.argv[0])
    # NOTE(review): the lines below continue a function whose `def` lies
    # before this chunk; one level of indentation is assumed — confirm
    # against the full file.
    vw, vh = get_vwvh(img)
    dlgtype, y = check_dialog(img)
    assert dlgtype == 'ok'
    # rectangle centered on the dialog's y coordinate, in viewport units
    return (25 * vw, y - 4 * vh, 75 * vw, y + 4 * vh)


def convert_to_pil(cv_img, color_code=cv.COLOR_BGR2RGB):
    """Convert an OpenCV ndarray (BGR by default) to a PIL-style Image."""
    return Image.fromarray(cv.cvtColor(cv_img, color_code))


def convert_to_cv(pil_img, color_code=cv.COLOR_BGR2RGB):
    """Convert a PIL-style Image to an OpenCV ndarray (BGR by default)."""
    return cv.cvtColor(np.asarray(pil_img), color_code)


def softmax(x):
    """Compute softmax values for each set of scores in x."""
    # subtract the max for numerical stability before exponentiating
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)


def get_vwvh(size):
    """Return 1% of width and height ("vw"/"vh" units) for *size*.

    Accepts either a (width, height) tuple or an object exposing
    .width/.height attributes (e.g. an image).
    """
    if isinstance(size, tuple):
        return (size[0] / 100, size[1] / 100)
    return (size.width / 100, size.height / 100)


if __name__ == "__main__":
    import sys
    # usage: python module.py <function_name> <image_file>
    print(globals()[sys.argv[-2]](Image.open(sys.argv[-1])))
    # NOTE(review): the lines below continue a function whose `def` lies
    # before this chunk; one level of indentation is assumed.
    logger.debug('bias: %s', bias)
    # shift every stored stage position by the detected bias, then scale
    result = {name: (pos + bias) * scale
              for name, pos in map_vectors.stage_maps[partition].items()}
    return result


def recognize_daily_menu(img, partition):
    """Locate stage entries of *partition* on a daily-menu screenshot.

    :return: {name: (position scaled back to the input image, confidence)}
        for templates whose TM_SQDIFF_NORMED score is below 0.08
        (lower is better for SQDIFF).
    """
    logger.debug('recognizing daily menu in partition %s', partition)
    # candidate names come from resource entries 'maps/<partition>/<name>.png'
    names = [x[:-4] for x in resources.get_entries('maps/' + partition)[1]]
    # normalize to 720p for matching, remember the factor to scale back
    scale = img.height / 720
    img = imgops.scale_to_height(img.convert('RGB'), 720)
    imgmat = np.asarray(img)
    match_results = [(name, *imgops.match_template(imgmat, resources.load_image_cached('maps/%s/%s.png' % (partition, name), 'RGB'), method=cv.TM_SQDIFF_NORMED)) for name in names]
    logger.debug('%s', match_results)
    result = {name: (np.asarray(pos) * scale, conf) for name, pos, conf in match_results if conf < 0.08}
    return result


def get_daily_menu_entry(viewport, daily_type):
    """Return the daily-menu entry rectangle for *daily_type* in pixels.

    :raises KeyError: for unknown daily types
    """
    vw, vh = common.get_vwvh(viewport)
    if daily_type == 'material' or daily_type == 'soc':
        return 62.656*vw, 90.185*vh, 65.677*vw, 96.019*vh
    else:
        raise KeyError(daily_type)


if __name__ == '__main__':
    import sys
    import pprint
    # usage: python module.py <function_name> <image_file> <partition>
    pprint.pprint(globals()[sys.argv[1]](Image.open(sys.argv[2]), sys.argv[3]))
            # NOTE(review): this span opens mid-expression — it is the tail of
            # a return statement (four corner points) from a function whose
            # `def` lies before this chunk; indentation assumed.
            (29.297 * vw, 26.528 * vh)), np.array((37.109 * vw, 26.528 * vh)),
            np.array((37.109 * vw, 61.111 * vh)), np.array((29.297 * vw, 61.111 * vh)))
    else:
        # FIXME: implement with feature matching?
        raise NotImplementedError('unsupported aspect ratio')


# Enter the order list of another trading post from the current order list
def get_my_sell_task_2(img):
    """Return the four corner points of the switch-facility button.

    :returns: corner order
        [0][1]
        [3][2]
    """
    aspect = Fraction(*img.size)
    vw, vh = common.get_vwvh(img)
    if aspect == Fraction(16, 9):
        return (np.array(
            (1.094 * vw, 25.972 * vh)), np.array((16.875 * vw, 25.972 * vh)),
            np.array((16.875 * vw, 33.472 * vh)), np.array((1.094 * vw, 33.472 * vh)))
    else:
        # FIXME: implement with feature matching?
        raise NotImplementedError('unsupported aspect ratio')


if __name__ == "__main__":
    import sys
    print(check_main(Image.open(sys.argv[-1])))
import sys

if len(sys.argv) > 1:
    from util import cvimage as Image
    import imgreco
    obj = imgreco
    objname = '.'.join(sys.argv[1:-1])
    print('> imgreco.%s(Image.open(%s))' % (objname, repr(sys.argv[-1])))
    # dotted path resolved so far; used both for diagnostics and as the
    # base of fallback module imports
    known = ['imgreco']
    tag = object()  # sentinel distinguishing "attribute missing" from None
    for k in sys.argv[1:-1]:
        obj = getattr(obj, k, tag)
        if obj is tag:
            print("%s has no attribute %r, try import" % ('.'.join(known), k))
            import importlib
            obj = importlib.import_module('.'.join(known + [k]))
        # FIX: record each resolved segment. Previously `known` never grew,
        # so a second fallback import tried 'imgreco.<k>' instead of the
        # full 'imgreco.<...>.<k>' path (and the diagnostic above always
        # blamed the top-level package).
        known.append(k)
    print(obj(Image.open(sys.argv[-1])))
else:
    print('usage: python -m imgreco module_name function_name image_file')
        # NOTE(review): the lines below continue an AP-refill recognizer
        # whose beginning lies before this chunk; indentation of the first
        # `return` is assumed to be one level deeper than the rest — confirm
        # against the full file.
        return 'item'
    # compare the right-half icon region against the "no originium" template
    icon1 = img.crop((50 * vw + 25.972 * vh, 36.250 * vh, 50 * vw + 54.722 * vh, 61.250 * vh)).convert('RGB')
    icon2 = resources.load_image_cached('before_operation/no_originium.png', 'RGB')
    icon1, icon2 = imgops.uniform_size(icon1, icon2)
    mse3 = imgops.compare_mse(icon1, icon2)
    logger.logimage(icon1)
    logger.logtext('mse=%f' % mse3)
    if mse3 < 500:
        # matches the "no originium" template: nothing available to refill
        return None
    else:
        return 'originium'


def get_ap_refill_confirm_rect(viewport):
    """Bounding box of the refill "confirm" button, in viewport units."""
    vw, vh = common.get_vwvh(viewport)
    return (50 * vw + 49.537 * vh, 77.222 * vh, 50 * vw + 74.352 * vh, 84.815 * vh)


def get_ap_refill_cancel_rect(viewport):
    """Bounding box of the refill "cancel" button, in viewport units."""
    vw, vh = common.get_vwvh(viewport)
    return (50 * vw + 14.259 * vh, 77.130 * vh, 50 * vw + 24.352 * vh, 83.611 * vh)


if __name__ == "__main__":
    print(recognize(Image.open(sys.argv[-1])))
def load_image(name, mode=None):
    """Open the named resource as an image, converting it to *mode* when the
    loaded image's mode differs."""
    img = Image.open(open_file(name))
    if mode is not None and img.mode != mode:
        return img.convert(mode)
    return img
def _ensure_pil_image(imgorfile):
    """Return *imgorfile* unchanged when it already is an Image; otherwise
    treat it as a file/path and open it."""
    if not isinstance(imgorfile, Image.Image):
        return Image.open(imgorfile)
    return imgorfile