def test_process_tile_two_colors(self):
    """Two-color tiles produce order-dependent color_needs but the same
    dot profile regardless of which color appears first."""
    expected_dot_profile = bytearray(([0] * 4 + [1] * 8 + [0] * 4) * 4)
    cases = [
        ('testdata/blue-and-red-tile.png', bytearray([2, 7, 0xff, 0xff])),
        ('testdata/red-and-blue-tile.png', bytearray([7, 2, 0xff, 0xff])),
    ]
    for filename, expected_needs in cases:
        img = Image.open(filename)
        processor = image_processor.ImageProcessor()
        processor.load_image(img)
        (color_needs, dot_profile) = processor.process_tile(0, 0, 0, 0)
        self.assertEqual(color_needs, expected_needs)
        self.assertEqual(dot_profile, expected_dot_profile)
def enable_camera(self):
    """Create the camera objects publisher and processor, then subscribe.

    Idempotent: does nothing when the camera is already enabled.
    """
    if not self.camera_enabled:
        self.objects_publisher = rospy.Publisher(
            "/robot/camera/objects", String, queue_size=1)
        self.image_processor = image_processor.ImageProcessor(
            self.objects_publisher)
        self.subscribe_to_camera()
        self.camera_enabled = True
        # Short pause — presumably to let the ROS publisher/subscription
        # finish registering before use; TODO confirm.
        self.sleep(0.2)
def process_image(self, img, palette_text=None, auto_sprite_bg=False, traversal=None):
    """Pick the processor implied by self.args, run it on img, and keep
    its ppu_memory and error collector on self."""
    a = self.args
    if 'free' in a.traversal:
        self.processor = free_sprite_processor.FreeSpriteProcessor(a.traversal)
        self.processor.set_verbose('--verbose' in sys.argv)
        self.processor.process_image(
            img, palette_text, a.bg_color.mask, a.bg_color.fill,
            a.platform, a.is_locked_tiles, a.lock_sprite_flips,
            a.allow_overflow)
    elif a.traversal == '8x16':
        self.processor = eight_by_sixteen_processor.EightBySixteenProcessor()
        self.processor.process_image(
            img, palette_text, a.bg_color.mask, a.bg_color.fill,
            a.platform, a.traversal, a.is_sprite, a.is_locked_tiles,
            a.lock_sprite_flips, a.allow_overflow)
    else:
        self.processor = image_processor.ImageProcessor()
        if auto_sprite_bg:
            # Test-only hook for forcing an automatic sprite background.
            self.processor._test_only_auto_sprite_bg = auto_sprite_bg
        self.processor.process_image(
            img, palette_text, a.bg_color.mask, a.bg_color.fill,
            a.platform, a.traversal, a.is_sprite, a.is_locked_tiles,
            a.lock_sprite_flips, a.allow_overflow)
    self.ppu_memory = self.processor.ppu_memory()
    self.err = self.processor.err()
def test_process_tile_solid(self):
    """A solid-color tile needs exactly one color and a zeroed dot profile."""
    processor = image_processor.ImageProcessor()
    processor.load_image(Image.open('testdata/blue-tile.png'))
    color_needs, dot_profile = processor.process_tile(0, 0, 0, 0)
    self.assertEqual(color_needs, bytearray([2, 0xff, 0xff, 0xff]))
    self.assertEqual(dot_profile, bytearray(64))
def run(self, img, args):
    """Dispatch img to the processor selected by args and emit output.

    Returns:
        True on success, False when the selected processor reported
        errors (errors are forwarded to handle_errors).
    Raises:
        errors.CommandLineArgError: when flags required by the chosen
            traversal strategy are missing.
    """
    traversal = self.get_traversal(args.traversal_strategy)
    if args.makepal:
        # Processor modules are imported lazily so startup only pays for
        # the mode actually used.
        global makepal_processor
        if not makepal_processor:
            import makepal_processor
        processor = makepal_processor.MakepalProcessor()
        processor.process_image(img, args)
        if processor.err().has():
            self.handle_errors(processor.err(), img, args)
            return False
        # makepal mode writes its own output and skips the common path below.
        processor.create_output(args.output)
        return True
    elif 'free' in traversal:
        if not args.is_sprite or args.bg_color.fill is None:
            raise errors.CommandLineArgError(
                'Traversal strategy \'%s\' requires -s and -b `mask=fill` flags' % (
                    traversal))
        global free_sprite_processor
        if not free_sprite_processor:
            import free_sprite_processor
        processor = free_sprite_processor.FreeSpriteProcessor(traversal)
        processor.set_verbose('--verbose' in sys.argv)
        processor.process_image(img, args.palette, args.bg_color.mask,
                                args.bg_color.fill, args.is_locked_tiles,
                                args.lock_sprite_flips, args.allow_overflow)
    elif traversal == '8x16':
        if not args.is_sprite:
            raise errors.CommandLineArgError('Traversal strategy \'8x16\' requires '
                                             '-s flag')
        global eight_by_sixteen_processor
        if not eight_by_sixteen_processor:
            import eight_by_sixteen_processor
        processor = eight_by_sixteen_processor.EightBySixteenProcessor()
        processor.process_image(img, args.palette, args.bg_color.mask,
                                args.bg_color.fill, traversal, args.is_sprite,
                                args.is_locked_tiles, args.lock_sprite_flips,
                                args.allow_overflow)
    else:
        # Default: plain horizontal/vertical traversal via ImageProcessor.
        global image_processor
        if not image_processor:
            import image_processor
        processor = image_processor.ImageProcessor()
        processor.process_image(img, args.palette, args.bg_color.mask,
                                args.bg_color.fill, traversal, args.is_sprite,
                                args.is_locked_tiles, args.lock_sprite_flips,
                                args.allow_overflow)
    # NOTE(review): exact dedent level of the lines below was lost in the
    # whitespace-mangled source; placed at function level to apply to every
    # non-makepal branch — confirm against upstream.
    if args.bg_color.fill:
        processor.ppu_memory().override_bg_color(args.bg_color.fill)
    self.create_views(processor.ppu_memory(), args, img)
    if processor.err().has():
        self.handle_errors(processor.err(), img, args)
        return False
    self.create_output(processor.ppu_memory(), args, traversal)
    if args.show_stats:
        self.show_stats(processor.ppu_memory(), processor, args)
    return True
def __init__(self, *args, **kwargs):
    """Build the camera-processor window: timestamp buffers, processors,
    camera/reader objects, UI widgets, polling timer, and readout thread.

    Args:
        args[0]: path to watch (stored as path_to_watch).
    """
    super().__init__()
    self.path_to_watch = args[0]
    print("CameraProcessor main thread id: ", threading.get_native_id())
    # Ring-style buffers for frame/cpu timestamps; ind_stamp is the write index.
    self.frame_timestamps = np.zeros(1000)
    self.cpu_timestamps = np.zeros(1000)
    self.ind_stamp = 0
    # Truncate the timestamp log files left over from any previous run.
    with open("frame_timestamps.bin", "wb") as f:
        pass
    with open("cpu_timestamps.bin", "wb") as f:
        pass
    print('Cleared timestamps file')
    self.imgProc = image_processor.ImageProcessor()
    self.imgProcPlugin = image_processor_plugin.ImageProcessor()
    self.camera = sui_camera.SUICamera()
    self.ebusReader = ebus_reader.EbusReader(use_mock=False, camera=self.camera)
    # Hot-reload hook: react when the plugin source changes on disk.
    self.fileWatcher = QtCore.QFileSystemWatcher(
        ["image_processor_plugin.py"])
    self.fileWatcher.fileChanged.connect(self.fileWatcher_fileChanged)
    self.setupUI()
    # Widget groups for exposure and frame-period settings, keyed by role.
    self.widgets_exposure = {
        'requestedCounts': self.editExposureCount,
        'requestedMS': self.lblExposureMSRequested,
        'actualMS': self.lblExposureMSActual,
        'actualCounts': self.lblExposureCounts,
    }
    self.widgets_frameperiod = {
        'requestedCounts': self.editFrameCount,
        'requestedMS': self.lblFrameMSRequested,
        'actualMS': self.lblFrameMSActual,
        'actualCounts': self.lblFrameCounts,
    }
    # Any optics-related edit recomputes the field of view.
    for w in [
            self.editPixelPitch, self.editTargetDistance,
            self.editResolution, self.editFocalLength
    ]:
        w.editingFinished.connect(self.updateFOV)
    # start polling timer for the serial reads:
    self.timer = QtCore.QTimer()
    self.timer.timeout.connect(self.pollSerial)
    self.timer_period_ms = 100
    self.timer.start(self.timer_period_ms)
    # start the image reader thread:
    # it will actually sit idle until we actually connect and open a stream:
    self.startImageReadoutThread()
def __init__(self, drum_areas):
    """Store the drum areas and wire up the processing helpers.

    Args:
        drum_areas: sequence of drum-area objects; each gets its index
            written to `.id` so detections map back to a drum.
    """
    self.drum_areas = drum_areas
    # enumerate() instead of range(len(...)): same ids, clearer intent.
    for i, area in enumerate(self.drum_areas):
        area.id = i
    self.img_process = image_processor.ImageProcessor()
    self.sound_player = sound_player.SoundPlayer()
    self.img_dfc_tool = image_difference_tool.ImageDifferenceTool()
    self.prev_color_check = False
    self.nn = NN('models/main_model')
def test_extract_16overuse_bmp(self):
    """Extract a palette from a 16-color indexed bmp stored with 8bpp."""
    extractor = extract_indexed_image_palette.ExtractIndexedImagePalette(
        image_processor.ImageProcessor())
    img = Image.open('testdata/full-image-16color-overuse.bmp')
    pal = extractor.extract_palette(self.imgpal(img))
    self.assertEqual(
        str(pal), 'P/16-30-01-0f/16-30-01-38/16-30-19-28/16-23-23-28/')
def enable_camera(self):
    """Initialize the PiCamera on first use; later calls are no-ops."""
    if self.camera_enabled:
        return
    from picamera import PiCamera
    import image_processor
    self.image_processor = image_processor.ImageProcessor()
    self.camera = PiCamera()
    self.stream = BytesIO()
    self.camera.resolution = (480, 320)
    self.camera.start_preview()
    # Pause after start_preview — presumably sensor warm-up; TODO confirm.
    time.sleep(2)
    self.camera_enabled = True
def __init__(self, *args, **kwargs):
    """Construct the GUI frame: processor, renderer, watcher, and handlers."""
    super(MakechrGui, self).__init__(*args, **kwargs)
    self.processor = image_processor.ImageProcessor()
    self.renderer = view_renderer.ViewRenderer(scale=1)
    # UI state populated later by Create()/user actions.
    self.inputImagePath = None
    self.cursor = None
    self.manager = None
    self.watcher = file_modify_watcher.FileModifyWatcher()
    self.messageTimer = None
    self.Create()
    self.Bind(wx.EVT_CLOSE, self._close_handler)
def process_image(self, img, palette_text=None, traversal=None):
    """Run the default ImageProcessor over img and record its ppu memory."""
    # Falsy traversal (None or empty) falls back to horizontal order.
    traversal = traversal or 'horizontal'
    platform = None
    proc = image_processor.ImageProcessor()
    proc.process_image(
        img, palette_text, self.args.bg_color.mask, self.args.bg_color.fill,
        platform, traversal, self.args.is_sprite, self.args.is_locked_tiles,
        self.args.lock_sprite_flips, self.args.allow_overflow)
    self.processor = proc
    self.ppu_memory = proc.ppu_memory()
def process_image(self, img, args):
    """Analyze img: find the unit size and build the palette.

    Errors are not raised to the caller; any exception during analysis is
    recorded in self._err, so callers must check the error collector.
    """
    self._err = errors.ErrorCollector()
    self.width, self.height = img.size
    self.pixels = img.load()
    # Reuse the base ImageProcessor's pixel-level helpers on our pixel buffer.
    self.base = image_processor.ImageProcessor()
    self.base.pixels = self.pixels
    try:
        self.unit_size = self._find_unit_size()
        self.pal = self._build_palette()
    except Exception as e:
        # Deliberate catch-all: analysis failures are collected, not raised.
        self._err.add(e)
        return
def test_wrapped_48_bytes_bmp(self):
    """Parse a BMP palette supplied as 48 raw RGB bytes (16 entries)."""
    processor = image_processor.ImageProcessor()
    extractor = extract_indexed_image_palette.ExtractIndexedImagePalette(
        processor)
    # Renamed from `bytes` to avoid shadowing the builtin.
    palette_bytes = [
        0, 56, 252, 255, 255, 255, 252, 0, 0, 0, 0, 0,
        0, 56, 252, 255, 255, 255, 252, 0, 0, 132, 216, 252,
        0, 56, 252, 255, 255, 255, 0, 184, 0, 0, 184, 248,
        0, 56, 252, 249, 145, 173, 249, 145, 173, 0, 184, 248
    ]
    pal = extractor.extract_palette(
        MockWrappedImagePalette(palette_bytes, 'BMP'))
    self.assertEqual(
        str(pal), 'P/16-30-01-0f/16-30-01-38/16-30-19-28/16-23-23-28/')
def test_wrapped_64_bytes_png(self):
    """Parse a PNG palette supplied as 64 raw RGBX bytes (16 entries)."""
    processor = image_processor.ImageProcessor()
    extractor = extract_indexed_image_palette.ExtractIndexedImagePalette(
        processor)
    # Renamed from `bytes` to avoid shadowing the builtin.
    palette_bytes = [
        252, 56, 0, 0, 255, 255, 255, 1, 0, 0, 252, 2, 0, 0, 0, 3,
        252, 56, 0, 4, 255, 255, 255, 5, 0, 0, 252, 6, 252, 216, 132, 8,
        252, 56, 0, 8, 255, 255, 255, 9, 0, 184, 0, 10, 248, 184, 0, 11,
        252, 56, 0, 12, 173, 145, 249, 13, 173, 145, 249, 14, 248, 184, 0, 15
    ]
    pal = extractor.extract_palette(
        MockWrappedImagePalette(palette_bytes, 'PNG'))
    self.assertEqual(
        str(pal), 'P/16-30-01-0f/16-30-01-38/16-30-19-28/16-23-23-28/')
def ProcessMakechr(self):
    """Rebuild the processor for the current options and run it on the image."""
    config = self.BuildConfigFromOptions()
    # TODO: It might be inefficient to reconstruct every time.
    if config.traversal != '8x16':
        self.processor = image_processor.ImageProcessor()
    else:
        self.processor = eight_by_sixteen_processor.EightBySixteenProcessor()
    # Hoisted out of both branches (was duplicated in each).
    self.manager.setProcessor(self.processor)
    # Renamed local from `input` to avoid shadowing the builtin.
    img = Image.open(self.inputImagePath)
    self.processor.process_image(img, None, None, None, config.traversal,
                                 config.is_sprite, config.is_locked_tiles,
                                 None, config.allow_overflow)
def test_process_tile_all_four_colors(self):
    """A gradient tile exercises all four colors in the dot profile."""
    processor = image_processor.ImageProcessor()
    processor.load_image(Image.open('testdata/gradiant-tile.png'))
    color_needs, dot_profile = processor.process_tile(0, 0, 0, 0)
    self.assertEqual(color_needs, bytearray([2, 0x21, 0x2a, 0x0b]))
    # Expected profile row by row (8x8 tile).
    rows = [
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 1, 1, 1, 1, 2],
        [0, 0, 1, 1, 1, 1, 2, 2],
        [0, 1, 1, 1, 1, 2, 2, 2],
        [1, 1, 1, 1, 2, 2, 2, 2],
        [1, 1, 1, 2, 2, 2, 2, 3],
        [1, 1, 2, 2, 2, 2, 3, 3],
        [1, 2, 2, 2, 2, 3, 3, 3],
    ]
    expected = bytearray(v for row in rows for v in row)
    self.assertEqual(dot_profile, expected)
def produce_overlay(self):
    """Flatten the overlay queue into a single image.

    Returns:
        None for an empty queue, the lone base image for a one-entry
        queue, otherwise every queued (image, position) pair composited
        onto the base in order.
    """
    # Guard BEFORE indexing: the original read overlay_queue[0][0] first,
    # which raised IndexError on an empty queue.
    if not self.overlay_queue:
        return None
    base_image = self.overlay_queue[0][0]
    if len(self.overlay_queue) == 1:
        return base_image
    processor = image_processor.ImageProcessor()
    combined = base_image
    for overlay_img, position in self.overlay_queue[1:]:
        combined = processor.overlay_images(combined, overlay_img, position)
    return combined
def makeImgproc(doc):
    """Build an ImageProcessor description from the FIRST paragraph record
    of doc; returns None implicitly when doc has no paragraphs.

    Optional sections ('Release state', 'Input ports', 'Parameters',
    'Includes') fall back to defaults when absent.
    """
    for title, content in doc.paragraphs.items():
        # Only take first record
        return image_processor.ImageProcessor(
            title,
            body=''.join(content.paragraphs['Implementation']
                         .paragraphs['Source code'].text),
            processor_id=content.paragraphs['Tags'].paragraphs['Id'].text[0],
            release_state=content.paragraphs['Tags']
            .paragraphs['Release state'].text[0]
            if 'Release state' in content.paragraphs['Tags'].paragraphs
            else 'Experimental',
            input_ports=getPorts(content.paragraphs['Input ports'].paragraphs)
            if 'Input ports' in content.paragraphs else dict(),
            output_ports=getPorts(content.paragraphs['Output ports'].paragraphs),
            params=getParams(content.paragraphs['Parameters'].paragraphs
                             if 'Parameters' in content.paragraphs else dict()),
            user_includes=content.paragraphs['Implementation']
            .paragraphs['Includes'].text
            if 'Includes' in content.paragraphs['Implementation'].paragraphs
            else [],
            # +1: include lines begin on the line AFTER the section header.
            user_includes_start=content.paragraphs['Implementation']
            .paragraphs['Includes'].line_no + 1
            if 'Includes' in content.paragraphs['Implementation'].paragraphs
            else 0,
            # Literal backslash sequences ('\\n\\n') — kept verbatim, the
            # consumer presumably unescapes them; TODO confirm.
            description='\\n\\n'.join(content.text),
            category=content.paragraphs['Tags'].paragraphs['Category'].text[0],
            impl_start=content.paragraphs['Implementation']
            .paragraphs['Source code'].line_no + 1)
def run(self, img, args):
    """Dispatch img to the processor selected by args and emit output.

    Newer variant of run(): adds args.platform threading and the
    --decompose_sprites mode.

    Returns:
        True on success, False when the selected processor reported
        errors (errors are forwarded to handle_errors).
    Raises:
        errors.CommandLineArgError: when flags required by the chosen
            mode are missing.
    """
    traversal = self.get_traversal(args.traversal_strategy)
    if args.makepal:
        # Processor modules are imported lazily so startup only pays for
        # the mode actually used.
        global makepal_processor
        if not makepal_processor:
            import makepal_processor
        processor = makepal_processor.MakepalProcessor()
        processor.process_image(img, args)
        if processor.err().has():
            self.handle_errors(processor.err(), img, args)
            return False
        # makepal mode writes its own output and skips the common path below.
        processor.create_output(args.output)
        return True
    elif 'free' in traversal:
        # DEPRECATED
        if not args.is_sprite or args.bg_color.fill is None:
            raise errors.CommandLineArgError(
                'Traversal strategy \'%s\' requires -s and -b `mask=fill` flags'
                % (traversal))
        global free_sprite_processor
        if not free_sprite_processor:
            import free_sprite_processor
        processor = free_sprite_processor.FreeSpriteProcessor(traversal)
        processor.set_verbose('--verbose' in sys.argv)
        processor.process_image(img, args.palette, args.bg_color.mask,
                                args.bg_color.fill, args.platform,
                                args.is_locked_tiles, args.lock_sprite_flips,
                                args.allow_overflow)
    elif args.decompose_sprites:
        # --decompose_sprites is a processor mode, not a traversal style.
        # Can be combined with --free 8x16. Implies -s
        # TODO: Make -b optional, derive it if possible
        if args.bg_color.fill is None:
            raise errors.CommandLineArgError(
                'Decompose sprites mode requires -b `mask=fill` flags')
        global decompose_sprites_processor
        if not decompose_sprites_processor:
            import decompose_sprites_processor
        processor = decompose_sprites_processor.DecomposeSpritesProcessor()
        # TODO: lock_sprite_flags, is_locked_tiles, allow_overflow?
        processor.process_image(
            img, args.palette, args.bg_color.mask, args.bg_color.fill,
            {
                'anon_view': args.rect_cover_anon_view,
                'steps_view': args.rect_cover_steps_view
            })
        if processor.err().has():
            self.handle_errors(processor.err(), img, args)
            return False
        # Decompose mode implies sprite output for the shared tail below.
        args.is_sprite = True
    elif traversal == '8x16':
        if not args.is_sprite:
            raise errors.CommandLineArgError(
                'Traversal strategy \'8x16\' requires '
                '-s flag')
        global eight_by_sixteen_processor
        if not eight_by_sixteen_processor:
            import eight_by_sixteen_processor
        processor = eight_by_sixteen_processor.EightBySixteenProcessor()
        processor.process_image(img, args.palette, args.bg_color.mask,
                                args.bg_color.fill, args.platform, traversal,
                                args.is_sprite, args.is_locked_tiles,
                                args.lock_sprite_flips, args.allow_overflow)
    else:
        # Default: plain horizontal/vertical traversal via ImageProcessor.
        global image_processor
        if not image_processor:
            import image_processor
        processor = image_processor.ImageProcessor()
        processor.process_image(img, args.palette, args.bg_color.mask,
                                args.bg_color.fill, args.platform, traversal,
                                args.is_sprite, args.is_locked_tiles,
                                args.lock_sprite_flips, args.allow_overflow)
    # NOTE(review): exact dedent level of the lines below was lost in the
    # whitespace-mangled source; placed at function level to apply to every
    # non-makepal branch — confirm against upstream.
    if args.bg_color.fill:
        processor.ppu_memory().override_bg_color(args.bg_color.fill)
    self.create_views(processor.ppu_memory(), args, img)
    if processor.err().has():
        self.handle_errors(processor.err(), img, args)
        return False
    self.create_output(processor.ppu_memory(), args, traversal, args.platform)
    if args.show_stats:
        self.show_stats(processor.ppu_memory(), processor, args)
    return True
# # approx = cv2.approxPolyDP(c, 0.02 * peri, True) # # # # x, y, w, h = cv2.boundingRect(c) # # diff_img_color = cv2.rectangle(edged, (x, y), (x + w, y + h), (0, 255, 255), 2) # # # # cv2.drawContours(diff_img_color, [approx], -1, (0, 255, 0), 4) # # # # total += 1 # # # # cv2.imshow("Output", diff_img_color) # # cv2.waitKey(0) # # sobel = cv2.Sobel(img_grey, cv2.CV_8U, ) img_proc = img_proc.ImageProcessor(True) cropped = img_proc.crop_logon_btn_img(img_grey) ret, binary = cv2.threshold(cropped, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) cv2.imshow('binary', binary) cv2.waitKey(100) # is_most_black = img_proc.binary_most_black_pixels(binary) # dict_col_pixels, dict_row_pixels = img_proc.count_pixels(binary, is_most_black) # # # fig = plt.figure() # # axes1 = fig.add_subplot(111) # # line, = axes1.plot(np.random.rand(10))
# Webcam loop: mirror each frame and binarize a 50x50 patch around (100, 100)
# based on per-channel thresholds, printing the center pixel every 3 seconds.
import cv2
import numpy as np
import time
import image_processor

cap = cv2.VideoCapture(0)
img_process = image_processor.ImageProcessor()
prev_time = 0
while (1):
    _, frame = cap.read()
    frame = img_process.horizontal_flip(frame)
    # It converts the BGR color space of image to HSV color space
    col = None
    for i in range(80, 130):
        for j in range(80, 130):
            if i == 100 and j == 100:
                # Rate-limited debug print of the center pixel (every 3 s).
                if time.time() - prev_time > 3:
                    prev_time = time.time()
                    print(frame[i, j])
            # NOTE(review): nesting was lost in the whitespace-mangled source.
            # `col` is assigned per-pixel here; if it actually belonged inside
            # the (100, 100) branch, the first comparison below would hit
            # col=None and raise — confirm against the original.
            col = frame[i, j]
            # NOTE(review): the `not x <= hi and x >= lo` forms look like an
            # inverted range test ("outside [lo, hi]") — verify intent.
            if (not col[2] <= 240 and col[2] >= 215) or (
                    not col[1] <= 170 and col[0] >= 130) or (
                    not col[0] <= 255 and col[0] >= 245):
                frame[i, j] = np.asarray([255, 255, 255])
            else:
                frame[i, j] = np.asarray([0, 0, 0])
def main_entry(j_dict):
    """Detect the login UI elements (user edit, password edit, logon button)
    from a screenshot plus its XML DOM dump.

    Args:
        j_dict: dict with 'dom_tree_url' (XML path) and 'screen_shot_url'
            (image path).
    Returns:
        verify_result(...) on success, a one-element error-message list on
        file-read failure, or None when the UI is not a login dialog or no
        logon button could be located.
    """
    trace_logger()
    xml_dom_url = j_dict['dom_tree_url']
    shot_img_url = j_dict['screen_shot_url']
    logging.info(
        "--------Auto Login Verification Start----------------------------")
    logging.info("dom tree file:" + xml_dom_url)
    logging.info("screen shot file:" + shot_img_url)
    # OCR defaults to English; recognition is run twice, once each with the
    # English and Chinese character sets.
    is_chinese_sim = False
    img_proc = image_processor.ImageProcessor(is_chinese_sim)
    # Load the full screenshot and the XML data; the XML yields all nodes.
    screen_img_color, height, width = img_proc.read_screen_img(shot_img_url)
    if screen_img_color is None:
        return [" Read image file FAILURE!!!"]
    xml_parser = xp.XmlParser()
    node_list = xml_parser.get_xml_data(xml_dom_url)
    if node_list is None:
        return [" Read xml file FAILURE!!!"]
    ##########################################################################
    edit_text_widgets = xml_parser.get_edit_text_widgets(
        node_list, USER_WIDGET_CLASS)
    '''Widget type from xml: edit boxes may come in other types too.'''
    btn_widgets = xml_parser.get_btn_widget(node_list, LOGON_WIDGET_CLASS)
    '''Widget type from xml: logon buttons may come in other types too.'''
    for n in btn_widgets:
        logging.info("candidate logon widget1: " + str(n.get('bounds')))
    estm_login_button = None
    if len(btn_widgets) == 0:
        '''No button of the expected type: fall back to OCR over
        android.view.View widgets.'''
        btn_widgets = xml_parser.get_btn_widget(node_list,
                                                LOGON_WIDGET_CLASS_SUPPLEMENT)
        estm_login_button = img_proc.ocr_detect(True, shot_img_url, LOGON_KEY,
                                                btn_widgets)
    flag = xml_parser.is_login_ui(edit_text_widgets, btn_widgets)
    if flag:
        logging.info("current ui is logon dialog !")
    else:
        logging.info("current ui is NOT logon dialog !!!")
        return None
    estm_user_edits = xml_parser.xml_estm_edit(edit_text_widgets,
                                               USER_TEXT_ATTR, USER_RC_ID_ATTR)
    '''Attribute info read from xml is unreliable.'''
    estm_user_edit = None
    is_logon_btn = False
    # Zero or multiple xml matches -> disambiguate with OCR.
    if len(estm_user_edits) == 0:
        estm_user_edit = img_proc.ocr_detect(is_logon_btn, shot_img_url,
                                             USER_ACCOUNT_KEY,
                                             edit_text_widgets)
    elif len(estm_user_edits) > 1:
        estm_user_edit = img_proc.ocr_detect(is_logon_btn, shot_img_url,
                                             USER_ACCOUNT_KEY,
                                             estm_user_edits)
    elif len(estm_user_edits) == 1:
        estm_user_edit = estm_user_edits[0]
    # if estm_user_edit is None:
    #     return None
    if estm_user_edit is not None:
        logging.info("FINAL detect user edit: " +
                     str(estm_user_edit.get('bounds')))
    num_edit_text_widgets = len(edit_text_widgets)
    estm_pwd_edit = None
    if num_edit_text_widgets >= 1:
        # if num_edit_text_widgets > 1:
        estm_pwd_edits = xml_parser.xml_estm_edit(edit_text_widgets,
                                                  PWD_TEXT_ATTR,
                                                  PWD_RC_ID_ATTR)
        if len(estm_pwd_edits) == 0:
            estm_pwd_edit = img_proc.ocr_detect(is_logon_btn, shot_img_url,
                                                PWD_KEY, edit_text_widgets)
        elif len(estm_pwd_edits) > 1:
            estm_pwd_edit = img_proc.ocr_detect(is_logon_btn, shot_img_url,
                                                PWD_KEY, estm_pwd_edits)
        elif len(estm_pwd_edits) == 1:
            estm_pwd_edit = estm_pwd_edits[0]
    # assert estm_pwd_edit is not None
    # if estm_pwd_edit is None:
    #     return None
    if estm_pwd_edit is not None:
        logging.info("FINAL detect password edit: " +
                     str(estm_pwd_edit.get('bounds')))
    candid_login_buttons = xml_parser.xml_estm_button(estm_user_edit,
                                                      estm_pwd_edit,
                                                      btn_widgets, LOGON_KEY)
    '''Attribute info read from xml is unreliable.'''
    # assert len(candid_login_buttons) >= 1
    # if len(candid_login_buttons) == 0:
    #     return None
    for n in candid_login_buttons:
        logging.info("candidate logon widget2: " + str(n.get('bounds')))
    estm_login_buttons = None
    edit_widget = None
    if len(candid_login_buttons) == 1:
        estm_login_button = candid_login_buttons[0]
    elif len(candid_login_buttons) > 1 or len(candid_login_buttons) == 0:
        # Anchor the OCR search near whichever edit box was found
        # (prefer the password edit).
        if estm_user_edit is not None and estm_pwd_edit is not None:
            edit_widget = estm_pwd_edit
        if estm_user_edit is not None and estm_pwd_edit is None:
            edit_widget = estm_user_edit
        if estm_user_edit is None and estm_pwd_edit is not None:
            edit_widget = estm_pwd_edit
        if estm_user_edit is None and estm_pwd_edit is None:
            return None
        estm_login_button = img_proc.ocr_detect_logon_button(
            True, edit_widget, shot_img_url, LOGON_KEY, candid_login_buttons)
    # assert estm_login_button is not None
    if estm_login_button is None:
        return None
    logging.info("FINAL detect logon button: " +
                 str(estm_login_button.get('bounds')))
    return verify_result(estm_user_edit, estm_pwd_edit, estm_login_button)
def main_entry2(json_dict):
    """OCR-first variant of main_entry: classify every XML leaf node as an
    account edit, password edit, or login button by OCR matching, then
    print the candidate bounds.

    NOTE(review): the bare `print 'x'` statements are Python 2 syntax,
    while the sibling main_entry fragment elsewhere uses print(); this
    function only runs under Python 2.

    Args:
        json_dict: dict with 'dom_tree_url' and 'screen_shot_url'.
    Returns:
        A one-element error-message list on file-read failure; otherwise
        falls off the end (None) after printing candidates.
    """
    trace_logger()
    xml_dom_url = json_dict['dom_tree_url']
    shot_img_url = json_dict['screen_shot_url']
    logging.info(
        "--------Auto Login Verification Start----------------------------")
    logging.info("dom tree file:" + xml_dom_url)
    logging.info("screen shot file:" + shot_img_url)
    # OCR defaults to English; recognition is run twice, once each with the
    # English and Chinese character sets.
    is_chinese_sim = False
    img_proc = image_processor.ImageProcessor(is_chinese_sim)
    # Load the full screenshot and the XML data; the XML yields all nodes.
    screen_img_color, height, width = img_proc.read_screen_img(shot_img_url)
    if screen_img_color is None:
        return [" Read image file FAILURE!!!"]
    xml_parser = xp.XmlParser()
    node_list = xml_parser.get_xml_data(xml_dom_url)
    if node_list is None:
        return [" Read xml file FAILURE!!!"]
    '''
    Two criteria decide whether this is a login dialog: keyword matches,
    and widget-position relationships obtained via OCR (e.g. the account
    edit box). When multiple results come back, filter them by combining
    keywords with widget positions.
    OCR accuracy is the key; the logon button may need its borders cropped
    before OCR. One pass collects all candidates, which are then filtered
    by position relationships. Rectangle detection identifies the logon
    button. For the special case where the UI is a webview, call shape
    detection and OCR directly; in the current logic, if OCR on the root
    node returns nothing, the shape-detection + OCR module is invoked.
    '''
    # Full-image rectangle detection to find the logon button, then OCR.
    '''
    OCR detects the account and password edit boxes without relying on
    unreliable xml widget-type attributes; results are then associated
    with the widget attribute values. Keyword matching uses NLP semantic
    analysis.
    '''
    leaf_node_list = []
    for n in node_list:
        children_node = n.getchildren()
        if len(children_node) == 0:
            leaf_node_list.append(n)
    account_candidate = []
    pwd_candidate = []
    login_candidate = []
    for n in leaf_node_list:
        coord = n.get('bounds')
        if coord is None:
            continue
        eng_ocr_detect_failure = False
        # Up to two OCR passes: English first, then Chinese on failure.
        for i in range(0, 2):
            if eng_ocr_detect_failure:
                is_chinese_sim = True
                img_proc.set_language(is_chinese_sim)
            flag_account = img_proc.ocr_detect_match(False, screen_img_color,
                                                     coord, USER_ACCOUNT_KEY)
            flag_pwd = img_proc.ocr_detect_match(False, screen_img_color,
                                                 coord, PWD_KEY)
            flag_login = img_proc.ocr_detect_match(True, screen_img_color,
                                                   coord, LOGON_KEY)
            if not flag_account and not flag_pwd and not flag_login:
                eng_ocr_detect_failure = True
                continue
            else:
                if flag_account:
                    account_candidate.append(n)
                elif flag_pwd:
                    pwd_candidate.append(n)
                elif flag_login:
                    login_candidate.append(n)
                break
    print 'Account Candidates:'
    for n1 in account_candidate:
        print n1.get('bounds')
    print 'Password Candidates:'
    for n2 in pwd_candidate:
        print n2.get('bounds')
    print 'Login Candidates:'
    for n3 in login_candidate:
        print n3.get('bounds')
def test_process_tile_error_palette_overflow(self):
    """A tile with too many colors raises PaletteOverflowError."""
    processor = image_processor.ImageProcessor()
    processor.load_image(Image.open('testdata/palette-overflow-tile.png'))
    self.assertRaises(errors.PaletteOverflowError,
                      processor.process_tile, 0, 0, 0, 0)
def test_process_tile_error_color_not_allowed(self):
    """A tile containing a non-convertible RGB raises CouldntConvertRGB."""
    processor = image_processor.ImageProcessor()
    processor.load_image(Image.open('testdata/color-not-allowed-tile.png'))
    self.assertRaises(errors.CouldntConvertRGB,
                      processor.process_tile, 0, 0, 0, 0)