def _listener(self, evt): d = self.d # keep screenshot for every call if not evt.is_before and evt.flag == consts.EVENT_SCREENSHOT: self.__last_screenshot = evt.retval if evt.depth > 1: # base depth is 1 return if evt.is_before: # call before function if evt.flag == consts.EVENT_CLICK: self.__last_screenshot = d.screenshot() # Maybe no need to set value here. (x, y) = evt.args cv_img = imutils.from_pillow(self.last_screenshot) cv_img = imutils.mark_point(cv_img, x, y) self.__last_screenshot = imutils.to_pillow(cv_img) self._add_to_gif(self.last_screenshot) return if evt.flag == consts.EVENT_CLICK: screen_before = self._save_screenshot(self.last_screenshot, name_prefix='before') screen_after = self._save_screenshot(name_prefix='after') (x, y) = evt.args self.add_step('click', screen_before=screen_before, screen_after=screen_after, position={'x': x, 'y': y}) elif evt.flag == consts.EVENT_CLICK_IMAGE: kwargs = { 'success': evt.traceback is None, 'traceback': None if evt.traceback is None else evt.traceback.stack, 'description': evt.kwargs.get('desc'), } # do not record if image not found and no trackback if evt.retval is None and evt.traceback is None: return # save before click image kwargs['screen_before'] = self._save_screenshot(self.last_screenshot, name_prefix='before') if evt.traceback is None or not isinstance(evt.traceback.exception, IOError): pattern = d.pattern_open(evt.args[0]) kwargs['target'] = self._save_screenshot(pattern, name_prefix='target') if evt.traceback is None: # update image to add a click mark (x, y) = evt.retval.pos cv_img = imutils.from_pillow(self.last_screenshot) cv_img = imutils.mark_point(cv_img, x, y) self.__last_screenshot = imutils.to_pillow(cv_img) kwargs['screen_before'] = self._save_screenshot(self.last_screenshot, name=kwargs['screen_before']) kwargs['screen_after'] = self._save_screenshot(name_prefix='after') kwargs['confidence'] = evt.retval.confidence kwargs['position'] = {'x': x, 'y': y} self.add_step('click_image', **kwargs)
def _uia_listener(self, evtjson):
    """UI-automator hook: record a click step with marked before/after shots.

    Only reacts to '_click' events. The before-phase refreshes the
    cached screenshot and remembers the tap center; the after-phase
    saves a marked 'before' image plus a fresh 'after' image and adds
    the step to the report.
    """
    evt = json2obj(evtjson)
    if evt.name != '_click':
        return
    if evt.is_before:
        self.d.screenshot()
        self.__uia_last_position = center(evt.this.bounds)
        return
    x, y = self.__uia_last_position
    # self.last_screenshot
    marked = imutils.mark_point(imutils.from_pillow(self.last_screenshot), x, y)
    before_img = imutils.to_pillow(marked)
    shot_before = self._save_screenshot(screen=before_img, append_gif=True)
    # FIXME: maybe need sleep for a while
    shot_after = self._save_screenshot(append_gif=True)
    self.add_step('click',
                  screen_before=shot_before,
                  screen_after=shot_after,
                  position={'x': x, 'y': y})
def match_all(self, pattern):
    """Test method, not suggested to use

    Runs template matching of *pattern* against a fresh region
    screenshot and returns every hit (up to 10).
    """
    opened = self.pattern_open(pattern)
    cv_screen = imutils.from_pillow(self.region_screenshot())
    return ac.find_all_template(cv_screen, opened.image, maxcnt=10)
def match_all(self, pattern, screen=None, rect=None, offset=None, threshold=None):
    """Find every screen position where *pattern* matches.

    Args:
        pattern: image file name or opencv image object
        screen (PIL.Image): optional; taken via region_screenshot() when None
        rect (tuple): optional (x0, y0, x1, y1) region to search within
        offset (tuple): optional percent offset, ex (0.2, 0.8)
        threshold (float): minimum confidence to accept a hit

    Returns:
        None when the matcher finds nothing at all, otherwise a list of
        (x, y) positions whose confidence exceeds *threshold* (may be
        empty if hits exist but none pass the threshold).
    """
    pattern = self.pattern_open(pattern)
    search_img = pattern.image
    pattern_scale = self._cal_scale(pattern)
    if pattern_scale != 1.0:
        search_img = cv2.resize(search_img, (0, 0),
                                fx=pattern_scale, fy=pattern_scale,
                                interpolation=cv2.INTER_CUBIC)
    screen = screen or self.region_screenshot()
    threshold = threshold or pattern.threshold or self.image_match_threshold

    # handle offset if percent, ex (0.2, 0.8)
    dx, dy = offset or pattern.offset or (0, 0)
    dx = pattern.image.shape[1] * dx  # opencv object width
    dy = pattern.image.shape[0] * dy  # opencv object height
    dx, dy = int(dx * pattern_scale), int(dy * pattern_scale)

    # image match
    screen = imutils.from_pillow(screen)  # convert to opencv image
    if rect and isinstance(rect, tuple) and len(rect) == 4:
        # BUGFIX: cast to int like match() does -- float rect values
        # produce float crop indices and float result positions
        (x0, y0, x1, y1) = [int(v * pattern_scale) for v in rect]
        (dx, dy) = dx + x0, dy + y0
        screen = imutils.crop(screen, x0, y0, x1, y1)
    # cv2.imwrite('cc.png', screen)

    ret_all = ac.find_all_template(screen, search_img, maxcnt=10)
    if not ret_all:
        return None
    position_list = []
    for ret in ret_all:
        if ret['confidence'] > threshold:
            (x, y) = ret['result']
            position_list.append((x + dx, y + dy))
    return position_list
def main(serial=None, host=None, port=None):
    # Interactive click tool: repeatedly grab a device screenshot, show a
    # half-size preview for the user to pick a point, then tap the device
    # at the corresponding full-resolution coordinates (hence the 2x).
    # NOTE(review): Python 2 syntax (print statement); loops forever
    # until the process is killed.
    d = atx.connect(serial, host=host, port=port)
    while True:
        pilimg = d.screenshot()
        cv2img = imutils.from_pillow(pilimg)
        # cv2img = cv2.imread('tmp.png')
        # cv2.imwrite('tmp.png', cv2img)
        cv2img = cv2.resize(cv2img, fx=0.5, fy=0.5, dsize=(0, 0))  # half-size preview
        pt = choose_point(cv2img)
        print 'click:', pt
        if pt:
            x, y = pt
            d.click(2*x, 2*y)  # preview was scaled by 0.5, so double back
        cv2.waitKey(100)
        # import time
        # time.sleep(0.1)
def match_all(self, pattern, threshold=None):
    """Return all template matches of *pattern* on the region screenshot.

    The pattern is rescaled to the device resolution first; up to 10
    hits at or above *threshold* are returned.
    """
    opened = self.pattern_open(pattern)
    query = opened.image
    scale = self._cal_scale(opened)
    if scale != 1.0:
        query = cv2.resize(query, (0, 0), fx=scale, fy=scale,
                           interpolation=cv2.INTER_CUBIC)
    threshold = threshold or opened.threshold or self.image_match_threshold
    cv_screen = imutils.from_pillow(self.region_screenshot())
    return ac.find_all_template(cv_screen, query,
                                threshold=threshold, maxcnt=10)
def _uia_listener(self, evtjson):
    """UI-automator hook: record a '_click' event as a report step.

    Before-phase: refresh the cached screenshot and remember the tap
    center. After-phase: save a 'click-before' image with the tap
    marked, a fresh 'click-after' image, and add the step.
    """
    evt = json2obj(evtjson)
    if evt.name != '_click':
        return
    if evt.is_before:
        self.d.screenshot()
        self.__uia_last_position = center(evt.this.bounds)
        return
    x, y = self.__uia_last_position
    # self.last_screenshot
    marked = imutils.mark_point(imutils.from_pillow(self.last_screenshot), x, y)
    shot_before = self._save_screenshot(screen=imutils.to_pillow(marked),
                                        name_prefix='click-before')
    # FIXME: maybe need sleep for a while
    shot_after = self._save_screenshot(name_prefix='click-after')
    self.add_step('click',
                  screen_before=shot_before,
                  screen_after=shot_after,
                  position={'x': x, 'y': y})
def _add_to_gif(self, image, scale=0.5):
    """Append *image* (PIL) to the recording GIF, downscaled by *scale*.

    Args:
        image: PIL.Image frame to append.
        scale: shrink factor applied to both dimensions; defaults to
            0.5, matching the previously hard-coded half size.
    """
    out = image.resize([int(scale * s) for s in image.size])
    cvimg = imutils.from_pillow(out)
    # reverse channel order on the last axis -- presumably BGR->RGB for
    # the GIF writer; confirm against the writer's expected format
    self.__gif.append_data(cvimg[:, :, ::-1])
def match(self, pattern, screen=None, rect=None, offset=None, threshold=None, method=None):
    """Check if image position in screen

    Args:
        - pattern: Image file name or opencv image object
        - screen (PIL.Image): optional, if not None, screenshot method will be called
        - rect (tuple): optional (x0, y0, x1, y1) sub-region to search
        - offset (tuple): optional percent offset, ex (0.2, 0.8)
        - threshold (float): it depends on the image match method
        - method (string): choices on <template | sift>

    Returns:
        None or FindPoint, For example:

        FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)

        Only when confidence > self.image_match_threshold, matched will be True

    Raises:
        TypeError: when image_match_method is invalid
    """
    pattern = self.pattern_open(pattern)
    search_img = pattern.image
    # rescale the pattern to the device resolution before matching
    pattern_scale = self._cal_scale(pattern)
    if pattern_scale != 1.0:
        search_img = cv2.resize(search_img, (0, 0),
                                fx=pattern_scale, fy=pattern_scale,
                                interpolation=cv2.INTER_CUBIC)
    screen = screen or self.region_screenshot()
    threshold = threshold or pattern.threshold or self.image_match_threshold
    # handle offset if percent, ex (0.2, 0.8)
    dx, dy = offset or pattern.offset or (0, 0)
    dx = pattern.image.shape[1] * dx  # opencv object width
    dy = pattern.image.shape[0] * dy  # opencv object height
    dx, dy = int(dx*pattern_scale), int(dy*pattern_scale)
    # image match
    screen = imutils.from_pillow(screen)  # convert to opencv image
    if rect and isinstance(rect, tuple) and len(rect) == 4:
        # crop to the requested region; shift offsets so the returned
        # position stays in full-screen coordinates
        (x0, y0, x1, y1) = [int(v*pattern_scale) for v in rect]
        (dx, dy) = dx+x0, dy+y0
        screen = imutils.crop(screen, x0, y0, x1, y1)
    #cv2.imwrite('cc.png', screen)
    match_method = method or self.image_match_method
    ret = None
    confidence = None
    matched = False
    position = None
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL: #IMG_METHOD_TMPL
        ret = ac.find_template(screen, search_img)
        if ret is None:
            return None
        confidence = ret['confidence']
        if confidence > threshold:
            matched = True
        (x, y) = ret['result']
        position = (x+dx, y+dy) # fix by offset
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        ret = ac.find_sift(screen, search_img, min_match_count=10)
        if ret is None:
            return None
        # sift confidence is a (matches, total) pair, not a float
        confidence = ret['confidence']
        matches, total = confidence
        if 1.0*matches/total > 0.5: # FIXME(ssx): sift just write here
            matched = True
        (x, y) = ret['result']
        position = (x+dx, y+dy) # fix by offset
    elif match_method == consts.IMAGE_MATCH_METHOD_AUTO:
        fp = self._match_auto(screen, search_img, threshold)
        if fp is None:
            return None
        (x, y) = fp.pos
        position = (x+dx, y+dy)
        # NOTE(review): this early return skips the self.bounds
        # adjustment applied below to the other methods -- confirm
        # whether that is intentional
        return FindPoint(position, fp.confidence, fp.method, fp.matched)
    else:
        raise TypeError("Invalid image match method: %s" %(match_method,))
    (x, y) = ret['result']
    position = (x+dx, y+dy) # fix by offset
    if self.bounds:
        # translate from region-local to absolute screen coordinates
        x, y = position
        position = (x+self.bounds.left, y+self.bounds.top)
    return FindPoint(position, confidence, match_method, matched=matched)
def _listener(self, evt):
    """Event hook fired before and after instrumented driver calls.

    Records 'click' and 'click_image' steps (with before/after
    screenshots) into the report, and keeps the most recent screenshot
    cached whenever an EVENT_SCREENSHOT call returns a value.

    Args:
        evt: event object exposing is_before, flag, depth, args,
            retval, kwargs and traceback.
    """
    d = self.d
    # keep screenshot for every call
    if not evt.is_before and evt.flag == consts.EVENT_SCREENSHOT:
        self.__last_screenshot = evt.retval
    if evt.depth > 1:  # base depth is 1
        # nested (internally triggered) calls are not recorded
        return
    if evt.is_before:  # call before function
        if evt.flag == consts.EVENT_CLICK:
            self.__last_screenshot = d.screenshot(
            )  # Maybe no need to set value here.
            (x, y) = evt.args
            # mark the tap position on the cached screenshot
            cv_img = imutils.from_pillow(self.last_screenshot)
            cv_img = imutils.mark_point(cv_img, x, y)
            self.__last_screenshot = imutils.to_pillow(cv_img)
            self._add_to_gif(self.last_screenshot)
        return
    # after-call handling
    if evt.flag == consts.EVENT_CLICK:
        screen_before = self._save_screenshot(self.last_screenshot,
                                              name_prefix='before')
        screen_after = self._save_screenshot(name_prefix='after')
        (x, y) = evt.args
        self.add_step('click',
                      screen_before=screen_before,
                      screen_after=screen_after,
                      position={
                          'x': x,
                          'y': y
                      })
    elif evt.flag == consts.EVENT_CLICK_IMAGE:
        kwargs = {
            'success': evt.traceback is None,
            'traceback': None if evt.traceback is None else evt.traceback.stack,
            'description': evt.kwargs.get('desc'),
        }
        # do not record if image not found and no trackback
        if evt.retval is None and evt.traceback is None:
            return
        # save before click image
        kwargs['screen_before'] = self._save_screenshot(
            self.last_screenshot, name_prefix='before')
        # IOError presumably indicates the pattern file could not be
        # opened, so skip saving a target image -- TODO confirm
        if evt.traceback is None or not isinstance(evt.traceback.exception, IOError):
            pattern = d.pattern_open(evt.args[0])
            kwargs['target'] = self._save_screenshot(pattern,
                                                     name_prefix='target')
        if evt.traceback is None:
            # update image to add a click mark
            (x, y) = evt.retval.pos
            cv_img = imutils.from_pillow(self.last_screenshot)
            cv_img = imutils.mark_point(cv_img, x, y)
            self.__last_screenshot = imutils.to_pillow(cv_img)
            # saving under the same name overwrites the unmarked file
            kwargs['screen_before'] = self._save_screenshot(
                self.last_screenshot, name=kwargs['screen_before'])
            kwargs['screen_after'] = self._save_screenshot(
                name_prefix='after')
            kwargs['confidence'] = evt.retval.confidence
            kwargs['position'] = {'x': x, 'y': y}
        self.add_step('click_image',
                      **kwargs)
def match(self, pattern, screen=None, rect=None, offset=None, threshold=None, method=None):
    """Locate *pattern* on screen and return a FindPoint (or None).

    Uses single template matching for IMAGE_MATCH_METHOD_TMPL;
    otherwise searches all template hits and accepts the first one
    whose confidence passes *threshold* and whose sample pixel color
    roughly agrees with the pattern.

    Args:
        pattern: image file name or opencv image object
        screen (PIL.Image): optional; screenshot taken when None
        rect (tuple): optional (x0, y0, x1, y1) sub-region to search
        offset (tuple): optional percent offset, ex (0.2, 0.8)
        threshold (float): minimum confidence for a match
        method: unused here beyond selecting self.image_match_method
            when None
    """
    pattern = self.pattern_open(pattern)
    search_img = pattern.image
    pattern_scale = self._cal_scale(pattern)
    if pattern_scale != 1.0:
        search_img = cv2.resize(search_img, (0, 0),
                                fx=pattern_scale, fy=pattern_scale,
                                interpolation=cv2.INTER_CUBIC)
    screen = screen or self.region_screenshot()
    threshold = threshold or pattern.threshold or self.image_match_threshold
    # handle offset if percent, ex (0.2, 0.8)
    dx, dy = offset or pattern.offset or (0, 0)
    dx = pattern.image.shape[1] * dx  # opencv object width
    dy = pattern.image.shape[0] * dy  # opencv object height
    dx, dy = int(dx * pattern_scale), int(dy * pattern_scale)
    # image match
    screen = imutils.from_pillow(screen)  # convert to opencv image
    if rect and isinstance(rect, tuple) and len(rect) == 4:
        (x0, y0, x1, y1) = [v * pattern_scale for v in rect]
        (dx, dy) = dx + x0, dy + y0
        screen = imutils.crop(screen, x0, y0, x1, y1)
    # cv2.imwrite('cc.png', screen)
    match_method = method or self.image_match_method
    ret = None
    confidence = None
    matched = False
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL:  # IMG_METHOD_TMPL
        ret = ac.find_template(screen, search_img)
        if ret is None:
            return None
        confidence = ret['confidence']
        if confidence > threshold:
            matched = True
        (x, y) = ret['result']
        position = (x + dx, y + dy)  # fix by offset
    else:
        ret_all = ac.find_all_template(screen, search_img, maxcnt=10)
        if not ret_all:
            return None
        for ret in ret_all:
            confidence = ret['confidence']
            if confidence > threshold:
                # compare channel index 2 of the hit's top-left corner
                # against the pattern's top-left pixel; accept when the
                # values differ by less than 10 (a color sanity check)
                (x, y) = ret['rectangle'][0]
                color_screen = screen[y, x, 2]
                color_img = search_img[0, 0, 2]
                if -10 < int(color_img) - int(color_screen) < 10:
                    matched = True
                    break
        # NOTE(review): if the loop finds no acceptable hit, `ret`
        # is the last candidate and matched stays False
        (x, y) = ret['result']
        position = (x + dx, y + dy)  # fix by offset
    if self.bounds:
        # translate from region-local to absolute screen coordinates
        x, y = position
        position = (x + self.bounds.left, y + self.bounds.top)
    return FindPoint(position, confidence, match_method, matched=matched)
def screenshot_cv2(self):
    """Take a minicap screenshot and return it as an OpenCV image."""
    return from_pillow(self._screenshot_minicap())
def _listener(self, evt):
    """Event hook fired before/after instrumented driver calls.

    Saves timestamped before/after/target images under self.save_dir
    and records 'click', 'click_image' and 'assert_exists' steps.
    Also caches the most recent screenshot from EVENT_SCREENSHOT
    results.

    Args:
        evt: event object exposing is_before, flag, depth, args,
            retval, kwargs and traceback.
    """
    d = self.d
    # pre-compute the 'before' file name/path used by both click paths
    screen_before = 'images/before_%d.jpg' % time.time()
    screen_before_abspath = os.path.join(self.save_dir, screen_before)
    # keep screenshot for every call
    if not evt.is_before and evt.flag == consts.EVENT_SCREENSHOT:
        self.__last_screenshot = evt.retval
    if evt.depth > 1:  # base depth is 1
        # nested (internally triggered) calls are not recorded
        return
    if evt.is_before:  # call before function
        if evt.flag == consts.EVENT_CLICK:
            self.__last_screenshot = d.screenshot()  # Maybe no need to set value here.
            (x, y) = evt.args
            # mark the tap position on the cached screenshot
            cv_img = imutils.from_pillow(self.last_screenshot)
            cv_img = imutils.mark_point(cv_img, x, y)
            self.__last_screenshot = imutils.to_pillow(cv_img)
            self._add_to_gif(self.last_screenshot)
        return
    # after-call handling
    if evt.flag == consts.EVENT_CLICK:
        if self.last_screenshot:  # just in case
            self.last_screenshot.save(screen_before_abspath)
        screen_after = 'images/after_%d.jpg' % time.time()
        d.screenshot(os.path.join(self.save_dir, screen_after))
        (x, y) = evt.args
        self.add_step('click',
                      screen_before=screen_before,
                      screen_after=screen_after,
                      position={'x': x, 'y': y})
    elif evt.flag == consts.EVENT_CLICK_IMAGE:
        kwargs = {
            'success': evt.traceback is None,
            'traceback': None if evt.traceback is None else evt.traceback.stack,
            'description': evt.kwargs.get('desc'),
        }
        # not record if image not found
        if evt.retval is None and evt.traceback is None:
            return
        if self.last_screenshot:
            self.last_screenshot.save(screen_before_abspath)
            kwargs['screen_before'] = screen_before
        # IOError presumably means the pattern file could not be
        # opened, so no target image to save -- TODO confirm
        if evt.traceback is None or not isinstance(evt.traceback.exception, IOError):
            target = 'images/target_%d.jpg' % time.time()
            pattern = d.pattern_open(evt.args[0])
            self._save_screenshot(pattern, name=target)
            kwargs['target'] = target
        if evt.traceback is None:
            (x, y) = evt.retval.pos
            # FIXME(ssx): quick hot fix
            # overwrite the 'before' file with a marked version
            cv_img = imutils.from_pillow(self.last_screenshot)
            cv_img = imutils.mark_point(cv_img, x, y)
            self.__last_screenshot = imutils.to_pillow(cv_img)
            self.last_screenshot.save(screen_before_abspath)
            screen_after = 'images/after_%d.jpg' % time.time()
            d.screenshot(os.path.join(self.save_dir, screen_after))
            kwargs['screen_after'] = screen_after
            kwargs['confidence'] = evt.retval.confidence
            kwargs['position'] = {'x': x, 'y': y}
        self.add_step('click_image', **kwargs)
    elif evt.flag == consts.EVENT_ASSERT_EXISTS:
        # this is image, not tested
        pattern = d.pattern_open(evt.args[0])
        target = 'images/target_%.2f.jpg' % time.time()
        self._save_screenshot(pattern, name=target)
        kwargs = {
            'target': target,
            'description': evt.kwargs.get('desc'),
            'screen': self._save_screenshot(name='images/screen_%.2f.jpg' % time.time()),
            'traceback': None if evt.traceback is None else evt.traceback.stack,
            'success': evt.traceback is None,
        }
        if evt.traceback is None:
            # only a successful assert carries a position/confidence
            kwargs['confidence'] = evt.retval.confidence
            (x, y) = evt.retval.pos
            kwargs['position'] = {'x': x, 'y': y}
        self.add_step('assert_exists', **kwargs)
def match(self, pattern, screen=None, threshold=None):
    """Check if image position in screen

    Args:
        - pattern: Image file name or opencv image object
        - screen: opencv image, optional, if not None, screenshot method will be called
        - threshold (float): minimum confidence for a template match

    Returns:
        None or FindPoint, For example:

        FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)

        Only when confidence > self.image_match_threshold, matched will be True

    Raises:
        TypeError: when image_match_method is invalid
    """
    pattern = self.pattern_open(pattern)
    search_img = pattern.image
    # rescale the pattern to the device resolution before matching
    pattern_scale = self._cal_scale(pattern)
    if pattern_scale != 1.0:
        search_img = cv2.resize(search_img, (0, 0),
                                fx=pattern_scale, fy=pattern_scale,
                                interpolation=cv2.INTER_CUBIC)
    screen = screen or self.region_screenshot()
    threshold = threshold or self.image_match_threshold

    dx, dy = pattern.offset
    dx, dy = int(dx * pattern_scale), int(dy * pattern_scale)

    # image match
    screen = imutils.from_pillow(screen)  # convert to opencv image
    match_method = self.image_match_method
    ret = None
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL:
        ret = ac.find_template(screen, search_img)
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        ret = ac.find_sift(screen, search_img, min_match_count=10)
    else:
        raise TypeError("Invalid image match method: %s" % (match_method, ))

    if ret is None:
        return None
    (x, y) = ret['result']
    # fix by offset
    position = (x + dx, y + dy)
    if self.bounds:
        # translate from region-local to absolute screen coordinates
        x, y = position
        position = (x + self.bounds.left, y + self.bounds.top)
    confidence = ret['confidence']

    matched = True
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL:
        if confidence < threshold:
            matched = False
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        # NOTE(review): matched is already True here, so this branch is
        # a no-op -- sift results are never marked unmatched; confirm
        # whether the ratio test was meant to set matched = False
        matches, total = confidence
        if 1.0 * matches / total > 0.5:  # FIXME(ssx): sift just write here
            matched = True
    return FindPoint(position, confidence, match_method, matched=matched)
def match(self, pattern, screen=None, threshold=None):
    """Check if image position in screen

    Args:
        - pattern: Image file name or opencv image object
        - screen: opencv image, optional, if not None, screenshot method will be called

    Returns:
        None or FindPoint, For example:

        FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)

        Only when confidence > self.image_match_threshold, matched will be True

    Raises:
        TypeError: when image_match_method is invalid
    """
    pattern = self.pattern_open(pattern)
    query = pattern.image
    scale = self._cal_scale(pattern)
    if scale != 1.0:
        # resize the pattern to the device resolution
        query = cv2.resize(query, (0, 0), fx=scale, fy=scale,
                           interpolation=cv2.INTER_CUBIC)
    screen = screen or self.region_screenshot()
    threshold = threshold or self.image_match_threshold
    dx, dy = pattern.offset
    dx, dy = int(dx * scale), int(dy * scale)

    cv_screen = imutils.from_pillow(screen)  # convert to opencv image
    match_method = self.image_match_method
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL:
        found = ac.find_template(cv_screen, query)
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        found = ac.find_sift(cv_screen, query, min_match_count=10)
    else:
        raise TypeError("Invalid image match method: %s" %(match_method,))
    if found is None:
        return None

    px, py = found['result']
    position = (px + dx, py + dy)  # fix by offset
    if self.bounds:
        position = (position[0] + self.bounds.left,
                    position[1] + self.bounds.top)

    confidence = found['confidence']
    matched = True
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL and confidence < threshold:
        matched = False
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        good, total = confidence
        if 1.0 * good / total > 0.5:  # FIXME(ssx): sift just write here
            matched = True
    return FindPoint(position, confidence, match_method, matched=matched)
def __point_saver(self, name='', screen=None, x=0, y=0):
    """Mark (x, y) on *screen* (PIL) and save it through the image saver."""
    marked = imutils.mark_point(imutils.from_pillow(screen), x, y)
    return self.__image_saver(name=name, image=marked)