class Scraper(object):
    '''
    UserAutomation must have the extended methods.
    '''
    def __init__(self, user_simulation=None, crop_taskbar=True):
        if not user_simulation:
            from murphy.user_simulation.local.local_user import User
            self._user_automation = User()
        else:
            self._user_automation = user_simulation

        path = os.path.dirname(__file__) + '/img'
        radio_images = [Image2(file_name=path + '/radio1.bmp',
                               color_mask=0xed1c2400),
                        Image2(file_name=path + '/radio2.bmp',
                               color_mask=0xed1c2400)]
        self._radio = UIElement(radio_images)
        check_images = [Image2(file_name=path + '/check1.bmp',
                               color_mask=0xed1c2400),
                        Image2(file_name=path + '/check2.bmp',
                               color_mask=0xed1c2400)]
        self._checkboxes = UIElement(check_images)
        self._crop_taskbar = crop_taskbar
        self._internal_counter = 0

    def _grab_screen(self):
        '''
        Convenience method for grabbing the screen, cropping out the taskbar
        when requested.
        '''
        screen = self._user_automation.grab_screen()
        if self._crop_taskbar:
            return screen.crop((0, 0, screen.size[0], screen.size[1] - 41))
        else:
            return screen.image

    def _send_tab(self, backwards=False):
        '''
        Shorthand for sending a tab (or shift+tab) keystroke.
        '''
        if not backwards:
            self._user_automation.keyboard.enters("{tab}")
        else:
            self._user_automation.keyboard.enters("{+shift}{tab}{-shift}")

    def _find_elements_by_mousehover(self, tab_screens=None, ui_changes=None):
        '''
        Find elements in the active window using the tab key and mouse
        hovering techniques.
        '''
        self._user_automation.mouse.move(1, 1)
        if tab_screens is None:
            tab_screens = get_tab_changing_areas(self._grab_screen,
                                                 self._send_tab)
        if ui_changes is None:
            ui_changes = get_ui_changes(tab_screens)

        start_screen = Image2(self._grab_screen())
        extracted_areas = []
        points = get_corner_points(ui_changes, 3, 3)
        for point in points:
            if not point_inside_rects((point[0], point[1]), extracted_areas):
                self._user_automation.mouse.move(point[0], point[1])
                current_screen = Image2(self._grab_screen())
                if current_screen != start_screen:
                    changed_area = current_screen.difference(start_screen)
                    if changed_area not in extracted_areas:
                        if point_inside_rects((point[0], point[1]),
                                              [changed_area]):
                            extracted_areas.append(changed_area)
        return extracted_areas

    def get_elements(self, hints=None, window_origin=None):
        '''
        Returns a list of UI elements that this class can identify from the
        currently active window.
        The return of this implementation is an array of dictionaries where
        each dictionary describes the control type and its bounding box.
        '''
        if hints is None:
            hints = {}
        if window_origin is None:
            window_origin = (0, 0)
        if hints.get('outfocus method', False):
            result = self.get_elements_by_app_outfocus(hints, window_origin)
            if result is not False:
                return result
            else:
                LOGGER.warning('Unable to properly use the outfocus hint, '
                               'reverting to standard behaviour for this '
                               'node.')

        self._user_automation.mouse.move(1, 1)
        screen = self._grab_screen()
        screen_height = screen.size[1]
        screen = Image2(screen)
        tab_screens = get_tab_changing_areas(self._grab_screen,
                                             self._send_tab)
        if len(tab_screens) == 1:
            LOGGER.info('Only one image obtained when cycling with tab, '
                        'adding alt trick.')
            self._user_automation.keyboard.enters('{alt}')
            # we're searching for a very small clue here... just one _
            new_screen = Image2(self._grab_screen(), tolerance=1.0)
            tab_screens.append(new_screen)

        candidates = []
        processed = []
        for i in range(len(tab_screens) - 1):
            coords = tab_screens[i].difference(tab_screens[i + 1])
            if coords:
                LOGGER.debug("Changes from %s to %s are: %s" %
                             (i, i + 1, str(coords)))
                division = automation_helpers.find_inner_bbox(
                    tab_screens[i], tab_screens[i + 1], coords)
                LOGGER.debug("Splitting found %s bboxes (%s)" %
                             (len(division), str(division)))
                for rect in division:
                    if rect not in candidates:
                        LOGGER.debug("Adding: %s" % str(rect))
                        candidates.append(rect)
                        # hover, if image differs take diff coords, use the
                        # biggest of the two, use mouse pointer clue for type
                        # ARGGGGGG cursor blinking... deactivated at os level
                        # for now
                        # the focus may be at this point anywhere and on 1st
                        # case is where it is left from tab navigation, for
                        # cases like menu we have to highlight current menu
                        # item
                        center = center_of_rect(rect)
                        self._user_automation.mouse.move(center[0], center[1])
                        self._user_automation.mouse.move(center[0] + 1,
                                                         center[1] + 1)
                        cursor = self._user_automation.get_current_cursor()
                        screen1 = Image2(self._grab_screen())
                        self._user_automation.mouse.move(1, screen_height)
                        screen2 = Image2(self._grab_screen())
                        diff = screen1.difference(screen2)
                        if diff:
                            # produced a change in UI, must be a button
                            LOGGER.debug(("Will compute biggest rect out of "
                                          "%s %s") % (str(rect), str(diff)))
                            biggest_rect = get_bounding_box([rect, diff])
                            if biggest_rect not in processed:
                                processed.append(biggest_rect)
                                LOGGER.debug("Added: %s" % str(biggest_rect))
                        else:
                            # no UI change, can be a link, text or misfired
                            # recognition; exceptional case is one button
                            # alone in the dialog
                            if ((cursor != 'normal' and
                                 rect not in processed) or
                                    (len(tab_screens) == 2 and
                                     rect not in processed)):
                                processed.append(rect)
                                LOGGER.debug("Added: %s" % str(rect))

        LOGGER.debug("There are %s elements to consider from tab + hovering" %
                     len(processed))
        checkboxes = self._checkboxes.find_all(screen)
        LOGGER.debug("Found %s checkboxes" % len(checkboxes))
        checkboxes = add_text_to_elements(checkboxes, screen)

        radios = self._radio.find_all(screen)
        LOGGER.debug("Found %s radios" % len(radios))
        radios = add_text_to_elements(radios, screen)

        checkboxes = merge_overlapping_areas(checkboxes, processed)
        radios = merge_overlapping_areas(radios, processed)
        areas = exclude_subareas(processed, checkboxes + radios)

        points = hints.get('points of interest', [])
        LOGGER.debug("Points of interest are: %s" % str(points))
        for point in points:
            point_x = window_origin[0] + point[0]
            point_y = window_origin[1] + point[1]
            found, bbox = find_bounding_box(screen.image, point_x, point_y)
            if found:
                LOGGER.debug("Found %s from point of interest" % str(bbox))
                areas.append(bbox)
            else:
                LOGGER.debug("Nothing found from point of interest at %s %s" %
                             (point_x, point_y))

        result = []
        for area in areas:
            center_x, center_y = center_of_rect(area)
            self._user_automation.mouse.move(center_x, center_y)
            self._user_automation.mouse.move(center_x + 1, center_y + 1)
            cursor = self._user_automation.get_current_cursor()
            element = {'coords': (area[0], area[1], area[2], area[3]),
                       'type': cursor}
            result.append(element)

        for area in checkboxes:
            element = {'coords': (area[0], area[1], area[2], area[3]),
                       'type': 'checkbox'}
            result.append(element)

        for area in radios:
            element = {'coords': (area[0], area[1], area[2], area[3]),
                       'type': 'radio'}
            result.append(element)

        result = remove_containers(result)
        return result

    def get_elements_by_hover_points(self, window_origin, hover_points):
        '''
        Simple recognition based on hovering the mouse through the given list
        of points; the points should be close to the center of the elements
        and the elements must highlight on mouseover to work properly.
        Points are relative to the window, not absolute screen coords.
        '''
        self._user_automation.mouse.move(1, 1)
        screen = Image2(self._grab_screen())
        result = []
        for point in hover_points:
            hover_x = window_origin[0] + point[0]
            hover_y = window_origin[1] + point[1]
            self._user_automation.mouse.move(hover_x, hover_y)
            self._user_automation.mouse.move(hover_x + 1, hover_y + 1)
            screen_when_hover = Image2(self._grab_screen())
            coords = screen.difference(screen_when_hover)
            if coords:
                cursor = self._user_automation.get_current_cursor()
                element = {'coords': coords, 'type': cursor}
                result.append(element)
        return result

    def get_elements_by_app_outfocus(self, hints=None, window_origin=None):
        '''
        Alternate algorithm: tries to find the elements by setting the focus
        on the desktop and then back to the application while analyzing the
        difference on the screen; when the application loses the focus the
        normal behaviour in Windows is that the active control is no longer
        drawn with the focus indicator.
        This technique has some caveats, for example the border of the
        application needs to be ignored; the default button will also be
        rendered differently, giving in some cases 2 areas with changes.
        '''
        self._user_automation.mouse.move(1, 1)
        if hints is None:
            hints = {}
        if window_origin is None:
            window_origin = (0, 0)
        rects = []
        repeated = 0
        attempts = 0
        while True:
            screen = Image2(self._grab_screen())
            width = screen.size[0]
            self._user_automation.mouse.click(width / 2, screen.size[1] + 5)
            time.sleep(0.1)
            screen2 = Image2(self._grab_screen())
            self._user_automation.keyboard.enters("{+alt}{tab}{+shift}"
                                                  "{tab}{-shift}{-alt}")
            time.sleep(1)
            # coords = screen.difference(screen2)
            coords = automation_helpers.crop_border_differences(screen,
                                                                screen2)
            crop1 = screen.image.crop((coords[0], coords[1],
                                       coords[2] + 1, coords[3] + 1))
            crop2 = screen2.image.crop((coords[0], coords[1],
                                        coords[2] + 1, coords[3] + 1))
            coords2 = Image2(image=crop1).difference(Image2(image=crop2))
            if coords2:
                coords = (coords[0] + coords2[0], coords[1] + coords2[1],
                          coords[0] + coords2[2], coords[1] + coords2[3])
            divisions = automation_helpers.find_inner_bbox(screen, screen2,
                                                           coords)
            LOGGER.debug("Splitting found %s bboxes (%s)" %
                         (len(divisions), str(divisions)))
            repeated_in_divisions = 0
            for rect in divisions:
                LOGGER.debug("resulting coords %s" % str(rect))
                if rect in rects:
                    repeated_in_divisions += 1
                else:
                    rects.append(rect)
            if repeated_in_divisions == len(divisions):
                repeated += 1
                if repeated == 2:
                    break
            self._user_automation.keyboard.enters("{tab}")
            time.sleep(0.1)
            attempts += 1
            if attempts - len(rects) > 5:
                # something is wrong, it is possible that we are unable to
                # detect the horizontal division at all
                return False

        result = []
        for rect in rects:
            self._user_automation.mouse.move(1, 1)
            time.sleep(0.2)
            before_cursor = self._user_automation.get_current_cursor()
            before_screen = Image2(self._grab_screen())
            center_x, center_y = center_of_rect(rect)
            self._user_automation.mouse.move(center_x, center_y)
            self._user_automation.mouse.move(center_x + 1, center_y + 1)
            time.sleep(0.2)
            after_screen = Image2(self._grab_screen())
            after_cursor = self._user_automation.get_current_cursor()
            ui_changes = before_screen != after_screen
            cursor_changes = before_cursor != after_cursor
            everything = not hints.get('visual clue needed', True)
            if cursor_changes or ui_changes or everything:
                element = {'coords': (rect[0], rect[1], rect[2], rect[3]),
                           'type': after_cursor}
                result.append(element)
        return result
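

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): drives
# the scraper against whatever window is currently active and prints what it
# found.  It assumes this module's imports (Image2, UIElement, LOGGER, the
# helper functions) are available and that a local User simulation can be
# created; the hint keys shown ('outfocus method', 'points of interest') are
# the ones consumed by get_elements above, and the values used here are
# placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)

    scraper = Scraper(crop_taskbar=True)
    elements = scraper.get_elements(hints={'outfocus method': False,
                                           'points of interest': []},
                                    window_origin=(0, 0))
    for element in elements:
        print('%s at %s' % (element['type'], str(element['coords'])))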
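
# Hover-points variant (illustrative sketch): when approximate element
# centers are already known, get_elements_by_hover_points skips the
# tab-cycling pass entirely.  The window origin and point coordinates below
# are made-up example values, not values taken from the original code:
#
#     scraper = Scraper()
#     elements = scraper.get_elements_by_hover_points(
#         window_origin=(100, 100), hover_points=[(40, 30), (40, 60)])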