Exemple #1
0
    def _find_elements_by_mousehover(self, tab_screens=None, ui_changes=None):
        '''
        Find elements in the active window by combining tab-key cycling with
        mouse hovering: hovers near each corner point of a tab-induced ui
        change and collects the screen areas that visibly react.
        '''
        self._user_automation.mouse.move(1, 1)
        if tab_screens is None:
            tab_screens = get_tab_changing_areas(self._grab_screen,
                                                 self._send_tab)
        if ui_changes is None:
            ui_changes = get_ui_changes(tab_screens)
        baseline = Image2(self._grab_screen())

        found_areas = []
        for point in get_corner_points(ui_changes, 3, 3):
            hover_at = (point[0], point[1])
            if point_inside_rects(hover_at, found_areas):
                #already covered by a previously found area, skip hovering
                continue
            self._user_automation.mouse.move(point[0], point[1])
            hovered = Image2(self._grab_screen())
            if hovered == baseline:
                #hovering here produced no visible change
                continue
            area = hovered.difference(baseline)
            if area not in found_areas and point_inside_rects(hover_at,
                                                              [area]):
                found_areas.append(area)

        return found_areas
Exemple #2
0
def test():
    '''
    Simple unit test: crops the differing border area out of an active and
    an inactive window screenshot and saves both crops for inspection.
    '''
    active = Image2(file_name='self_test/img/active_window.bmp')
    inactive = Image2(file_name='self_test/img/inactive_window.bmp')
    diff = crop_border_differences(active, inactive)
    #difference coords are inclusive, crop() expects an exclusive box
    crop_box = (diff[0], diff[1], diff[2] + 1, diff[3] + 1)
    active.image.crop(crop_box).save('crop1.bmp')
    inactive.image.crop(crop_box).save('crop2.bmp')
Exemple #3
0
def get_verb_image_b64(model, verb):
    '''
    Returns the first snapshot of the given verb as a base64 encoded png,
    falls back to a bundled placeholder image when the verb has no
    snapshots available.

    model: model object exposing images_dir
    verb: verb dictionary, its 'how' entry may carry a 'snapshots' list
    '''
    how = verb['how']
    #isinstance instead of "type(x) is dict" so dict subclasses also work
    if isinstance(how, dict) and len(how['snapshots']) > 0:
        snapshot = how['snapshots'][0]
        image1 = Image2(file_name="%s/%s" % (model.images_dir, snapshot))
    else:
        placeholder = Image.open(
            os.path.join(os.path.dirname(__file__), "noimageavailable.png"))
        image1 = Image2(image=placeholder)

    #round trip thru a temp file, Image2 has no in-memory encoder here
    image1.image.save("tmp.png")
    return base64.b64encode(read_binary_file("tmp.png"))
Exemple #4
0
    def parametrize(self, ignorable_areas):
        '''
        For the given node we need to mark as dynamic the changing areas
        and ensure recognition of it will go smooth
        FIXME: should check sanity validate on tab images
        '''
        if self.file_name is None:
            raise Exception("Node file name must be set before parametrize!")
        if len(self.reference_images) == 0:
            raise Exception("No reference images available to parametrize")

        mask_color = (237, 28, 36, 0)
        dialog = self.reference_images[0].image

        #paint over every known element location with the mask color
        for edge in self.edges:
            if not edge.location:
                continue
            dialog.paste(mask_color, expand_rect(edge.location, (1, 1)))

        LOGGER.debug("Parametrizing with %s ignorable areas" %
                     len(ignorable_areas))
        for area in ignorable_areas:
            dialog.paste(mask_color, expand_rect(area, (1, 1)))

        self.screenshots.append(
            Image2(image=dialog,
                   color_mask=0xed1c2400,
                   tolerance=self.graph.image_tolerance))
Exemple #5
0
def get_node_image_b64(model, view, reference_image=False):
    '''
    Returns the first snapshot (or reference snapshot) of the given view
    as a base64 encoded png.
    '''
    key = 'reference snapshots' if reference_image else 'snapshots'
    view_images = view['self'].HERE.get(key, [])
    image1 = Image2(file_name="%s/%s" % (model.images_dir, view_images[0]))
    #round trip thru a temp file, Image2 has no in-memory encoder here
    image1.image.save("tmp.png")
    return base64.b64encode(read_binary_file("tmp.png"))
Exemple #6
0
    def perform(self, world):
        '''
        Executes this edge in the given world: either delegates to the
        custom method attached to the edge or clicks the parent element
        found on screen, then waits for the destination node (head) to
        show up.

        world: execution context exposing machine automation and last_screen
        Raises Exception when the parent element cannot be found on screen.
        '''
        LOGGER.info("Performing %s" % str(self))
        screen = world.machine.automation.grab_screen()
        #keep a copy of the pre-action screen for later use by callers
        world.last_screen = screen.copy()

        if self.method:
            self.method(world)  # pylint: disable=E1102
        else:
            #found = self.tail.ui_element.find_in(Image2(image=screen))
            found = self.tail.find_in(Image2(image=screen))
            if found:
                #links usually need to be clicked close to the left
                #top coord (if left aligned) but some buttons needs
                #the center as they shade in the borders
                if self.ui_type == 'link':
                    x_coord = found[0] + self.location[0] + 4
                    y_coord = found[1] + self.location[1] + 4
                else:
                    center_x, center_y = center_of_rect(self.location)
                    x_coord = found[0] + center_x
                    y_coord = found[1] + center_y
                #Some quite seldom and random issues when clicking!?
                #the 1-pixel pre-move presumably works around them - TODO confirm
                world.machine.automation.mouse.move(x_coord - 1, y_coord - 1)
                world.machine.automation.mouse.click(x_coord, y_coord)
                if self.ui_type == 'desktop icon':
                    #desktop icons get a second click (effectively a double click)
                    world.machine.automation.mouse.click()
                elif self.ui_type == 'text' and 'test value' in self.custom:
                    #text fields get their configured test value typed in
                    world.machine.automation.keyboard.enters(
                        self.custom['test value'][0]['value'])
            else:
                screen.save("Current screen.bmp")
                raise Exception("Parent node (%s) not found in screen!" %
                                self.tail.name)

        #FIXME: for recording purposes this is needed, for playback purposes
        #the destination should be checked by waiting...
        world.machine.automation.mouse.move(1, 1)
        world.machine.wait_idling()
        if self.head:
            destination_reached = False
            # FIXME: this is not entirely correct, some long transitions may
            # happen for example during installations or parts of the
            # applications were it looks like stuck for a while
            for i in range(3):
                destination_reached = self.head.is_in(world)
                if destination_reached:
                    break
                LOGGER.info("Expected destination not in screen yet, "
                            "waiting (%s)" % i)
                world.machine.wait_idling()

            if destination_reached == False:
                LOGGER.info("Expected destination not in screen, could be a "
                            "type of return to caller scenario")
            elif self.head.enter_hook:
                self.head.enter_hook(world)
Exemple #7
0
    def import_from_file(self, file_name, images_dir):
        '''
        Imports this node from a file, images_dir does not replace the node
        directory or images_dir

        file_name: json file to read the node definition from
        images_dir: directory holding the referenced screenshot files
        '''
        def load_images(screenshots, destination):
            #each screenshot entry is a dict with 'file' and optional 'mask'
            for screenshot in screenshots:
                an_image = Image2(file_name="%s/%s" %
                                  (images_dir, screenshot['file']),
                                  color_mask=screenshot.get('mask'))
                an_image.file_name = None
                destination.append(an_image)

        with open(file_name, "rb") as the_file:
            json_node = json.load(the_file)
        self.name = json_node['HERE']['desc']
        #node file name: strip the extension and any leading directories
        use_name = file_name
        if use_name.find('.') != -1:
            use_name = use_name.split('.')[0]
        self.file_name = use_name.split("/")[-1].split("\\")[-1]
        load_images(json_node['HERE']['reference screenshots'],
                    self.reference_images)
        load_images(json_node['HERE']['screenshots'], self.screenshots)
        self.title = json_node['HERE'].get('title', '')

        #every other top-level key describes an edge, in sorted order
        keys = list(json_node.keys())
        keys.remove('HERE')
        keys.sort()
        for key in keys:
            elem = json_node[key]
            edge = self.create_edge(elem['desc'])
            #FIXME: implement edge.head set
            edge.location = elem['how']['visual']
            edge.ui_type = elem['how']['type']
            load_images(elem['how']['screenshots'], edge.screenshots)
Exemple #8
0
def get_tab_changing_areas(grab_screen, send_tab):
    '''
    Returns an array of images produced by cycling with tab over a window
    Put the mouse out of the way before calling this

    cases:
        cycles thru controls, looping ok
        there's only 1 control with focus already
        there's only 1 control, focus not set (2 screenshots)
        one of the controls will 'capture' and wont let the focus go out
    '''
    last_screen = Image2(grab_screen())
    images = [last_screen]
    #tail indexes the position in images where a suspected repetition of
    #the sequence started; repeated counts consecutive matches against it
    tail = 0
    repeated = 0
    while True:
        last_screen = send_tab_expecting_change(send_tab, grab_screen,
                                                last_screen)
        images.append(last_screen)
        #consecutive match?
        if images[tail] == images[-1]:
            tail += 1
            repeated += 1
            if repeated > 2:
                #three matches in a row: the tab cycle wrapped around, stop
                break
        else:
            tail = 0
            repeated = 0
            #search if it starts repeating somewhere not in 1st screenshot
            for position in range(len(images) - 1):
                if images[position] == images[-1]:
                    tail = position + 1
                    repeated = 1
                    break

        if len(images) > 30:
            #safety valve for windows that never settle into a cycle
            LOGGER.warning("Stopped tab cycling after 30 different "
                           "screenshots...")
            break

    #drop the screenshots that belong to the detected repetition
    for _ in range(repeated):
        images.pop()

    #remove repeated images in the tail if there are some
    while len(images) > 1 and images[-1] == images[-2]:
        images.pop()

    #presumably rewinds the focus close to where the cycle started - TODO confirm
    for _ in range(repeated - 1):
        send_tab(backwards=True)

    LOGGER.debug("Got %s tab images" % len(images))
    return images
Exemple #9
0
    def __init__(self, user_simulation=None, crop_taskbar=True):
        '''
        user_simulation: user automation backend, defaults to the local user
        crop_taskbar: whether grabbed screens should exclude the taskbar
        '''
        if user_simulation:
            self._user_automation = user_simulation
        else:
            from murphy.user_simulation.local.local_user import User
            self._user_automation = User()

        img_dir = os.path.dirname(__file__) + '/img'

        def masked_image(name):
            #loads a bundled bmp with the standard red color mask applied
            return Image2(file_name=img_dir + '/' + name,
                          color_mask=0xed1c2400)

        self._radio = UIElement([masked_image('radio1.bmp'),
                                 masked_image('radio2.bmp')])
        self._checkboxes = UIElement([masked_image('check1.bmp'),
                                      masked_image('check2.bmp')])
        self._crop_taskbar = crop_taskbar

        self._internal_counter = 0
Exemple #10
0
    def compare_view_images(self, model, view, reference_model,
                            reference_view):
        '''
        Compares the parametrized snapshots of a view against those of the
        reference view, returns '' when they all match or an html fragment
        describing and showing every difference side by side.
        '''
        #we compare parametrized images, otherwise dates and times will not match
        view_images = view['self'].HERE.get('snapshots', [])
        reference_view_images = reference_view['self'].HERE.get(
            'snapshots', [])

        if len(view_images) != len(reference_view_images):
            return 'Node "%s" has %s snapshots but the reference node has %s' % (
                view['self'].HERE['desc'], len(view_images),
                len(reference_view_images))

        def encode_b64(image):
            #round trip thru a temp file, Image2 has no in-memory encoder
            image.image.save("tmp.png")
            return base64.b64encode(read_binary_file("tmp.png"))

        result = ''
        #iterate the two snapshot lists in lockstep instead of by index
        for image_name, reference_name in zip(view_images,
                                              reference_view_images):
            image1 = Image2(file_name="%s/%s" %
                            (model.images_dir, image_name))
            image2 = Image2(
                file_name="%s/%s" %
                (reference_model.images_dir, reference_name),
                tolerance=0.9999)
            if image1 != image2:
                result += 'Node "%s", image "%s" differs from reference node "%s"<br>\n' % (
                    view['self'].HERE['desc'], image_name, reference_name)
                encoded1 = encode_b64(image1)
                encoded2 = encode_b64(image2)
                id1 = str(uuid.uuid1())
                id2 = str(uuid.uuid1())
                result += "<table>\n\t<tr>\n\t\t<td><img id='%s' title='model' src='data:image/png;base64,%s'></td>\n" % (
                    id1, encoded1)
                result += "\t\t<td><input style='font-family:\"Courier New\", Courier, monospace;' type=button id='button-%s' value='<-    New      \n   Reference ->' onclick='swap(\"%s\", \"%s\")'></td>\n" % (
                    id1, id1, id2)
                result += "\t\t<td><img id='%s' title='reference' src='data:image/png;base64,%s'></td>\n\t</tr>\n</table><br>\n" % (
                    id2, encoded2)

        return result
Exemple #11
0
    def get_elements_by_hover_points(self, window_origin, hover_points):
        '''
        Simple recognition based on hovering the mouse thru the given
        list of points, the points should be close to the center of the
        elements and the elements must highlight on mouseover to work properly
        Points are relative to the window, not absolute screen coords
        '''
        mouse = self._user_automation.mouse
        mouse.move(1, 1)
        baseline = Image2(self._grab_screen())
        elements = []
        for point in hover_points:
            abs_x = window_origin[0] + point[0]
            abs_y = window_origin[1] + point[1]
            #second 1-pixel move, presumably to guarantee a hover event fires
            mouse.move(abs_x, abs_y)
            mouse.move(abs_x + 1, abs_y + 1)
            hovered = Image2(self._grab_screen())
            coords = baseline.difference(hovered)
            if coords:
                elements.append({
                    'coords': coords,
                    'type': self._user_automation.get_current_cursor()
                })

        return elements
Exemple #12
0
 def wait(self, element, max_wait=60, retry_every=0.3):
     '''
     Waits for the given UI element to be visible, returns the bounding
     coordinates of the elements if found, raises ValueError if not.

     element: UIElement to search for in the screen
     max_wait: maximum seconds to keep trying before giving up
     retry_every: seconds to sleep between attempts
     '''
     started_at = datetime.datetime.now()
     while True:
         screen = Image2(image=self.grab_screen())
         result = element.find_in(screen)
         if result:
             return result
         now = datetime.datetime.now()
         #total_seconds(): timedelta.seconds ignores the days component, so
         #waits longer than a day would otherwise never time out
         if (now - started_at).total_seconds() > max_wait:
             raise ValueError("Element not found in screen")
         time.sleep(retry_every)
Exemple #13
0
def send_tab_expecting_change(send_tab, grab_screen, current_screen):
    '''
    Sends a tab key to change control focus, polls the screen for any ui
    change to synchronize it, will give up after 5 seconds (because sometimes
    focus is not properly handled in applications)
    Returns an Image2 of the new screen
    '''
    send_tab()
    attempts_left = 5
    while attempts_left:
        new_screen = Image2(grab_screen())
        if new_screen != current_screen:
            break
        time.sleep(1)
        attempts_left -= 1

    return new_screen
Exemple #14
0
    def is_in(self, world):
        '''
        Checks if this node is recognizable at this moment in the screen,
        returns either True or False
        '''
        LOGGER.debug("Checking if i'm in %s" % self.name)

        only_edge_is_icon = (len(self.edges) == 1
                             and self.edges[0].ui_type == 'desktop icon')
        if only_edge_is_icon or self.desktop_icon == True:
            rect = world.machine.helper.get_active_window_rect()
            if rect and rect[0] > 0 and rect[1] > 0:
                LOGGER.debug("Not there, desktop icons are detectable when "
                             "there are no active windows")
                return False

        screen = Image2(image=world.machine.automation.grab_screen())
        if self.find_in(screen):
            LOGGER.debug("Found")
            return True
        LOGGER.debug("Not found")
        return False
Exemple #15
0
    def compare(self):
        '''
        Compares this model against its reference model and returns an html
        report: every view is classified as matching, moved, changed or new,
        and reference views with no counterpart are reported as removed.
        '''
        views = self.model.new_worker().get_views()
        reference_views = self.reference_model.new_worker().get_views()

        matching_views = []
        candidate_for_moved = []
        moved_views = []
        candidate_for_new = []
        changed_views = []
        new_views = []
        reference_views_used = []

        #dictionary of moved nodes, key is model view name, value is reference view name
        reference_translation = {}
        reference_translation[''] = ''
        result = ''
        #phase 1: views whose name exists in the reference and whose images
        #match are 'matching'; everything else is a candidate for 'moved'
        ordered_views = self._get_ordered_views(self.model)
        for view_name in ordered_views:
            view = views[view_name]
            if view_name in reference_views:
                reference_view = reference_views[view_name]
                comparison = self.compare_view_images(self.model, view,
                                                      self.reference_model,
                                                      reference_view)
                if comparison == '':
                    matching_views.append(view)
                    reference_views_used.append(reference_view)
                    reference_translation[view_name] = view_name
                else:
                    candidate_for_moved.append(view)
            else:
                candidate_for_moved.append(view)

        #phase 2: try to match the leftovers to a reference view by image,
        #an unused match means the view was renamed/moved
        for view in candidate_for_moved:
            candidate = self.find_node_by_image(self.model, view,
                                                self.reference_model,
                                                reference_views)
            if candidate and not candidate in reference_views_used:
                moved_views.append({'view': view, 'reference view': candidate})
                reference_views_used.append(candidate)
                reference_translation[
                    view['self'].HERE['desc']] = candidate['self'].HERE['desc']
            else:
                candidate_for_new.append(view)

        #phase 3: same name but different images means 'changed',
        #everything still unmatched is 'new'
        for view in candidate_for_new:
            view_name = view['self'].HERE['desc']
            if view_name in reference_views and not reference_views[
                    view_name] in reference_views_used:
                #can this still be wrong?
                changed_views.append(view)
                reference_views_used.append(reference_views[view_name])
                reference_translation[view_name] = view_name
            else:
                new_views.append(view)

        #print "Translation table: %s" % str(reference_translation)

        #report phase: matching views may still have edge differences
        for view in matching_views:
            reference_view = reference_views[view['self'].HERE['desc']]
            result += self.compare_edges(view, reference_view,
                                         reference_translation)

        for movement in moved_views:
            result += "Node '%s' is in reference model as %s<br>\n" % (
                movement['view']['self'].HERE['desc'],
                movement['reference view']['self'].HERE['desc'])
            result += self.compare_edges(movement['view'],
                                         movement['reference view'],
                                         reference_translation)

        for view in changed_views:
            result += self.compare_view_images(
                self.model, view, self.reference_model,
                reference_views[view['self'].HERE['desc']])

        for view in new_views:
            result += "Node '%s' is new, does not seems to exist in the reference model<br>\n" % (
                view['self'].HERE['desc'])
            view_images = view['self'].HERE.get('snapshots', [])
            if len(view_images) > 0:
                image = Image2(file_name="%s/%s" %
                               (self.model.images_dir, view_images[0]))
                image.image.save("tmp.png")
                encoded = base64.b64encode(read_binary_file("tmp.png"))
                result += "<img src='data:image/png;base64,%s' title='model'><br>\n" % encoded

        #reference views never matched by anything were likely removed
        for view in reference_views.values():
            if not view in reference_views_used:
                result += "Reference node '%s' does not seem to exists in the model (was removed?)<br>\n" % (
                    view['self'].HERE['desc'])
                view_images = view['self'].HERE.get('snapshots', [])
                if len(view_images) > 0:
                    image = Image2(
                        file_name="%s/%s" %
                        (self.reference_model.images_dir, view_images[0]))
                    image.image.save("tmp.png")
                    encoded = base64.b64encode(read_binary_file("tmp.png"))
                    result += "<img src='data:image/png;base64,%s' title='model'><br>\n" % encoded

        return result
Exemple #16
0
'''
Exemple #17
0
'''
Exemple #18
0
def scrap_state(node, world, scraper_hints, node_hints):
    '''
    Scraps the ui into the given node for the given world state

    node: node object to fill with name, reference images and edges
    world: execution context exposing machine automation and last_screen
    scraper_hints: hints controlling how elements are detected
    node_hints: hints about the node itself (e.g. 'ignorable' areas)
    '''
    node_index = node.graph.nodes.index(node)
    node.name = 'Node %s' % node_index
    node.file_name = 'node_%s' % str(node_index).zfill(2)

    screen_image = world.machine.automation.grab_screen()
    screen = Image2(image=screen_image)

    LOGGER.debug("Node hints for scrapper, node %s are %s" %
                 (node_index, str(scraper_hints)))
    is_desktop = False
    if scraper_hints.get('windowless', False):
        #rect is difference between world.last_screen and screen
        #the first assignment only provides .size for the crop box below
        no_taskbar = world.last_screen
        no_taskbar = world.last_screen.crop(
            (0, 0, no_taskbar.size[0], no_taskbar.size[1] - TASKBAR_HEIGHT))
        window_rect = Image2(image=no_taskbar).difference(screen)
        window_rect = automation_helpers.refine_window_rect(
            screen_image, window_rect)
    else:
        is_desktop, window_rect = solve_active_window_rect(world, screen_image)

    window_image = Image2(image=screen_image.crop(window_rect))
    node.reference_images.append(window_image)

    node.last_location = window_rect
    #Don't scrap the desktop!
    print "Screen size %s, window_rect %s" % (str(
        screen_image.size), str(window_rect))

    if is_desktop == "Screen":
        #active window fills the screen: outfocus detection cannot work
        LOGGER.info(
            "Disabling outfocus method as window rect is the whole screen")
        scraper_hints['outfocus method'] = False
        is_desktop = False

    if not is_desktop:
        scraper = Scraper(world.machine.automation, crop_taskbar=True)
        if 'defined by hover points' in scraper_hints:
            LOGGER.debug("Node defined by hover points")
            elements = scraper.get_elements_by_hover_points(
                (window_rect[0], window_rect[1]),
                scraper_hints['defined by hover points'])
        else:
            elements = scraper.get_elements(scraper_hints,
                                            (window_rect[0], window_rect[1]))
    else:
        #FIXME: could search for new icons...
        elements = []

    LOGGER.debug("Found elements: %s" % str(elements))

    LOGGER.debug("Node hints for node %s are %s" %
                 (node_index, str(node_hints)))
    ignorable_areas = node_hints.get('ignorable', [])
    #turn each detected element into an edge unless it falls inside an
    #area the node hints declared as ignorable
    for element in elements:
        element_rect = move_rect(element['coords'],
                                 (window_rect[0], window_rect[1]))

        if not is_rect_inside(element_rect, ignorable_areas):
            edge_name = 'Element %s' % len(node.edges)
            edge = node.create_edge(edge_name)
            edge.location = element_rect
            edge.absolute_location = element['coords']
            edge.ui_type = element['type']
            elem_rect = expand_rect(element['coords'], (1, 1))
            screenshot = Image2(image=screen.image.crop(elem_rect))
            edge.screenshots.append(screenshot)
            if edge.ui_type == 'text':
                #text fields point back to the same node
                edge.head = node
        else:
            LOGGER.debug(
                ("Ignoring element at %s as it is inside ignorable" + " area")
                % str(element_rect))
Exemple #19
0
    def get_elements(self, hints=None, window_origin=None):
        '''
        Returns a list of UI elements that this class can identify from the
        currently active window.
        The return of this implementation is an array of dictionaries where
        each dictionary describes the control type and it's bounding box

        hints: optional dict of detection hints ('outfocus method',
               'points of interest', ...)
        window_origin: (x, y) screen coords of the window's top left corner,
               used to translate relative points of interest
        '''
        if hints is None:
            hints = {}
        if window_origin is None:
            window_origin = (0, 0)

        if hints.get('outfocus method', False):
            result = self.get_elements_by_app_outfocus(hints, window_origin)
            if result != False:
                return result
            else:
                LOGGER.warning(
                    'Unable to properly use the outfocus hint, '
                    'reverting to standard behaviour for this node.')

        self._user_automation.mouse.move(1, 1)

        screen = self._grab_screen()
        screen_height = screen.size[1]
        screen = Image2(screen)

        tab_screens = get_tab_changing_areas(self._grab_screen, self._send_tab)

        if len(tab_screens) == 1:
            LOGGER.info('Only one image obtained when cycling with tab, adding'
                        ' alt trick.')
            self._user_automation.keyboard.enters('{alt}')
            #we're searching for very small clue here... just one _
            new_screen = Image2(self._grab_screen(), tolerance=1.0)
            tab_screens.append(new_screen)

        candidates = []
        processed = []
        #every pair of consecutive tab screenshots differs where the focus
        #moved; those difference areas are the element candidates
        for i in range(len(tab_screens) - 1):
            coords = tab_screens[i].difference(tab_screens[i + 1])
            if coords:
                LOGGER.debug("Changes from %s to %s are: %s" %
                             (i, i + 1, str(coords)))
                division = automation_helpers.find_inner_bbox(
                    tab_screens[i], tab_screens[i + 1], coords)

                LOGGER.debug("Splitting found %s bboxes (%s)" %
                             (len(division), str(division)))
                for rect in division:
                    if not rect in candidates:
                        LOGGER.debug("Adding: %s" % str(rect))
                        candidates.append(rect)
                        #hover, if image differs take diff coords, use biggest
                        # of two use mouse pointer clue for type
                        #ARGGGGGG cursor blinking... deactivated at os level for
                        #now

                        #the focus may be at this point anywhere and on 1st
                        #case is where it is left from tab navigation, for
                        #cases like menu we have to highlight current menu item
                        center = center_of_rect(rect)
                        self._user_automation.mouse.move(center[0], center[1])
                        self._user_automation.mouse.move(
                            center[0] + 1, center[1] + 1)
                        cursor = self._user_automation.get_current_cursor()
                        screen1 = Image2(self._grab_screen())
                        self._user_automation.mouse.move(1, screen_height)
                        screen2 = Image2(self._grab_screen())
                        diff = screen1.difference(screen2)

                        if diff:  #produced a change in UI, must be button
                            LOGGER.debug(("Will compute biggest rect out of "
                                          "%s %s") % (str(rect), str(diff)))
                            biggest_rect = get_bounding_box([rect, diff])
                            if not biggest_rect in processed:
                                processed.append(biggest_rect)
                                LOGGER.debug("Added: %s" % str(biggest_rect))
                        else:
                            #no UI change, can be a link, text or misfired
                            #recognition, exceptional case is one button alone
                            #in dialog
                            if ((cursor != 'normal' and not rect in processed)
                                    or (len(tab_screens) == 2
                                        and not rect in processed)):
                                processed.append(rect)
                                LOGGER.debug("Added: %s" % str(rect))

        LOGGER.debug("There are %s elements to consider from tab + hovering" %
                     len(processed))

        checkboxes = self._checkboxes.find_all(screen)
        LOGGER.debug("Found %s checkboxes" % len(checkboxes))
        checkboxes = add_text_to_elements(checkboxes, screen)

        radios = self._radio.find_all(screen)
        #fixed: this log line previously reported len(checkboxes)
        LOGGER.debug("Found %s radios" % len(radios))
        radios = add_text_to_elements(radios, screen)

        checkboxes = merge_overlapping_areas(checkboxes, processed)
        radios = merge_overlapping_areas(radios, processed)

        areas = exclude_subareas(processed, checkboxes + radios)

        points = hints.get('points of interest', [])
        LOGGER.debug("Points of interest are: %s" % str(points))
        for point in points:
            point_x = window_origin[0] + point[0]
            point_y = window_origin[1] + point[1]
            found, bbox = find_bounding_box(screen.image, point_x, point_y)
            if found:
                LOGGER.debug("Found %s from point of interest" % str(bbox))
                areas.append(bbox)
            else:
                LOGGER.debug("Nothing found from point of interest at %s %s" %
                             (point_x, point_y))

        result = []
        #element type for generic areas is deduced from the mouse cursor
        for area in areas:
            center_x, center_y = center_of_rect(area)
            self._user_automation.mouse.move(center_x, center_y)
            self._user_automation.mouse.move(center_x + 1, center_y + 1)
            cursor = self._user_automation.get_current_cursor()
            element = {
                'coords': (area[0], area[1], area[2], area[3]),
                'type': cursor
            }
            result.append(element)

        for area in checkboxes:
            element = {
                'coords': (area[0], area[1], area[2], area[3]),
                'type': 'checkbox'
            }
            result.append(element)

        for area in radios:
            element = {
                'coords': (area[0], area[1], area[2], area[3]),
                'type': 'radio'
            }
            result.append(element)

        result = remove_containers(result)
        return result
Exemple #20
0
    def get_elements_by_app_outfocus(self, hints=None, window_origin=None):
        '''
        Alternate algorithm, tries to find the elements by setting the focus
        in the desktop and then back to the application while analyzing the
        difference on the screen, when the application loses the focus the
        normal behaviour in windows is that the active control is not shown
        with the focus
        This technique has some caveats, for example the border of the
        application needs to be ignored, also the default button will also
        be rendered differently giving in some cases 2 areas with changes

        Returns a list of element dictionaries, or False when the technique
        does not converge for this window.
        '''
        self._user_automation.mouse.move(1, 1)
        if hints is None:
            hints = {}
        if window_origin is None:
            window_origin = (0, 0)
        rects = []
        repeated = 0
        attempts = 0
        #phase 1: repeatedly toggle focus out and back in, collecting the
        #screen areas that change, until no new areas appear
        while True:
            screen = Image2(self._grab_screen())
            width = screen.size[0]
            #click below the visible area, presumably to focus the desktop/taskbar
            self._user_automation.mouse.click(width / 2, screen.size[1] + 5)
            time.sleep(0.1)
            screen2 = Image2(self._grab_screen())
            #alt-tab back to the application
            self._user_automation.keyboard.enters("{+alt}{tab}{+shift}"
                                                  "{tab}{-shift}{-alt}")
            time.sleep(1)

            #coords = screen.difference(screen2)
            #crop away the window border changes, then refine the diff inside
            coords = automation_helpers.crop_border_differences(
                screen, screen2)
            crop1 = screen.image.crop(
                (coords[0], coords[1], coords[2] + 1, coords[3] + 1))
            crop2 = screen2.image.crop(
                (coords[0], coords[1], coords[2] + 1, coords[3] + 1))
            coords2 = Image2(image=crop1).difference(Image2(image=crop2))
            if coords2:
                #translate the inner diff back to absolute screen coords
                coords = (coords[0] + coords2[0], coords[1] + coords2[1],
                          coords[0] + coords2[2], coords[1] + coords2[3])
            divisions = automation_helpers.find_inner_bbox(
                screen, screen2, coords)
            LOGGER.debug("Splitting found %s bboxes (%s)" %
                         (len(divisions), str(divisions)))
            repeated_in_divisions = 0
            for rect in divisions:
                print "resulting coords %s" % str(rect)
                if rect in rects:
                    repeated_in_divisions += 1
                else:
                    rects.append(rect)
            #two full rounds with nothing new means we have cycled thru all
            #the focusable controls
            if repeated_in_divisions == len(divisions):
                repeated += 1
                if repeated == 2:
                    break
            self._user_automation.keyboard.enters("{tab}")
            time.sleep(0.1)
            attempts += 1
            if attempts - len(rects) > 5:
                #something is wrong, is possible that is unable to detect
                #horizontal division at all
                return False

        #phase 2: hover each collected rect to classify it by cursor shape
        #and by whether the ui visibly reacts
        result = []
        for rect in rects:
            self._user_automation.mouse.move(1, 1)
            time.sleep(0.2)
            before_cursor = self._user_automation.get_current_cursor()
            before_screen = Image2(self._grab_screen())
            center_x, center_y = center_of_rect(rect)
            self._user_automation.mouse.move(center_x, center_y)
            self._user_automation.mouse.move(center_x + 1, center_y + 1)
            time.sleep(0.2)
            after_screen = Image2(self._grab_screen())
            after_cursor = self._user_automation.get_current_cursor()

            ui_changes = before_screen != after_screen
            cursor_changes = before_cursor != after_cursor
            #'visual clue needed' False means accept every rect regardless
            everything = not hints.get('visual clue needed', True)
            if cursor_changes or ui_changes or everything:
                element = {
                    'coords': (rect[0], rect[1], rect[2], rect[3]),
                    'type': after_cursor
                }
                result.append(element)

        return result
Exemple #21
0
def _paste_bbox_only(image1, image2, bbox):
    '''
    Build two blank RGB canvases (one per input image) that contain only the
    pixels of `bbox`, pasted at their original position.  Everything outside
    the bbox is black, so subsequent differences are confined to the bbox.
    '''
    # +1 because bbox coords are inclusive while PIL crop boxes are exclusive
    a_box = (bbox[0], bbox[1], bbox[2] + 1, bbox[3] + 1)
    img1 = Image.new("RGB", image1.size)
    img1.paste(image1.image.crop(a_box), (bbox[0], bbox[1]))
    img2 = Image.new("RGB", image2.size)
    img2.paste(image2.image.crop(a_box), (bbox[0], bbox[1]))
    return img1, img2


def _masked_diff(img1, img2, mask1, mask2):
    '''
    Black out `mask1` on a copy of `img1` and `mask2` on a copy of `img2`,
    then return the bounding box of the remaining differences (falsy when
    the masked images are identical).
    '''
    masked1 = img1.copy()
    masked1.paste((0, 0, 0), mask1)
    masked2 = img2.copy()
    masked2.paste((0, 0, 0), mask2)
    return Image2(image=masked1).difference(Image2(image=masked2))


def find_inner_bbox(image1, image2, bbox):
    '''
    Tries to separate the changes inside bbox, returns an array with either
    1 or 2 bboxes
    '''
    division = find_vertical_division(image1, image2, bbox)
    if division:
        print("found vertical division as %s" % (str(division)))
        img1, img2 = _paste_bbox_only(image1, image2, bbox)
        # First pair hides everything right of the division (keeps the left
        # part); second pair hides everything left of it (keeps the right).
        mask_pairs = [
            ([division[1], 0, img1.size[0], img1.size[1]],
             [division[1], 0, img2.size[0], img2.size[1]]),
            ([0, 0, division[0] + 1, img1.size[1]],
             [0, 0, division[0] + 1, img2.size[1]]),
        ]
    else:
        division = find_horizontal_division(image1, image2, bbox)
        if not division:
            # No division found in either direction: bbox is indivisible.
            return [bbox]
        print("found horizontal division")
        img1, img2 = _paste_bbox_only(image1, image2, bbox)
        # Same idea as above, but masking below / above the division line.
        mask_pairs = [
            ([0, division[1], img1.size[0], img1.size[1]],
             [0, division[1], img2.size[0], img2.size[1]]),
            ([0, 0, img1.size[0], division[0] + 1],
             [0, 0, img2.size[0], division[0] + 1]),
        ]

    result = []
    for mask1, mask2 in mask_pairs:
        sub_bbox = _masked_diff(img1, img2, mask1, mask2)
        if sub_bbox:
            result.append(sub_bbox)

    # If masking removed every difference, fall back to the original bbox.
    if not result:
        result.append(bbox)

    return result