def open_textfinder_view(self):
     self.xml_name = None
     self.restore_view()
     screen_manager = ScreenManager()
     self.hide()
     self.sleep_before_action()
     time.sleep(0.600)
     img_color = screen_manager.grab_desktop(screen_manager.get_color_mat)
     img_color = cv2.cvtColor(img_color, cv2.COLOR_BGR2RGB)
     image = QImage(img_color, img_color.shape[1], img_color.shape[0], img_color.strides[0], QImage.Format_RGB888)
     self.alyvix_text_finder_controller = AlyvixTextFinderView(self)
     self.alyvix_text_finder_controller.set_bg_pixmap(image)
     self.alyvix_text_finder_controller.showFullScreen()
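The open_textfinder_view method above grabs the desktop with ScreenManager, converts the OpenCV BGR matrix to RGB, and wraps it in a QImage that becomes the full-screen background of the text-finder view. A minimal sketch of just that conversion step, assuming a PyQt4-era QImage (as used by the Alyvix designer code above) and a BGR numpy array like the one returned by grab_desktop:

import cv2
from PyQt4.QtGui import QImage  # assumption: PyQt4, consistent with the designer code above


def mat_to_qimage(bgr_mat):
    # OpenCV frames are BGR; QImage.Format_RGB888 expects RGB byte order.
    rgb = cv2.cvtColor(bgr_mat, cv2.COLOR_BGR2RGB)
    height, width = rgb.shape[:2]
    # strides[0] is the number of bytes per image row (QImage's bytesPerLine).
    # QImage does not copy the buffer, so keep a reference to rgb alive while
    # the image is in use.
    return QImage(rgb, width, height, rgb.strides[0], QImage.Format_RGB888)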
Example #3
    def __init__(self, name=None):
        """
        init the class

        :type name: string
        :param name: the object name
        """

        self._source_image_color = None
        self._source_image_gray = None
        self._objects_found = []
        self._log_manager = None

        #variables for the perfdata
        self._cacheManager = None
        self._min_different_contours = 15
        self._flag_thread_started = False
        self._flag_check_before_exit = False
        self._flag_checked_before_exit = False
        self._flag_thread_have_to_exit = False
        self._screen_capture = None
        #end perfdata section

        self._time_checked_before_exit_start = 0

        self._objects_finders_caller = []
        self._name_with_caller = None

        self._name = name
        self._log_manager = LogManager()
        self._log_manager.set_object_name(self._name)
        self._screen_capture = ScreenManager()
        self._cacheManager = CacheManager()
        self._configReader = ConfigReader()
    def edit_item(self):
        self.action = "edit"

        if self.listWidgetAlyObj.currentRow() < 0:
            return

        #print self.listWidgetAlyObj.currentRow()

        selected_item_data = self.listWidgetAlyObj.currentItem().data(
            Qt.UserRole).toString()
        self.xml_name = str(selected_item_data)
        #print selected_item_data

        if self.xml_name.endswith("_ObjectFinder.xml"):
            self.hide()
            time.sleep(0.600)
            self.alyvix_finder_controller = AlyvixObjectFinderView(self)
            self.alyvix_finder_controller.show()
            return

        if self.xml_name.endswith("_CustomCode.xml"):
            self.hide()
            time.sleep(0.600)
            self.alyvix_finder_controller = AlyvixCustomCodeView(self)
            self.alyvix_finder_controller.show()
            return

        screen_manager = ScreenManager()
        self.hide()
        time.sleep(0.600)
        img_color = cv2.imread(str(self.xml_name).replace("xml", "png"))  #screen_manager.grab_desktop(screen_manager.get_color_mat)
        #print "imgggg", self.path + os.sep + self.xml_name
        image = QImage(self.path + os.sep +
                       self.xml_name.replace("xml", "png"))
        #print self.path, self.robot_file_name, self.xml_name

        if self.xml_name.endswith("_RectFinder.xml"):
            self.alyvix_finder_controller = AlyvixRectFinderView(self)
        elif self.xml_name.endswith("_ImageFinder.xml"):
            self.alyvix_finder_controller = AlyvixImageFinderView(self)
        elif self.xml_name.endswith("_TextFinder.xml"):
            self.alyvix_finder_controller = AlyvixTextFinderView(self)

        #self.alyvix_rect_finder_controller.set_path(self.full_file_name)
        self.alyvix_finder_controller.set_bg_pixmap(image)
        self.alyvix_finder_controller.showFullScreen()
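edit_item above decides which editor to open by testing the XML file name suffix (_ObjectFinder, _CustomCode, _RectFinder, _ImageFinder, _TextFinder). The same dispatch can be expressed as a small lookup helper; a sketch of that idea (the helper name is illustrative, and the suffix/view pairs would be the ones used in edit_item):

def pick_editor(xml_name, editors):
    """Return the first editor factory whose suffix matches xml_name.

    editors is a list of (suffix, factory) pairs, e.g.
    [("_RectFinder.xml", AlyvixRectFinderView), ...] as in edit_item above.
    """
    for suffix, factory in editors:
        if xml_name.endswith(suffix):
            return factory
    return None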
Example #5
    def find(self):
        """
        find the main text and sub texts in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list containing the x, y, width and height of the rectangle(s) found
        """

        try:
            time_before_find = time.time()
            #print "into find"
            self._timedout_main_components = []
            self._timedout_sub_components = []

            self._objects_found = []

            source_img_auto_set = False

            res = self._info_manager.get_info("RESOLUTION")

            if self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(
                    screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            self.__find_log_folder = datetime.datetime.now().strftime(
                "%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0

            main_text = self._main_component[0]
            roi = self._main_component[1]

            if roi is not None:

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                if roi.unlimited_up is True:
                    y1 = 0
                    y2 = roi.y + roi.height

                if roi.unlimited_down is True:
                    y2 = res[1]

                if roi.unlimited_left is True:
                    x1 = 0
                    x2 = roi.x + roi.width

                if roi.unlimited_right is True:
                    x2 = res[0]

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width, channels = self._source_image_color.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_color[y1:y2, x1:x2]
            else:
                source_image = self._source_image_color

            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder,
                                             "source_img.png", source_image)

            source_image = cv2.cvtColor(source_image, cv2.COLOR_BGR2RGB)
            source_image_pil = Image.fromarray(source_image)
            width = source_image_pil.size[0]
            height = source_image_pil.size[1]
            source_image = source_image_pil.resize((width * 3, height * 3),
                                                   Image.BICUBIC)
            """
            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "resized.png", source_image)
            """

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            cnt = 0

            self.api = tesseract.TessBaseAPI()
            self.api.Init(
                get_python_lib() + os.sep + "alyvix" + os.sep + "extra" +
                os.sep + "Tesseract-OCR" + os.sep, main_text.lang,
                tesseract.OEM_DEFAULT)
            self.api.SetPageSegMode(tesseract.PSM_AUTO)
            #.setPageSegMode(TessBaseAPI.pageSegMode.PSM_SINGLE_CHAR);
            self.api.SetVariable("tessedit_char_whitelist",
                                 main_text.whitelist)

            phrase = ""
            concatWord = False
            wordLine = None

            timex = time.time()

            color_img = cv.CreateImageHeader(source_image.size,
                                             cv.IPL_DEPTH_8U, 3)

            cv.SetData(color_img, source_image.tobytes())

            grey_img = cv.CreateImage(cv.GetSize(color_img), 8, 1)

            cv.CvtColor(color_img, grey_img, cv.CV_RGB2GRAY)

            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder,
                                             "resized.png", grey_img)

            #cv.SaveImage('c:\\alan\\image2.jpg', grey_img)
            tesseract.SetCvImage(grey_img, self.api)

            #write debug image
            #LogManager.WriteCvImage(datetime.now().strftime("%H_%M_%S_%f") + '_Grey.png', grey_img)

            #text=Ocr.Api.GetUTF8Text()

            if self._log_manager.is_log_enable() is True:
                textfile_log_name = datetime.datetime.now().strftime(
                    "%H_%M_%S.%f")[:-3] + "_resized.txt"

            text = self.api.GetHOCRText(0)

            #print text
            root = ET.fromstring(text)

            line_sep = ""
            break_loop = False
            self.__phrase_backup = ""
            for span in root.iter('span'):
                object_found = []
                object_found.append([])
                object_found.append([])
                """
                if self._flag_thread_have_to_exit is True:
                    self._flag_thread_have_to_exit = False
                    self._flag_thread_started = False
                    self._source_image_color = None
                    self._source_image_gray = None
                    return []
                """

                try:
                    #print span.attrib,span.text
                    #print span.get('title')

                    title = span.get('title')
                    title = title.replace(';', '')
                    coordinates = title.split(' ')

                    if span.get('class') == 'ocr_line':
                        line = span.get('id')
                        line = line.replace('line_', '')
                        lineNr = line
                        line_sep = "\n"  #"RRRRRRR" #os.linesep

                    if not span.find('strong') == None:
                        span.text = span.find('strong').text

                    if not span.find('em') == None:
                        span.text = span.find('em').text

                    if not span.find('strong/em') == None:
                        span.text = span.find('strong/em').text

                    if span.text == None:
                        continue

                    phrase = phrase + " " + span.text  #+ line_sep
                    self.__phrase_backup = self.__phrase_backup + " " + line_sep + span.text

                    if line_sep != "":
                        line_sep = ""

                    #print phrase
                    #print main_text.text

                    result = re.match(
                        ".*" + unicode(main_text.text, "UTF-8") + ".*", phrase,
                        re.DOTALL | re.IGNORECASE)

                    #print span.text," >> line:",lineNr,"coordinates:",int(coordinates[1])/3,int(coordinates[2])/3,int(coordinates[3])/3,int(coordinates[4])/3
                    #print "text found:",phrase

                    #print "tempo ocr", time.time() - timex
                    if result != None:

                        x = offset_x + (int(coordinates[1]) / 3)
                        y = offset_y + (int(coordinates[2]) / 3)
                        #print "offset x, x", offset_x, (int(coordinates[1])/3)
                        #print "offset y, y", offset_y, (int(coordinates[2])/3)

                        w = (int(coordinates[3]) / 3) - (int(coordinates[1]) / 3)
                        h = (int(coordinates[4]) / 3) - (int(coordinates[2]) / 3)

                        self._timedout_main_components.append(
                            MatchResult((x, y, w, h)))

                        try:
                            #print "text from Ocr engine:",phrase
                            #print "ocr time:",time.time() - timex,"sec."
                            #phrase = phrase.replace(main_text.text,"")
                            insensitive_phrase = re.compile(
                                main_text.text, re.IGNORECASE)
                            #insensitive_phrase = re.compile(re.escape(main_text.text), re.IGNORECASE)
                            phrase = insensitive_phrase.sub('', phrase)
                            #print phrase

                        except Exception, err:
                            pass  #print err

                        sub_texts_len = len(self._sub_components)

                        if sub_texts_len == 0:
                            #good_points.append((x, y, w, h))

                            main_object_result = MatchResult((x, y, w, h))
                            object_found[0] = main_object_result

                            object_found[1] = None
                            objects_found.append(object_found)
                        else:

                            total_sub_template_found = 0

                            sub_objects_found = []
                            timed_out_objects = []
                            for sub_text in self._sub_components:
                                #print "entering in sub text"
                                """
                                if self._flag_thread_have_to_exit is True:
                                    self._flag_thread_have_to_exit = False
                                    self._flag_thread_started = False
                                    self._source_image_color = None
                                    self._source_image_gray = None
                                    return []
                                """

                                sub_template_coordinates = self._find_sub_text(
                                    (x, y), sub_text)

                                if sub_template_coordinates is not None:
                                    sub_objects_found.append(
                                        sub_template_coordinates)
                                    total_sub_template_found = total_sub_template_found + 1
                                    timed_out_objects.append(
                                        (sub_template_coordinates,
                                         sub_text[1]))
                                else:
                                    timed_out_objects.append(
                                        (None, sub_text[1]))

                                if total_sub_template_found == sub_texts_len:
                                    #good_points.append((x, y, w, h))

                                    main_object_result = MatchResult(
                                        (x, y, w, h))
                                    object_found[0] = main_object_result

                                    object_found[1] = sub_objects_found

                                    objects_found.append(object_found)
                                    #print "len obj found:", len(objects_found)
                                    #print "appended"
                                    break_loop = True

                            self._timedout_sub_components.append(
                                timed_out_objects)
                            #if break_loop is True:
                            #    break

                        # write debug message
                        #LogManager.WriteMessage("debug", "text from Ocr engine: " + phrase)
                        #return int(coordinates[1]),int(coordinates[2]),int(coordinates[3]),int(coordinates[4])

                except Exception, err:
                    pass  #print err
                    #LogManager.IsInError = True
                    #LogManager.WriteMessage("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))

            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_info_file(self.__find_log_folder,
                                                 textfile_log_name,
                                                 self.__phrase_backup)

            #print len(objects_found)
            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                if self._is_object_finder is True:
                    self._objects_found_of_sub_object_finder.extend(
                        copy.deepcopy(objects_found))
                    #gray_source_img = cv2.cvtColor(self._source_image, cv2.COLOR_BGR2GRAY)

                    #if self._info_manager.get_info('LOG OBJ IS FOUND') is False:
                    if self._info_manager.get_info(
                            'LOG OBJ FINDER TYPE') is None:
                        self._info_manager.set_info('LOG OBJ FINDER TYPE', 2)

                    # if wait_disappear is False:
                    self._log_manager.save_objects_found(
                        self._name,
                        self.get_source_image_gray(),
                        self._objects_found,
                        [x[1] for x in self._sub_components],
                        self.main_xy_coordinates,
                        self.sub_xy_coordinates,
                        finder_type=2)

                self._cacheManager.SetLastObjFoundFullImg(
                    self._source_image_gray)

            if source_img_auto_set is True and self._scraper_enable is False:
                self._source_image_color = None
                self._source_image_gray = None
                source_img_auto_set = False
            """
            if self._flag_check_before_exit is True:
                self._flag_checked_before_exit = True
            """

            #time.sleep(40)

            self._flag_thread_started = False

            if self._calc_last_finder_time is True:
                self._last_finder_time = time.time() - time_before_find
                self._calc_last_finder_time = False

            return self._objects_found
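Because the cropped source region is upscaled by a factor of three before Tesseract runs, the bounding boxes reported in the hOCR output are divided by three and shifted by the ROI offset to map them back to screen coordinates. A minimal sketch of that conversion in isolation (the function name is illustrative; the tuple layout mirrors the parsed title attribute above, and integer division matches the Python 2 code):

def hocr_bbox_to_screen(coordinates, offset_x, offset_y, scale=3):
    # coordinates is the split hOCR "title" attribute: ['bbox', x1, y1, x2, y2, ...]
    x = offset_x + int(coordinates[1]) / scale
    y = offset_y + int(coordinates[2]) / scale
    w = (int(coordinates[3]) / scale) - (int(coordinates[1]) / scale)
    h = (int(coordinates[4]) / scale) - (int(coordinates[2]) / scale)
    return x, y, w, h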
Example #6
    def find(self):
        """
        find the main rectangle and sub rectangles in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list containing the x, y, width and height of the rectangle(s) found
        """
        try:
            self._timedout_main_components = []
            self._timedout_sub_components = []

            self._main_extra_img_log = None
            self._sub_extra_imgages_log = []

            self._objects_found = []

            source_img_auto_set = False

            if self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(
                    screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            self.__find_log_folder = datetime.datetime.now().strftime(
                "%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0

            main_rect = self._main_component[0]
            roi = self._main_component[1]

            if roi is not None:

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width, channels = self._source_image_color.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_color[y1:y2, x1:x2]
            else:
                source_image = self._source_image_color

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            blue, green, red = cv2.split(source_image)
            # Run canny edge detection on each channel
            blue_edges = self.__median_canny(blue, 0.2, 0.3)
            green_edges = self.__median_canny(green, 0.2, 0.3)
            red_edges = self.__median_canny(red, 0.2, 0.3)

            # Join edges back into image
            edges = blue_edges | green_edges | red_edges
            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder,
                                             "source_img.png", source_image)
                self._log_manager.save_image(self.__find_log_folder,
                                             "edges.png", edges)

            #self._rect_extra_timedout_image = edges.copy()
            if roi is not None:
                self._main_extra_img_log = (edges.copy(), (x1, y1, x2, y2))
            else:
                self._main_extra_img_log = (edges.copy(), None)

            #edges = self.__median_canny(self._source_image, 0.2, 0.3)

            #self._timed_out_images.append(source_image.copy())

            # find the contours
            contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            #self._log_manager.save_image(self.__find_log_folder, "canny.png", edges)

            if main_rect.width != 0 and main_rect.height != 0:
                main_rect.min_width = main_rect.max_width = main_rect.width
                main_rect.min_height = main_rect.max_height = main_rect.height

            if main_rect.width_tolerance != 0 and main_rect.width != 0 and main_rect.height != 0:
                main_rect.min_width = main_rect.min_width - main_rect.width_tolerance
                main_rect.max_width = main_rect.max_width + main_rect.width_tolerance

            if main_rect.height_tolerance != 0 and main_rect.width != 0 and main_rect.height != 0:
                main_rect.min_height = main_rect.min_height - main_rect.height_tolerance
                main_rect.max_height = main_rect.max_height + main_rect.height_tolerance

            cnt = 0

            #print main_rect.min_width, main_rect.max_width, main_rect.min_height, main_rect.max_height
            # For each contour, find the bounding rectangle and draw it
            for c in reversed(contours):

                object_found = []
                object_found.append([])
                object_found.append([])
                self.__timed_out_sub_extra_images = []

                x, y, w, h = cv2.boundingRect(c)
                x = offset_x + x
                y = offset_y + y

                #print x, y, w, h

                if (w >= main_rect.min_width and w <= main_rect.max_width
                        and h >= main_rect.min_height
                        and h <= main_rect.max_height):

                    is_already_found = False

                    for point_already_analyzed in analyzed_points:

                        tolerance_region_w = (((main_rect.min_width + main_rect.max_width) / 2) / 2) + (20 * self._scaling_factor)
                        tolerance_region_h = (((main_rect.min_height + main_rect.max_height) / 2) / 2) + (20 * self._scaling_factor)

                        #tolerance_region = 20 * self._scaling_factor

                        if (x >= point_already_analyzed[0] - tolerance_region_w and
                                    x <= point_already_analyzed[0] + tolerance_region_w) and\
                                (y >= point_already_analyzed[1] - tolerance_region_h and
                                    y <= point_already_analyzed[1] + tolerance_region_h):

                            is_already_found = True

                    if is_already_found == False:

                        analyzed_points.append((x, y, w, h))

                        self._timedout_main_components.append(
                            MatchResult((x, y, w, h)))

                        #self._log_manager.set_main_object_points((x, y, w, h))
                        if self._log_manager.is_log_enable() is True:
                            img_copy = source_image.copy()
                            cv2.rectangle(img_copy,
                                          ((x - offset_x), (y - offset_y)),
                                          ((x - offset_x) + w,
                                           (y - offset_y) + h), (0, 0, 255), 2)
                            self._log_manager.save_image(
                                self.__find_log_folder, "object_found.png",
                                img_copy)

                        sub_templates_len = len(self._sub_components)

                        if sub_templates_len == 0:
                            #good_points.append((x, y, w, h))

                            main_object_result = MatchResult((x, y, w, h))
                            object_found[0] = main_object_result

                            object_found[1] = None
                            objects_found.append(object_found)
                        else:

                            total_sub_template_found = 0

                            sub_objects_found = []
                            timed_out_objects = []

                            for sub_rect in self._sub_components:

                                sub_template_coordinates = self._find_sub_rect(
                                    (x, y), sub_rect)

                                if sub_template_coordinates is not None:
                                    sub_objects_found.append(
                                        sub_template_coordinates)
                                    total_sub_template_found = total_sub_template_found + 1
                                    timed_out_objects.append(
                                        (sub_template_coordinates,
                                         sub_rect[1]))
                                else:
                                    timed_out_objects.append(
                                        (None, sub_rect[1]))

                                if total_sub_template_found == sub_templates_len:

                                    #good_points.append((x, y, w, h))

                                    main_object_result = MatchResult(
                                        (x, y, w, h))
                                    object_found[0] = main_object_result

                                    object_found[1] = sub_objects_found

                                    objects_found.append(object_found)

                            self._timedout_sub_components.append(
                                timed_out_objects)
                            self._sub_extra_imgages_log.append(
                                self.__timed_out_sub_extra_images)

                        #self._log_manager.save_object_image("img__result" + str(cnt) + ".png")
                cnt = cnt + 1

            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                if self._is_object_finder is True:
                    self._objects_found_of_sub_object_finder.extend(
                        copy.deepcopy(objects_found))
                #gray_source_img = cv2.cvtColor(self._source_image, cv2.COLOR_BGR2GRAY)
                self._cacheManager.SetLastObjFoundFullImg(
                    self._source_image_gray)

            #time.sleep(40)

            if source_img_auto_set is True:
                self._source_image_color = None
                self._source_image_gray = None
                source_img_auto_set = False

            self._flag_thread_started = False

            return self._objects_found

        except Exception, err:
            self._log_manager.save_exception(
                "ERROR", "an exception has occurred: " + str(err) +
                " on line " + str(sys.exc_traceback.tb_lineno))
            self._flag_thread_started = False
            return None
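The rect finder runs edge detection on each color channel separately and ORs the three edge maps before looking for contours. The __median_canny helper is not shown in this example; a plausible reconstruction, assuming (as the name and the 0.2/0.3 arguments suggest) that the Canny thresholds are derived from the channel's median intensity:

import cv2
import numpy


def median_canny(channel, thresh1, thresh2):
    # Plausible sketch only: derive the Canny thresholds from the median intensity.
    median = numpy.median(channel)
    return cv2.Canny(channel, int(thresh1 * median), int(thresh2 * median))

# find() above then combines the per-channel results:
#   edges = median_canny(blue, 0.2, 0.3) | median_canny(green, 0.2, 0.3) | median_canny(red, 0.2, 0.3)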
Example #7
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Developer: Alan Pipitone (Violet Atom) - http://www.violetatom.com/
# Supporter: Wuerth Phoenix - http://www.wuerth-phoenix.com/
# Official website: http://www.alyvix.com/

from alyvix.tools.configreader import ConfigReader
from alyvix.tools.screen import ScreenManager
from alyvix.bridge.robot import RobotManager
import time

config_reader = ConfigReader()
robot_manager = RobotManager()
screen_manager = ScreenManager()

_log_folder = None

_dict = {}


class InfoManager():
    def update(self):
        """
        updates all info.
        """

        self.set_info('START TIME', int(time.time()))

        float_scaling_factor = screen_manager.get_scaling_factor()
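InfoManager is the shared key/value store that the finders above read through get_info (e.g. "RESOLUTION", "SCALING FACTOR INT", "FINDER THREAD INTERVAL") and write through set_info. A short usage sketch, assuming the class as defined above (import path omitted on purpose):

info_manager = InfoManager()
info_manager.update()  # sets 'START TIME'; the finders above also read scaling-factor keys
start_time = info_manager.get_info('START TIME')
scaling = info_manager.get_info('SCALING FACTOR INT')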
Example #8
    def __init__(self, name=None):
        """
        init the class

        :type name: string
        :param name: the object name
        """
        self._main_component = None
        self._sub_components = []

        self._timedout_main_components = []
        self._timedout_sub_components = []

        self._main_extra_img_log = None
        self._sub_extra_imgages_log = []

        self._rect_extra_timedout_image = None

        self._robot_manager = RobotManager()

        self._rf_is_set = self._robot_manager.context_is_set()

        self._source_image_color = None
        self._source_image_gray = None
        self._objects_found = []
        self._log_manager = None

        self._timed_out_images = []

        self._find_thread_images = []
        self._find_thread_images_disappear = []
        #self._find_thread_images_copy = []
        self._last_thread_image = None
        self._last_thread_image_copy = None
        self._heartbeat_images = []
        self._heartbeat_images_copy = []

        #variables for the perfdata
        self._cacheManager = None
        self._min_different_contours = 15
        self._flag_thread_started = False
        self._flag_check_before_exit = False
        self._flag_checked_before_exit = False
        self._flag_thread_have_to_exit = False
        self._screen_capture = None
        #end perfdata section

        self._info_manager = InfoManager()
        self._scaling_factor = self._info_manager.get_info(
            "SCALING FACTOR INT")

        self._time_checked_before_exit_start = 0

        self._objects_finders_caller = []
        self._name_with_caller = None

        self._name = name
        self._log_manager = LogManager()
        self._log_manager.set_object_name(self._name)
        self._screen_capture = ScreenManager()
        self._cacheManager = CacheManager()
        self._configReader = ConfigReader()

        self.__enable_debug_calcperf = False

        #self._timer_for_disappear = 0

        self._object_is_found_flag = False

        self._is_object_finder = False
        self._objects_found_of_sub_object_finder = []
Example #9
class BaseFinder(object):
    def __init__(self, name=None):
        """
        init the class

        :type name: string
        :param name: the object name
        """
        self._main_component = None
        self._sub_components = []

        self._timedout_main_components = []
        self._timedout_sub_components = []

        self._main_extra_img_log = None
        self._sub_extra_imgages_log = []

        self._rect_extra_timedout_image = None

        self._robot_manager = RobotManager()

        self._rf_is_set = self._robot_manager.context_is_set()

        self._source_image_color = None
        self._source_image_gray = None
        self._objects_found = []
        self._log_manager = None

        self._timed_out_images = []

        self._find_thread_images = []
        self._find_thread_images_disappear = []
        #self._find_thread_images_copy = []
        self._last_thread_image = None
        self._last_thread_image_copy = None
        self._heartbeat_images = []
        self._heartbeat_images_copy = []

        #variables for the perfdata
        self._cacheManager = None
        self._min_different_contours = 15
        self._flag_thread_started = False
        self._flag_check_before_exit = False
        self._flag_checked_before_exit = False
        self._flag_thread_have_to_exit = False
        self._screen_capture = None
        #end perfdata section

        self._info_manager = InfoManager()
        self._scaling_factor = self._info_manager.get_info(
            "SCALING FACTOR INT")

        self._time_checked_before_exit_start = 0

        self._objects_finders_caller = []
        self._name_with_caller = None

        self._name = name
        self._log_manager = LogManager()
        self._log_manager.set_object_name(self._name)
        self._screen_capture = ScreenManager()
        self._cacheManager = CacheManager()
        self._configReader = ConfigReader()

        self.__enable_debug_calcperf = False

        #self._timer_for_disappear = 0

        self._object_is_found_flag = False

        self._is_object_finder = False
        self._objects_found_of_sub_object_finder = []

    def _compress_image(self, img):
        return cv2.imencode('.png', img)[1]

    def _uncompress_image(self, compressed_img):
        return cv2.imdecode(compressed_img, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    def set_name(self, name):
        """
        set the name of the object.

        :type name: string
        :param name: the name of the object
        """
        self._name = name
        self._log_manager.set_object_name(self._name)

    def get_name(self):
        """
        get the name of the object.

        :rtype: string
        :return: the name of the object
        """
        return self._name

    def set_name_with_caller(self):

        tmp_name = self._name

        for object_caller in self._objects_finders_caller:
            tmp_name = object_caller + os.sep + tmp_name

        self._name_with_caller = tmp_name
        self._log_manager.set_object_name(self._name_with_caller)

    def set_source_image_color(self, image_data):
        """
        set the color image in which the find method will search for the object.

        :type image_data: numpy.ndarray
        :param image_data: the color image
        """
        self._source_image_color = image_data.copy()
        img_gray = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        self.set_source_image_gray(img_gray)
        #self._log_manager.set_image(self._source_image)

    def set_source_image_gray(self, image_data):
        """
        set the gray image in which the find method will search for the object.

        :type image_data: numpy.ndarray
        :param image_data: the gray image
        """
        self._source_image_gray = image_data.copy()

    def get_source_image_color(self):
        """
        get the color image in which the find method will search for the object.

        :rtype: numpy.ndarray
        :return: the source color image
        """
        return self._source_image_color

    def get_source_image_gray(self):
        """
        get the gray image in which the find method will search for the object.

        :rtype: numpy.ndarray
        :return: the source gray image
        """
        return self._source_image_gray

    def find(self):
        raise NotImplementedError

    def wait(self, timeout=-1, wait_disappear=False):
        """
        wait until the object appears on the screen.
        If the timeout value is -1 (the default), the timeout is read from the config file;
        if no configuration file exists, the timeout defaults to 15 seconds.

        :param timeout: timeout in seconds
        :type timeout: int
        :param wait_disappear: if True, return -2 as soon as the object is found, so the caller can then wait for it to disappear
        :type wait_disappear: bool
        """

        #cv2.imwrite()

        #self._robot_manager.write_log_message("wait method: " + self.get_name(), "ERROR", False)
        #self._robot_manager.write_log_message("wait method: " + self.get_name(), "ERROR", False)
        #sss = self._robot_manager.get_suite_name()
        #ttt = self._robot_manager.get_testcase_name()

        #self._robot_manager.method1().method2()

        timeout_value = 15

        if timeout == -1:
            timeout_value = self._configReader.get_finder_wait_timeout()
        else:
            timeout_value = timeout

        self._objects_found = []

        self._heartbeat_images = []
        self._find_thread_images = []

        self._flag_thread_started = False
        self._flag_thread_have_to_exit = False

        self._heartbeat_images_copy = []

        time_elapsed = 0.0
        #time_of_last_change = 0.0
        self._time_checked_before_exit_start = None

        self._object_is_found_flag = False

        #screenCapture = ScreenManager()
        thread_interval = self._info_manager.get_info(
            "FINDER THREAD INTERVAL"
        )  #self._configReader.get_finder_thread_interval()
        #thread_interval = 0.5
        check_diff_interval = self._info_manager.get_info(
            "CHECK DIFF INTERVAL")

        img1 = self._cacheManager.GetLastObjFoundFullImg()

        if img1 is None:
            img1 = self._screen_capture.grab_desktop(
                self._screen_capture.get_gray_mat)

        thread_t0 = time.time()
        time_before_loop = time.time()
        while True:
            #txx = time.time()
            try:

                if len(self._objects_found) > 0 and self._flag_thread_started is False:
                    #do analysis cjecl_time()
                    """
                    print "len main:", len(self._objects_found)

                    print "main x, y, w, h:", self._objects_found[0][0].x, self._objects_found[0][0].y, self._objects_found[0][0].width, self._objects_found[0][0].height


                    if self._objects_found[0][1] is not None:
                        print "len secodn:", len(self._objects_found[0][1])
                        for sub_obj in self._objects_found[0][1]:
                            print "sub x, y, w, h:", sub_obj.x, sub_obj.y, sub_obj.width, sub_obj.height
                    """

                    self._last_thread_image = self._uncompress_image(
                        self._find_thread_images[-1][1])

                    #time.sleep(3600)

                    if wait_disappear is False:
                        self._log_manager.save_objects_found(
                            self._name, self.get_source_image_gray(),
                            self._objects_found,
                            [x[1] for x in self._sub_components])

                    if wait_disappear is True:
                        self._heartbeat_images_copy = copy.deepcopy(
                            self._heartbeat_images)
                        self._last_thread_image_copy = copy.deepcopy(
                            self._last_thread_image)
                        #self._timer_for_disappear = self._heartbeat_images[-1][0]
                        #self._find_thread_images_copy = copy.deepcopy(self._find_thread_images)
                        return -2
                    else:
                        self._object_is_found_flag = True
                        self._last_thread_image_copy = copy.deepcopy(
                            self._last_thread_image)
                        return self._get_performance()

                if time_elapsed > timeout_value and self._flag_thread_started is False:
                    self._last_thread_image = self._uncompress_image(
                        self._find_thread_images[-1][1])
                    #from alyvix.finders.cv.rectfinder import RectFinder
                    #from alyvix.finders.cv.imagefinder import ImageFinder
                    #from alyvix.finders.cv.textfinder import TextFinder
                    from alyvix.finders.cv.objectfinder import ObjectFinder

                    #if not isinstance(self, ObjectFinder):
                    self._log_manager.save_timedout_objects(
                        self._name + "_timedout", self.get_source_image_gray(),
                        self._timedout_main_components,
                        self._timedout_sub_components,
                        self._main_extra_img_log, self._sub_extra_imgages_log)
                    #else:
                    if isinstance(self, ObjectFinder):

                        #self._log_manager.save_timedout_objects(self._name + "_timedout", self._last_thread_image, self._main_component[0]._timedout_main_components, self._main_component[0]._timedout_sub_components, self._main_component[0]._main_extra_img_log, self._main_component[0]._sub_extra_imgages_log, True, self._main_component[0]._name)

                        if len(self._main_component[0]._objects_found) == 0:
                            self._log_manager.save_timedout_objects(
                                self._name + "_timedout",
                                self._last_thread_image,
                                self._main_component[0]._timedout_main_components,
                                self._main_component[0]._timedout_sub_components,
                                self._main_component[0]._main_extra_img_log,
                                self._main_component[0]._sub_extra_imgages_log,
                                True, self._main_component[0]._name)

                        for t_sub in self._sub_components:
                            self._log_manager.save_timedout_objects(
                                self._name + "_timedout",
                                self._last_thread_image,
                                t_sub[0]._timedout_main_components,
                                t_sub[0]._timedout_sub_components,
                                t_sub[0]._main_extra_img_log,
                                t_sub[0]._sub_extra_imgages_log, True,
                                t_sub[0]._name)

                    return -1

                t0 = time.time()

                #cv2.imwrite('img2.png', img2)

                #if time.time() - thread_t0 >= thread_interval:
                if (time_elapsed < timeout_value
                        and time.time() - thread_t0 >= thread_interval
                        and self._flag_thread_started is False):
                    thread_t0 = time.time()

                    self._flag_thread_started = True
                    """
                    folder = 'c:\\log\\buffer_images'
                    for the_file in os.listdir(folder):
                        file_path = os.path.join(folder, the_file)
                        try:
                            if os.path.isfile(file_path):
                                os.unlink(file_path)
                        except Exception, e:
                            print e
                    """

                    #for i in range(len(self._find_thread_images)):
                    #cv2.imwrite("c:\\log\\buffer_images\\_old_" + str(self._find_thread_images[i][0]) + ".png", self._uncompress_image(self._find_thread_images[i][1]))

                    self._find_thread_images = copy.deepcopy(
                        self._heartbeat_images)
                    self._heartbeat_images = []

                    self.set_source_image_color(img2_color)
                    self.set_source_image_gray(img2_gray)
                    if self._log_manager.is_log_enable() is True:
                        self._log_manager.delete_all_items(
                            keep_items=20, exclude_item="difference")
                    worker = Thread(target=self.find)
                    worker.setDaemon(True)
                    worker.start()

                img2_color = self._screen_capture.grab_desktop(
                    self._screen_capture.get_color_mat)
                img2_gray = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY)
                self._heartbeat_images.append(
                    (time_elapsed, self._compress_image(img2_gray)))

                t1 = time.time() - t0
                time_sleep = check_diff_interval - t1
                if time_sleep < 0:
                    time_sleep = 0

                time.sleep(time_sleep)

                time_elapsed = time.time() - time_before_loop
                #print time_elapsed

            except Exception, err:
                #print str(err) + " on line " + str(sys.exc_traceback.tb_lineno)
                self._log_manager.save_exception(
                    "ERROR", "an exception has occurred: " + str(err) +
                    " on line " + str(sys.exc_traceback.tb_lineno))
                return None
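wait() above keeps grabbing the desktop, starts find() in a worker thread at every FINDER THREAD INTERVAL, and returns -1 on timeout, -2 when wait_disappear is True and the object has appeared, or the value of _get_performance() otherwise. A hedged usage sketch of a finder built on this class (the subclass and its trivial matching logic are illustrative only; the real subclasses are RectFinder, ImageFinder, TextFinder and ObjectFinder, and the Alyvix managers used by __init__ must be available):

class DummyFinder(BaseFinder):
    # Illustrative only: a real finder implements find() with its own matching
    # logic and appends MatchResult pairs to self._objects_found.
    def find(self):
        self._objects_found = []
        self._flag_thread_started = False
        return self._objects_found


finder = DummyFinder("my_object")
result = finder.wait(timeout=10)
if result == -1:
    pass  # timed out: nothing was found within 10 seconds
elif result == -2:
    pass  # returned only when wait(..., wait_disappear=True) is used
else:
    pass  # the performance value computed by _get_performance()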
Example #10
class BaseFinder(object):


    def __init__(self, name=None):
        """
        init the class

        :type name: string
        :param name: the object name
        """

        self._source_image_color = None
        self._source_image_gray = None
        self._objects_found = []
        self._log_manager = None

        #variables for the perfdata
        self._cacheManager = None
        self._min_different_contours = 15
        self._flag_thread_started = False
        self._flag_check_before_exit = False
        self._flag_checked_before_exit = False
        self._flag_thread_have_to_exit = False
        self._screen_capture = None
        #end perfdata section

        self._time_checked_before_exit_start = 0

        self._objects_finders_caller = []
        self._name_with_caller = None

        self._name = name
        self._log_manager = LogManager()
        self._log_manager.set_object_name(self._name)
        self._screen_capture = ScreenManager()
        self._cacheManager = CacheManager()
        self._configReader = ConfigReader()

    def set_name(self, name):
        """
        set the name of the object.

        :type name: string
        :param name: the name of the object
        """
        self._name = name
        self._log_manager.set_object_name(self._name)

    def get_name(self):
        """
        get the name of the object.

        :rtype: string
        :return: the name of the object
        """
        return self._name

    def set_name_with_caller(self):

        tmp_name = self._name

        for object_caller in self._objects_finders_caller:
            tmp_name = object_caller + os.sep + tmp_name

        self._name_with_caller = tmp_name
        self._log_manager.set_object_name(self._name_with_caller)

    def set_source_image_color(self, image_data):
        """
        set the color image in which the find method will search for the object.

        :type image_data: numpy.ndarray
        :param image_data: the color image
        """
        self._source_image_color = image_data.copy()
        img_gray = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        self.set_source_image_gray(img_gray)
        #self._log_manager.set_image(self._source_image)

    def set_source_image_gray(self, image_data):
        """
        set the gray image in which the find method will search for the object.

        :type image_data: numpy.ndarray
        :param image_data: the gray image
        """
        self._source_image_gray = image_data.copy()

    def find(self):
        raise NotImplementedError

    def wait(self, timeout=-1):
        """
        wait until the object appears on the screen.
        If the timeout value is -1 (the default), the timeout is read from the config file;
        if no configuration file exists, the timeout defaults to 15 seconds.

        :param timeout: timeout in seconds
        :type timeout: int
        """

        timeout_value = 15

        if timeout == -1:
            timeout_value = self._configReader.get_finder_wait_timeout()
        else:
            timeout_value = timeout

        self._objects_found = []
        self._flag_thread_started = False
        self._flag_thread_have_to_exit = False

        time_elapsed = 0
        time_of_last_change = 0
        self._time_checked_before_exit_start = None

        #screenCapture = ScreenManager()
        thread_interval = self._configReader.get_finder_thread_interval()
        check_diff_interval = self._configReader.get_finder_diff_interval()

        img1 = self._cacheManager.GetLastObjFoundFullImg()

        if img1 is None:
            img1 = self._screen_capture.grab_desktop(self._screen_capture.get_gray_mat)

        thread_t0 = time.time()
        time_before_loop = time.time()
        while True:
            try:
                if time_elapsed > timeout_value:
                    return -1

                t0 = time.time()

                img2_color = self._screen_capture.grab_desktop(self._screen_capture.get_color_mat)
                img2_gray = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY)

                #cv2.imwrite('img2.png', img2)

                if time.time() - thread_t0 >= thread_interval:
                    thread_t0 = time.time()
                    if self._flag_thread_started is False:
                        self._flag_thread_started = True
                        self.set_source_image_color(img2_color)
                        self.set_source_image_gray(img2_gray)
                        if self._log_manager.is_log_enable() is True:
                            self._log_manager.delete_all_items(keep_items=20, exclude_item="difference")
                        worker = Thread(target=self.find)
                        worker.setDaemon(True)
                        worker.start()

                if len(self._objects_found) > 0:
                    if self._time_checked_before_exit_start is not None:
                        return self._time_checked_before_exit_start
                    else:
                        return time_of_last_change

                diff_mask = numpy.bitwise_xor(img1, img2_gray)
                #cv2.imwrite("bit.png", diff_mask)

                # find the contours
                contours, hierarchy = cv2.findContours(diff_mask, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

                for cnt in contours:
                    x,y,w,h = cv2.boundingRect(cnt)
                    #cv2.rectangle(img2,(x,y),(x+w,y+h),(0,0,255),3)

                #is_images_equal = not(diff_mask.any())
                if len(contours) < self._min_different_contours:
                #if True:
                    is_images_equal = True
                else:
                    is_images_equal = False

                if is_images_equal is False:
                    if self._log_manager.is_log_enable() is True:
                        self._log_manager.save_image("difference", "old.png", img1)
                    img1 = img2_gray.copy()
                    if self._log_manager.is_log_enable() is True:
                        self._log_manager.save_image("difference", "current.png", img2_gray)
                        self._log_manager.save_image("difference", "mask.png", diff_mask)
                        self._log_manager.delete_all_items(sub_dir="difference", keep_items=20)

                    if self._flag_check_before_exit is False:
                        self._flag_check_before_exit = True
                        self._time_checked_before_exit_start = time_of_last_change
                    elif self._flag_checked_before_exit is True and self._flag_check_before_exit is True:
                        self._flag_check_before_exit = False
                        self._flag_checked_before_exit = False
                        self._flag_thread_have_to_exit = True
                        self._time_checked_before_exit_start = None

                    time_of_last_change = time_elapsed

                #if len(self._objects_found) > 0:
                #    return time_of_last_change

                t1 = time.time() - t0
                time_sleep = check_diff_interval - t1
                if time_sleep < 0:
                    time_sleep = 0

                time.sleep(time_sleep)

                time_elapsed = time.time() - time_before_loop

            except Exception, err:
                #print str(err) + " on line " + str(sys.exc_traceback.tb_lineno)
                self._log_manager.save_exception("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))
                return None
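The change-detection step in this wait() XORs the previous and current grayscale frames and counts the contours of the difference mask: fewer than _min_different_contours (15 by default) contours means the screen is treated as unchanged. A standalone sketch of that test, assuming the same OpenCV 2.x findContours signature used above:

import cv2
import numpy


def screen_has_changed(prev_gray, curr_gray, min_different_contours=15):
    # Pixels that differ between the two frames become non-zero in the mask.
    diff_mask = numpy.bitwise_xor(prev_gray, curr_gray)
    contours, hierarchy = cv2.findContours(diff_mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # A handful of difference contours is treated as noise; the screen only
    # counts as changed once the contour count reaches the threshold.
    return len(contours) >= min_different_contours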
Example #11
    def __init__(self, name=None):
        """
        init the class

        :type name: string
        :param name: the object name
        """
        self._main_component = None
        self._sub_components = []

        self._timedout_main_components = []
        self._timedout_sub_components = []

        self._main_extra_img_log = None
        self._sub_extra_imgages_log = []

        self._rect_extra_timedout_image = None

        self._robot_manager = RobotManager()

        self._rf_is_set = self._robot_manager.context_is_set()


        self._source_image_color = None
        self._source_image_gray = None
        self._objects_found = []
        self._log_manager = None

        self._timed_out_images = []

        self._find_thread_images = []
        self._find_thread_images_disappear = []
        #self._find_thread_images_copy = []
        self._last_thread_image = None
        self._last_thread_image_copy = None
        self._heartbeat_images = []
        self._heartbeat_images_copy = []

        #variables for the perfdata
        self._cacheManager = None
        self._min_different_contours = 15
        self._flag_thread_started = False
        self._flag_check_before_exit = False
        self._flag_checked_before_exit = False
        self._flag_thread_have_to_exit = False
        self._screen_capture = None
        #end perfdata section

        self._info_manager = InfoManager()
        self._scaling_factor = self._info_manager.get_info("SCALING FACTOR INT")

        self._time_checked_before_exit_start = 0

        self._objects_finders_caller = []
        self._name_with_caller = None

        self._name = name
        self._log_manager = LogManager()
        self._log_manager.set_object_name(self._name)
        self._screen_capture = ScreenManager()
        self._cacheManager = CacheManager()
        self._configReader = ConfigReader()

        self.__enable_debug_calcperf = False

        #self._timer_for_disappear = 0

        self._object_is_found_flag = False

        self._is_object_finder = False
        self._objects_found_of_sub_object_finder = []
Example #12
    def find(self):

        for sub_object in self._sub_components:
            sub_object[0]._objects_found_of_sub_object_finder = []

        self._timedout_main_components = []
        self._timedout_sub_components = []

        self._objects_found = []

        main_object = self._main_component[0]

        if not isinstance(main_object, ObjectFinder):
            #main_object[1] = [1,1]
            if self._main_component[1] is not None:
                roi = Roi(self._main_component[1])
            else:
                roi = None

            main_component = main_object._main_component[0]
            main_object._main_component = (main_component, roi)
            """
            if isinstance(main_object, RectFinder):
                main_rect = main_object._main_rect[0]
                main_object._main_rect = (main_rect, roi)

            elif isinstance(main_object, ImageFinder):
                main_template = main_object._main_template[0]
                main_object._main_template = (main_template, roi)

            elif isinstance(main_object, TextFinder):
                main_text = main_object._main_text[0]
                main_object._main_text = (main_text, roi)
            """

        source_img_auto_set = False

        if self._source_image_color is None or self._source_image_gray is None:

            screen_capture = ScreenManager()
            src_img_color = screen_capture.grab_desktop(
                screen_capture.get_color_mat)
            self.set_source_image_color(src_img_color)
            src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
            self.set_source_image_gray(src_img_gray)
            #main_object.set_source_image_color(self._source_image_color)
            #main_object.set_source_image_gray(self._source_image_gray)
            source_img_auto_set = True

        main_object.set_source_image_color(self._source_image_color)
        main_object.set_source_image_gray(self._source_image_gray)

        objects_found = []

        main_object.find()

        cnt = 0
        for object in main_object._objects_found:
            """
            if self._flag_thread_have_to_exit is True:
                main_object._flag_thread_have_to_exit = False
                main_object._flag_thread_started = False
                self._flag_thread_have_to_exit = False
                self._flag_thread_started = False
                return []
            """

            object_found = []
            object_found.append([])
            object_found.append([])

            x = object[0].x
            y = object[0].y
            w = object[0].width
            h = object[0].height

            sub_objects_len = len(self._sub_components)

            self._timedout_main_components.append(MatchResult((x, y, w, h)))

            if sub_objects_len == 0:
                #good_points.append((x, y, w, h))

                main_object_result = MatchResult((x, y, w, h))
                object_found[0] = main_object_result

                object_found[1] = None
                objects_found.append(object_found)
                self._main_indexes_to_keep.append(cnt)
            else:

                total_sub_object_found = 0

                sub_objects_found = []
                timed_out_objects = []
                for sub_object in self._sub_components:
                    """
                    if self._flag_thread_have_to_exit is True:
                        main_object._flag_thread_have_to_exit = False
                        main_object._flag_thread_started = False
                        self._flag_thread_have_to_exit = False
                        self._flag_thread_started = False
                        return []
                    """

                    #sub_object._objects_found = []

                    sub_template_coordinates = copy.deepcopy(
                        self.find_sub_object((x, y), sub_object))

                    if sub_template_coordinates is not None:
                        sub_objects_found.append(sub_template_coordinates)
                        timed_out_objects.append(
                            (sub_template_coordinates, sub_object[1]))
                        total_sub_object_found = total_sub_object_found + 1
                    else:
                        timed_out_objects.append((None, sub_object[1]))

                    if total_sub_object_found == sub_objects_len:
                        #good_points.append((x, y, w, h))

                        main_object_result = MatchResult((x, y, w, h))
                        object_found[0] = main_object_result

                        object_found[1] = sub_objects_found

                        objects_found.append(object_found)
                        self._main_indexes_to_keep.append(cnt)

                self._timedout_sub_components.append(timed_out_objects)
            #self._log_manager.save_object_image("img_" + str(cnt) + ".png")
            cnt = cnt + 1

        if len(objects_found) > 0:
            self._objects_found = copy.deepcopy(objects_found)
            main_object.rebuild_result(self._main_indexes_to_keep)
            self.rebuild_result_for_sub_component(self._main_indexes_to_keep)
            self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)

        #self._source_image_color = None
        #self._source_image_gray = None

        if source_img_auto_set is True:
            self._source_image_gray = None
            self._source_image_color = None
            source_img_auto_set = False
        """
        if self._flag_check_before_exit is True:
            self._flag_checked_before_exit = True
            #print "self._flag_checked_before_exit = True"
            #print self._time_checked_before_exit_start
        """

        self._flag_thread_started = False
        main_object._flag_thread_started = False

        return self._objects_found
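
find() above returns self._objects_found: a list of [main MatchResult, sub MatchResults] pairs, where the second element is None when the finder has no sub components. A hedged sketch of consuming that structure (object_finder below is assumed to be an already configured instance):

# hedged sketch -- object_finder is assumed to be an already configured finder
for main_result, sub_results in object_finder.find():
    x, y = main_result.x, main_result.y
    w, h = main_result.width, main_result.height
    if sub_results is not None:
        for sub_result in sub_results:
            # one MatchResult per sub component found relative to the main match
            print sub_result.x, sub_result.y, sub_result.width, sub_result.height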
Example #13
    def find(self):
        """
        find the main template and sub templates in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list of [main template MatchResult, sub template MatchResults] pairs
        """
        #tzero = time.time()
        try:

            #x = 1 / 0

            #print "main comp:",self._main_component

            self._timedout_main_components = []
            self._timedout_sub_components = []

            self._main_extra_img_log = None
            self._sub_extra_imgages_log = []

            source_img_auto_set = False

            self._objects_found = []

            if self._source_image_gray is None or self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            self.__find_log_folder = datetime.datetime.now().strftime("%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0


            main_template = self._main_component[0]
            #print "main templ:", main_template
            roi = self._main_component[1]

            if roi is not None:

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width = self._source_image_gray.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_gray[y1:y2, x1:x2]
            else:
                source_image = self._source_image_gray


            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "source_img.png", source_image)
                self._log_manager.save_image(self.__find_log_folder, "main_template.png", main_template.image_data)

            #self._timed_out_images.append(source_image.copy())

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            w, h = main_template.image_data.shape[::-1]
            src_w, src_h = source_image.shape[::-1]
            tpl_w = w
            tpl_h = h

            if src_h < tpl_h or src_w < tpl_w:
                self._flag_thread_have_to_exit = False
                """
                self._flag_thread_started = False
                self._source_image_gray = None
                self._source_image_color = None
                """
                return []

            result = None

            res = cv2.matchTemplate(source_image, main_template.image_data, cv2.TM_CCOEFF_NORMED)
            #min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(resacascascsacas)

            res_norm = (res *255).round().clip(min=0).astype(numpy.uint8) #numpy.array(res * 255, dtype = numpy.float32) #(res * 255) #.round().astype(numpy.int8)
            res_norm = cv2.resize(res_norm,(source_image.shape[1], source_image.shape[0]), interpolation = cv2.INTER_CUBIC)

            #cv2.imwrite("c:\\log\\res_norm.png",res_norm)
            #cv2.imwrite("c:\\log\\res.png",res)

            #loadddd = cv2.imread("c:\\log\\aaaaaaaaaaaaaaaa.png")

            #res_norm.resize(res_norm.shape[0], res_norm.shape[0], 3L, refcheck=False)

            if roi is not None:
                self._main_extra_img_log = (res_norm, (x1, y1, x2, y2))
            else:
                self._main_extra_img_log = (res_norm, None)

            loc = numpy.where(res >= main_template.threshold)

            cnt = 0
            for point in zip(*loc[::-1]):

                object_found = []
                object_found.append([])
                object_found.append([])
                self.__timed_out_sub_extra_images = []

                """
                if self._flag_thread_have_to_exit is True:
                    self._flag_thread_have_to_exit = False
                    self._flag_thread_started = False
                    self._source_image_gray = None
                    self._source_image_color = None
                    return []
                """

                x = offset_x + point[0]
                y = offset_y + point[1]

                is_already_found = False

                for point_already_analyzed in analyzed_points:

                    #tolerance_region_w = (tpl_w/2)  + (20 * self._scaling_factor)
                    #tolerance_region_h = (tpl_h/2) + (20 * self._scaling_factor)

                    tolerance_region_w = (tpl_w/2)  + (self._overlapping_factor * self._scaling_factor)
                    tolerance_region_h = (tpl_h/2) + (self._overlapping_factor * self._scaling_factor)

                    if (x >= point_already_analyzed[0] - tolerance_region_w and
                                x <= point_already_analyzed[0] + tolerance_region_w) and\
                            (y >= point_already_analyzed[1] - tolerance_region_h and
                                     y <= point_already_analyzed[1] + tolerance_region_h):

                        is_already_found = True
                        #print point[0],point_already_analyzed[0],point[1],point_already_analyzed[1]

                if is_already_found is False:

                    analyzed_points.append((x, y, w, h))

                    self._timedout_main_components.append(MatchResult((x, y, w, h)))

                    #self._log_manager.set_main_object_points((x, y, w, h))
                    if self._log_manager.is_log_enable() is True:
                        img_copy = source_image.copy()
                        cv2.rectangle(img_copy, ((x-offset_x), (y-offset_y)), ((x-offset_x)+w, (y-offset_y)+h), (0, 0, 255), 2)
                        self._log_manager.save_image(self.__find_log_folder, "object_found.png", img_copy)

                    sub_templates_len = len(self._sub_components)

                    if sub_templates_len == 0:
                        main_object_result = MatchResult((x, y, w, h))
                        object_found[0] = main_object_result
                        object_found[1] = None
                        objects_found.append(object_found)
                    else:
                        #print sub_templates_len
                        total_sub_template_found = 0
                        sub_objects_found = []
                        timed_out_objects = []
                        timed_out_sub_extra_images = []
                        for sub_template in self._sub_components:

                            """
                            if self._flag_thread_have_to_exit is True:
                                self._flag_thread_have_to_exit = False
                                self._flag_thread_started = False
                                self._source_image_gray = None
                                self._source_image_color = None
                                return []
                            """

                            sub_template_coordinates = self._find_sub_template((x, y), sub_template)

                            if sub_template_coordinates is not None:
                                sub_objects_found.append(sub_template_coordinates)
                                total_sub_template_found = total_sub_template_found + 1
                                timed_out_objects.append((sub_template_coordinates, sub_template[1]))
                            else:
                                timed_out_objects.append((None, sub_template[1]))

                            #timed_out_sub_extra_images.append()

                            if total_sub_template_found == sub_templates_len:
                                #good_points.append((x, y, w, h))

                                main_object_result = MatchResult((x, y, w, h))
                                object_found[0] = main_object_result

                                object_found[1] = sub_objects_found

                                objects_found.append(object_found)
                        self._timedout_sub_components.append(timed_out_objects)
                        self._sub_extra_imgages_log.append(self.__timed_out_sub_extra_images)
                    #self._log_manager.save_object_image("img_" + str(cnt) + ".png")
                cnt = cnt + 1

            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                if self._is_object_finder is True:
                    self._objects_found_of_sub_object_finder.extend(copy.deepcopy(objects_found))
                self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)

            if source_img_auto_set is True:
                self._source_image_gray = None
                self._source_image_color = None
                source_img_auto_set = False

            """
            if self._flag_check_before_exit is True:
                self._flag_checked_before_exit = True
            """

            #time.sleep(40)

            self._flag_thread_started = False

            #print time.time() - tzero

            return self._objects_found

        except Exception, err:
            self._log_manager.save_exception("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))
            self._flag_thread_started = False
            return []
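
The heart of the finder above is cv2.matchTemplate with TM_CCOEFF_NORMED: every location whose score reaches the template threshold becomes a candidate, and candidates that fall inside the tolerance region of an already accepted point are discarded as duplicates (the is_already_found check). The full listing additionally builds res_norm for logging and verifies the sub templates; the following is a minimal standalone sketch of just the candidate selection and de-duplication step, assuming grayscale numpy arrays and a made-up helper name:

import cv2
import numpy


def match_points(source_gray, template_gray, threshold, tolerance):
    # correlate the template over the source image
    res = cv2.matchTemplate(source_gray, template_gray, cv2.TM_CCOEFF_NORMED)
    points = []
    # numpy.where returns (rows, cols); reversing the tuple gives (x, y) pairs
    for x, y in zip(*numpy.where(res >= threshold)[::-1]):
        # keep the point only if it lies outside the tolerance region of every
        # point accepted so far (same rule as is_already_found above)
        if all(abs(x - px) > tolerance or abs(y - py) > tolerance
               for px, py in points):
            points.append((x, y))
    return points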
Example #14
    def find(self):
        """
        find the main template and sub templates in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list of [main template MatchResult, sub template MatchResults] pairs
        """
        #tzero = time.time()
        time_before_find = time.time()
        try:

            #x = 1 / 0

            #print "main comp:",self._main_component

            self._timedout_main_components = []
            self._timedout_sub_components = []

            self._main_extra_img_log = None
            self._sub_extra_imgages_log = []

            source_img_auto_set = False

            self._objects_found = []

            if self._source_image_gray is None or self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(
                    screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            if str(self._info_manager.get_info('channel')).lower() != 'all':
                img_b, img_g, img_r = cv2.split(self._source_image_color)

                if str(self._info_manager.get_info('channel')).lower() == 'b':
                    self._source_image_color = cv2.cvtColor(
                        img_b, cv2.COLOR_GRAY2BGR)
                elif str(
                        self._info_manager.get_info('channel')).lower() == 'g':
                    self._source_image_color = cv2.cvtColor(
                        img_g, cv2.COLOR_GRAY2BGR)
                elif str(
                        self._info_manager.get_info('channel')).lower() == 'r':
                    self._source_image_color = cv2.cvtColor(
                        img_r, cv2.COLOR_GRAY2BGR)

                self._source_image_gray = cv2.cvtColor(
                    self._source_image_color, cv2.COLOR_BGR2GRAY)

            self.__find_log_folder = datetime.datetime.now().strftime(
                "%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0

            main_template = self._main_component[0]
            #print "main templ:", main_template
            roi = self._main_component[1]

            if roi is not None:

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                res = self._info_manager.get_info("RESOLUTION")

                y1 = roi.y
                y2 = y1 + roi.height

                x1 = roi.x
                x2 = x1 + roi.width

                if roi.unlimited_up is True:
                    y1 = 0
                    y2 = roi.y + roi.height

                if roi.unlimited_down is True:
                    y2 = res[1]

                if roi.unlimited_left is True:
                    x1 = 0
                    x2 = roi.x + roi.width

                if roi.unlimited_right is True:
                    x2 = res[0]

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width = self._source_image_gray.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_gray[y1:y2, x1:x2]
            else:
                source_image = self._source_image_gray

            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder,
                                             "source_img.png", source_image)
                self._log_manager.save_image(self.__find_log_folder,
                                             "main_template.png",
                                             main_template.image_data)

            #self._timed_out_images.append(source_image.copy())

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            w, h = main_template.image_data.shape[::-1]
            src_w, src_h = source_image.shape[::-1]
            tpl_w = w
            tpl_h = h

            if src_h < tpl_h or src_w < tpl_w:
                self._flag_thread_have_to_exit = False
                """
                self._flag_thread_started = False
                self._source_image_gray = None
                self._source_image_color = None
                """
                return []

            result = None

            res = cv2.matchTemplate(source_image, main_template.image_data,
                                    cv2.TM_CCOEFF_NORMED)
            #min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(resacascascsacas)

            res_norm = (res * 255).round().clip(min=0).astype(
                numpy.uint8
            )  #numpy.array(res * 255, dtype = numpy.float32) #(res * 255) #.round().astype(numpy.int8)
            res_norm = cv2.resize(
                res_norm, (source_image.shape[1], source_image.shape[0]),
                interpolation=cv2.INTER_CUBIC)

            #cv2.imwrite("c:\\log\\res_norm.png",res_norm)
            #cv2.imwrite("c:\\log\\res.png",res)

            #res_norm.resize(res_norm.shape[0], res_norm.shape[0], 3L, refcheck=False)

            if roi is not None:
                self._main_extra_img_log = (res_norm, (x1, y1, x2, y2))
            else:
                self._main_extra_img_log = (res_norm, None)

            loc = numpy.where(res >= main_template.threshold)

            cnt = 0
            for point in zip(*loc[::-1]):

                object_found = []
                object_found.append([])
                object_found.append([])
                self.__timed_out_sub_extra_images = []
                """
                if self._flag_thread_have_to_exit is True:
                    self._flag_thread_have_to_exit = False
                    self._flag_thread_started = False
                    self._source_image_gray = None
                    self._source_image_color = None
                    return []
                """

                x = offset_x + point[0]
                y = offset_y + point[1]

                is_already_found = False

                for point_already_analyzed in analyzed_points:

                    #tolerance_region_w = (tpl_w/2)  + (20 * self._scaling_factor)
                    #tolerance_region_h = (tpl_h/2) + (20 * self._scaling_factor)

                    tolerance_region_w = (tpl_w / 2) + (
                        self._overlapping_factor * self._scaling_factor)
                    tolerance_region_h = (tpl_h / 2) + (
                        self._overlapping_factor * self._scaling_factor)

                    if (x >= point_already_analyzed[0] - tolerance_region_w and
                                x <= point_already_analyzed[0] + tolerance_region_w) and\
                            (y >= point_already_analyzed[1] - tolerance_region_h and
                                     y <= point_already_analyzed[1] + tolerance_region_h):

                        is_already_found = True
                        #print point[0],point_already_analyzed[0],point[1],point_already_analyzed[1]

                if is_already_found is False:

                    #hist_blue = cv2.calcHist([self._source_image_color[y:y+h, x:x+w]], [0], None, [256], [0, 256])
                    #hist_green = cv2.calcHist([self._source_image_color[y:y+h, x:x+w]], [1], None, [256], [0, 256])
                    #hist_red = cv2.calcHist([self._source_image_color[y:y+h, x:x+w]], [2], None, [256], [0, 256])

                    hist_rgb = cv2.calcHist(
                        [self._source_image_color[y:y + h, x:x + w]],
                        [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
                    hist_rgb = cv2.normalize(hist_rgb).flatten()

                    #comp_blue = cv2.compareHist(hist_blue,main_template.hist_blue,cv2.cv.CV_COMP_BHATTACHARYYA)
                    #comp_green = cv2.compareHist(hist_green, main_template.hist_green, cv2.cv.CV_COMP_BHATTACHARYYA)
                    #comp_red = cv2.compareHist(hist_red, main_template.hist_red, cv2.cv.CV_COMP_BHATTACHARYYA)
                    comp_rgb = cv2.compareHist(hist_rgb,
                                               main_template.hist_rgb,
                                               cv2.cv.CV_COMP_BHATTACHARYYA)

                    analyzed_points.append((x, y, w, h))

                    #if (comp_blue > 0.3 or comp_green > 0.3 or comp_red > 0.3) and main_template.match_colors is True:
                    if comp_rgb > 0.2 and main_template.match_colors is True:
                        continue

                    self._timedout_main_components.append(
                        MatchResult((x, y, w, h)))

                    #self._log_manager.set_main_object_points((x, y, w, h))
                    if self._log_manager.is_log_enable() is True:
                        img_copy = source_image.copy()
                        cv2.rectangle(img_copy,
                                      ((x - offset_x), (y - offset_y)),
                                      ((x - offset_x) + w, (y - offset_y) + h),
                                      (0, 0, 255), 2)
                        self._log_manager.save_image(self.__find_log_folder,
                                                     "object_found.png",
                                                     img_copy)

                    sub_templates_len = len(self._sub_components)

                    if sub_templates_len == 0:
                        main_object_result = MatchResult((x, y, w, h))
                        object_found[0] = main_object_result
                        object_found[1] = None
                        objects_found.append(object_found)
                    else:
                        #print sub_templates_len
                        total_sub_template_found = 0
                        sub_objects_found = []
                        timed_out_objects = []
                        timed_out_sub_extra_images = []
                        for sub_template in self._sub_components:
                            """
                            if self._flag_thread_have_to_exit is True:
                                self._flag_thread_have_to_exit = False
                                self._flag_thread_started = False
                                self._source_image_gray = None
                                self._source_image_color = None
                                return []
                            """

                            sub_template_coordinates = self._find_sub_template(
                                (x, y), sub_template)

                            if sub_template_coordinates is not None:
                                sub_objects_found.append(
                                    sub_template_coordinates)
                                total_sub_template_found = total_sub_template_found + 1
                                timed_out_objects.append(
                                    (sub_template_coordinates,
                                     sub_template[1]))
                            else:
                                timed_out_objects.append(
                                    (None, sub_template[1]))

                            #timed_out_sub_extra_images.append()

                            if total_sub_template_found == sub_templates_len:
                                #good_points.append((x, y, w, h))

                                main_object_result = MatchResult((x, y, w, h))
                                object_found[0] = main_object_result

                                object_found[1] = sub_objects_found

                                objects_found.append(object_found)
                        self._timedout_sub_components.append(timed_out_objects)
                        self._sub_extra_imgages_log.append(
                            self.__timed_out_sub_extra_images)
                    #self._log_manager.save_object_image("img_" + str(cnt) + ".png")
                cnt = cnt + 1

            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                if self._is_object_finder is True:
                    self._objects_found_of_sub_object_finder.extend(
                        copy.deepcopy(objects_found))
                    #if wait_disappear is False:

                    #if self._info_manager.get_info('LOG OBJ IS FOUND') is False:
                    if self._info_manager.get_info(
                            'LOG OBJ FINDER TYPE') is None:
                        self._info_manager.set_info('LOG OBJ FINDER TYPE', 0)

                    self._log_manager.save_objects_found(
                        self._name,
                        self.get_source_image_gray(),
                        self._objects_found,
                        [x[1] for x in self._sub_components],
                        self.main_xy_coordinates,
                        self.sub_xy_coordinates,
                        finder_type=0)

                self._cacheManager.SetLastObjFoundFullImg(
                    self._source_image_gray)

            if source_img_auto_set is True:
                self._source_image_gray = None
                self._source_image_color = None
                source_img_auto_set = False
            """
            if self._flag_check_before_exit is True:
                self._flag_checked_before_exit = True
            """

            #time.sleep(40)

            self._flag_thread_started = False

            if self._calc_last_finder_time is True:
                self._last_finder_time = time.time() - time_before_find
                self._calc_last_finder_time = False

            #print time.time() - tzero

            return self._objects_found

        except Exception, err:
            self._log_manager.save_exception(
                "ERROR", "an exception has occurred: " + str(err) +
                " on line " + str(sys.exc_traceback.tb_lineno))
            self._flag_thread_started = False
            return []
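
Compared with the previous example, this variant can restrict matching to a single B, G or R channel and adds a colour gate: an 8x8x8 BGR histogram of each candidate region is compared with the template histogram using the Bhattacharyya distance, and the candidate is skipped when the distance exceeds 0.2 (0 means identical colour distributions, 1 completely different ones). A minimal sketch of that gate, written against the modern cv2 constant names rather than the legacy cv2.cv binding used in the listing; template_hist is assumed to have been built the same way from the template image:

import cv2


def passes_color_gate(region_bgr, template_hist, max_distance=0.2):
    # 8x8x8 BGR histogram of the candidate region, normalized and flattened,
    # mirroring the calcHist/normalize calls in the listing above
    hist = cv2.calcHist([region_bgr], [0, 1, 2], None,
                        [8, 8, 8], [0, 256, 0, 256, 0, 256])
    hist = cv2.normalize(hist, hist).flatten()
    # Bhattacharyya distance: 0 = identical colour distribution
    distance = cv2.compareHist(hist, template_hist, cv2.HISTCMP_BHATTACHARYYA)
    return distance <= max_distance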
Example #15
    def find(self):

        self._objects_found = []

        main_object = self._main_object[0]

        if not isinstance(main_object, ObjectFinder):
            #main_object[1] = [1,1]
            if self._main_object[1] is not None:
                roi = Roi(self._main_object[1])
            else:
                roi = None

            main_component = main_object._main_component[0]
            main_object._main_component = (main_component, roi)

            """
            if isinstance(main_object, RectFinder):
                main_rect = main_object._main_rect[0]
                main_object._main_rect = (main_rect, roi)

            elif isinstance(main_object, ImageFinder):
                main_template = main_object._main_template[0]
                main_object._main_template = (main_template, roi)

            elif isinstance(main_object, TextFinder):
                main_text = main_object._main_text[0]
                main_object._main_text = (main_text, roi)
            """

        if self._source_image_color is None or self._source_image_gray is None:

            screen_capture = ScreenManager()
            src_img_color = screen_capture.grab_desktop(screen_capture.get_color_mat)
            self.set_source_image_color(src_img_color)
            src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
            self.set_source_image_gray(src_img_gray)
            main_object.set_source_image_color(self._source_image_color)
            main_object.set_source_image_gray(self._source_image_gray)

        objects_found = []

        main_object.find()

        cnt = 0
        for object in main_object._objects_found:

            if self._flag_thread_have_to_exit is True:
                main_object._flag_thread_have_to_exit = False
                main_object._flag_thread_started = False
                self._flag_thread_have_to_exit = False
                self._flag_thread_started = False
                return []

            object_found = []
            object_found.append([])
            object_found.append([])

            x = object[0].x
            y = object[0].y
            w = object[0].width
            h = object[0].height

            sub_objects_len = len(self._sub_objects)

            if sub_objects_len == 0:
                #good_points.append((x, y, w, h))

                main_object_result = MatchResult((x, y, w, h))
                object_found[0] = main_object_result

                object_found[1] = None
                objects_found.append(object_found)
            else:

                total_sub_object_found = 0

                sub_objects_found = []
                for sub_object in self._sub_objects:

                    if self._flag_thread_have_to_exit is True:
                        main_object._flag_thread_have_to_exit = False
                        main_object._flag_thread_started = False
                        self._flag_thread_have_to_exit = False
                        self._flag_thread_started = False
                        return []

                    sub_template_coordinates = copy.deepcopy(self.find_sub_object((x, y), sub_object))

                    if sub_template_coordinates is not None:
                        sub_objects_found.append(sub_template_coordinates)
                        total_sub_object_found = total_sub_object_found + 1

                    if total_sub_object_found == sub_objects_len:
                        #good_points.append((x, y, w, h))

                        main_object_result = MatchResult((x, y, w, h))
                        object_found[0] = main_object_result

                        object_found[1] = sub_objects_found

                        objects_found.append(object_found)
                        self._main_indexes_to_keep.append(cnt)

            #self._log_manager.save_object_image("img_" + str(cnt) + ".png")
            cnt = cnt + 1

        if len(objects_found) > 0:
            self._objects_found = copy.deepcopy(objects_found)
            main_object.rebuild_result(self._main_indexes_to_keep)
            self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)

        self._source_image_color = None
        self._source_image_gray = None

        if self._flag_check_before_exit is True:
            self._flag_checked_before_exit = True
            #print "self._flag_checked_before_exit = True"
            #print self._time_checked_before_exit_start

        self._flag_thread_started = False
        main_object._flag_thread_started = False

        return self._objects_found
Example #16
    def find(self):
        """
        find the main text and sub texts in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list of [main text MatchResult, sub text MatchResults] pairs
        """
        try:
            #print "into find"

            self._objects_found = []

            source_img_auto_set = False

            if self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            self.__find_log_folder = datetime.datetime.now().strftime("%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0

            main_text = self._main_component[0]
            roi = self._main_component[1]

            if roi is not None:

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width, channels = self._source_image_color.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_color[y1:y2, x1:x2]
            else:
                source_image = self._source_image_color


            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "source_img.png", source_image)

            source_image = cv2.cvtColor(source_image, cv2.COLOR_BGR2RGB)
            source_image_pil = Image.fromarray(source_image)
            width = source_image_pil.size[0]
            height = source_image_pil.size[1]
            source_image = source_image_pil.resize((width * 3, height * 3), Image.BICUBIC)

            """
            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "resized.png", source_image)
            """

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            cnt = 0

            self.api = tesseract.TessBaseAPI()
            self.api.Init(get_python_lib() + os.sep + "alyvix" + os.sep + "extra" + os.sep +
                          "Tesseract-OCR" + os.sep, main_text.lang, tesseract.OEM_DEFAULT)
            self.api.SetPageSegMode(tesseract.PSM_AUTO)
            self.api.SetVariable("tessedit_char_whitelist", main_text.whitelist)

            phrase = ""
            concatWord = False
            wordLine = None

            timex = time.time()

            color_img = cv.CreateImageHeader(source_image.size, cv.IPL_DEPTH_8U, 3)

            cv.SetData(color_img, source_image.tostring())

            grey_img = cv.CreateImage(cv.GetSize(color_img), 8, 1)

            cv.CvtColor(color_img, grey_img, cv.CV_RGB2GRAY)

            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "resized.png", grey_img)

            #cv.SaveImage('c:\\alan\\image2.jpg', grey_img)
            tesseract.SetCvImage(grey_img, self.api)

            #write debug image
            #LogManager.WriteCvImage(datetime.now().strftime("%H_%M_%S_%f") + '_Grey.png', grey_img)

            #text=Ocr.Api.GetUTF8Text()
            
            if self._log_manager.is_log_enable() is True:
                textfile_log_name = datetime.datetime.now().strftime("%H_%M_%S.%f")[:-3] + "_resized.txt"

            text = self.api.GetHOCRText(0)

            #print text
            root = ET.fromstring(text)

            line_sep = ""
            break_loop = False
            self.__phrase_backup = ""
            for span in root.iter('span'):
                object_found = []
                object_found.append([])
                object_found.append([])

                if self._flag_thread_have_to_exit is True:
                    self._flag_thread_have_to_exit = False
                    self._flag_thread_started = False
                    self._source_image_color = None
                    self._source_image_gray = None
                    return []

                try:
                    #print span.attrib,span.text
                    #print span.get('title')

                    title = span.get('title')
                    title = title.replace(';', '')
                    coordinates = title.split(' ')

                    if span.get('class') == 'ocr_line':
                        line = span.get('id')
                        line = line.replace('line_','')
                        lineNr = line
                        line_sep = "\n" #"RRRRRRR" #os.linesep

                    if span.find('strong') is not None:
                        span.text = span.find('strong').text

                    if span.find('em') is not None:
                        span.text = span.find('em').text

                    if span.find('strong/em') is not None:
                        span.text = span.find('strong/em').text

                    if span.text is None:
                        continue

                    phrase = phrase + " " + span.text #+ line_sep
                    self.__phrase_backup = self.__phrase_backup + " " + line_sep + span.text

                    if line_sep != "":
                        line_sep = ""

                    #print phrase
                    #print main_text.text

                    result = re.match(".*" + unicode(main_text.text, "UTF-8") + ".*", phrase, re.DOTALL | re.IGNORECASE)

                    #print span.text," >> line:",lineNr,"coordinates:",int(coordinates[1])/3,int(coordinates[2])/3,int(coordinates[3])/3,int(coordinates[4])/3
                    #print "text found:",phrase
                    

                    #print "tempo ocr", time.time() - timex
                    if result is not None:

                        x = offset_x + (int(coordinates[1])/3)
                        y = offset_y + (int(coordinates[2])/3)
                        #print "offset x, x", offset_x, (int(coordinates[1])/3)
                        #print "offset y, y", offset_y, (int(coordinates[2])/3)


                        w = (int(coordinates[3])/3) - (int(coordinates[1])/3)
                        h = (int(coordinates[4])/3) - (int(coordinates[2])/3)

                        try:
                            #print "text from Ocr engine:",phrase
                            #print "ocr time:",time.time() - timex,"sec."
                            #phrase = phrase.replace(main_text.text,"")
                            insensitive_phrase = re.compile(re.escape(main_text.text), re.IGNORECASE)
                            phrase = insensitive_phrase.sub('', phrase)
                            #print phrase

                        except Exception, err:
                            pass #print err

                        sub_texts_len = len(self.__sub_components)

                        if sub_texts_len == 0:
                            #good_points.append((x, y, w, h))

                            main_object_result = MatchResult((x, y, w, h))
                            object_found[0] = main_object_result

                            object_found[1] = None
                            objects_found.append(object_found)
                        else:

                            total_sub_template_found = 0

                            sub_objects_found = []
                            for sub_text in self.__sub_components:
                                #print "entering in sub text"

                                if self._flag_thread_have_to_exit is True:
                                    self._flag_thread_have_to_exit = False
                                    self._flag_thread_started = False
                                    self._source_image_color = None
                                    self._source_image_gray = None
                                    return []

                                sub_template_coordinates = self._find_sub_text((x, y), sub_text)

                                if sub_template_coordinates is not None:
                                    sub_objects_found.append(sub_template_coordinates)
                                    total_sub_template_found = total_sub_template_found + 1

                                if total_sub_template_found == sub_texts_len:
                                    #good_points.append((x, y, w, h))

                                    main_object_result = MatchResult((x, y, w, h))
                                    object_found[0] = main_object_result

                                    object_found[1] = sub_objects_found

                                    objects_found.append(object_found)
                                    #print "len obj found:", len(objects_found)
                                    #print "appended"
                                    break_loop = True

                            #if break_loop is True:
                            #    break

                        # write debug message
                        #LogManager.WriteMessage("debug", "text from Ocr engine: " + phrase)
                        #return int(coordinates[1]),int(coordinates[2]),int(coordinates[3]),int(coordinates[4])

                except Exception, err:
                    pass #print err
                    #LogManager.IsInError = True
                    #LogManager.WriteMessage("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))


            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_info_file(self.__find_log_folder, textfile_log_name, self.__phrase_backup)

            #print len(objects_found)
            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                #gray_source_img = cv2.cvtColor(self._source_image, cv2.COLOR_BGR2GRAY)
                self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)
            self._flag_thread_started = False

            if source_img_auto_set is True:
                self._source_image_color = None
                self._source_image_gray = None
                source_img_auto_set = False

            if self._flag_check_before_exit is True:
                self._flag_checked_before_exit = True

            return self._objects_found
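
The text finder above upscales the region by a factor of 3, runs Tesseract, requests hOCR output and walks the <span> elements: the span texts build up the phrase that is matched against main_text.text, and each bounding box comes from the title attribute ("bbox x1 y1 x2 y2 ..."), divided by 3 to undo the upscaling. A minimal sketch of that hOCR parsing step, independent of the legacy tesseract/cv bindings used above (the helper name is made up, and the hOCR fragment is assumed to carry no XML namespace, as in the listing):

import xml.etree.ElementTree as ET


def parse_hocr_words(hocr_text, scale=3):
    # returns (text, (x, y, w, h)) tuples with coordinates scaled back to the
    # original, non-upscaled image
    words = []
    for span in ET.fromstring(hocr_text).iter('span'):
        if span.text is None or not span.text.strip():
            continue
        parts = span.get('title', '').replace(';', '').split(' ')
        if len(parts) < 5 or parts[0] != 'bbox':
            continue
        x1, y1, x2, y2 = [int(value) // scale for value in parts[1:5]]
        words.append((span.text, (x1, y1, x2 - x1, y2 - y1)))
    return words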
Example #17
class BaseFinder(object):


    def __init__(self, name=None):
        """
        init the class

        :type name: string
        :param name: the object name
        """
        self._main_component = None
        self._sub_components = []

        self._timedout_main_components = []
        self._timedout_sub_components = []

        self._main_extra_img_log = None
        self._sub_extra_imgages_log = []

        self._rect_extra_timedout_image = None

        self._robot_manager = RobotManager()

        self._rf_is_set = self._robot_manager.context_is_set()


        self._source_image_color = None
        self._source_image_gray = None
        self._objects_found = []
        self._log_manager = None

        self._timed_out_images = []

        self._find_thread_images = []
        self._find_thread_images_disappear = []
        #self._find_thread_images_copy = []
        self._last_thread_image = None
        self._last_thread_image_copy = None
        self._heartbeat_images = []
        self._heartbeat_images_copy = []

        #variables for the perfdata
        self._cacheManager = None
        self._min_different_contours = 15
        self._flag_thread_started = False
        self._flag_check_before_exit = False
        self._flag_checked_before_exit = False
        self._flag_thread_have_to_exit = False
        self._screen_capture = None
        #end perfdata section

        self._info_manager = InfoManager()
        self._scaling_factor = self._info_manager.get_info("SCALING FACTOR INT")

        self._time_checked_before_exit_start = 0

        self._objects_finders_caller = []
        self._name_with_caller = None

        self._name = name
        self._log_manager = LogManager()
        self._log_manager.set_object_name(self._name)
        self._screen_capture = ScreenManager()
        self._cacheManager = CacheManager()
        self._configReader = ConfigReader()

        self.__enable_debug_calcperf = False

        #self._timer_for_disappear = 0

        self._object_is_found_flag = False

        self._is_object_finder = False
        self._objects_found_of_sub_object_finder = []

    def _compress_image(self, img):
        return cv2.imencode('.png', img)[1]

    def _uncompress_image(self, compressed_img):
        return cv2.imdecode(compressed_img, cv2.CV_LOAD_IMAGE_GRAYSCALE)

    def set_name(self, name):
        """
        set the name of the object.

        :type name: string
        :param name: the name of the object
        """
        self._name = name
        self._log_manager.set_object_name(self._name)

    def get_name(self):
        """
        get the name of the object.

        :rtype: string
        :return: the name of the object
        """
        return self._name

    def set_name_with_caller(self):

        tmp_name = self._name

        for object_caller in self._objects_finders_caller:
            tmp_name = object_caller + os.sep + tmp_name

        self._name_with_caller = tmp_name
        self._log_manager.set_object_name(self._name_with_caller)

    def set_source_image_color(self, image_data):
        """
        set the color image on which the find method will search the object.

        :type image_data: numpy.ndarray
        :param image_data: the color image
        """
        self._source_image_color = image_data.copy()
        img_gray = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        self.set_source_image_gray(img_gray)
        #self._log_manager.set_image(self._source_image)

    def set_source_image_gray(self, image_data):
        """
        set the gray image on which the find method will search the object.

        :type image_data: numpy.ndarray
        :param image_data: the gray image
        """
        self._source_image_gray = image_data.copy()

    def get_source_image_color(self):
        """
        get the color image on which the find method will search the object.

        :rtype: numpy.ndarray
        :return: the source color image
        """
        return self._source_image_color

    def get_source_image_gray(self):
        """
        get the gray image on which the find method will search the object.

        :rtype: numpy.ndarray
        :return: the source gray image
        """
        return self._source_image_gray

    def find(self):
        raise NotImplementedError
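
    # Note on wait() below, summarizing the loop that follows: the method grabs
    # the desktop roughly every CHECK DIFF INTERVAL seconds, buffers the
    # compressed grabs as heartbeat images and, every FINDER THREAD INTERVAL
    # seconds, hands the latest grab to find() on a daemon thread. It returns
    # the performance data once the object is found, -2 when wait_disappear is
    # True and the object has appeared, -1 on timeout, and None if an exception
    # was logged.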

    def wait(self, timeout=-1, wait_disappear=False):
        """
        wait until the object appears on the screen.
        if the timeout value is -1 (the default), the timeout is read from the config file;
        if no config file exists, the timeout defaults to 15 seconds.

        :type timeout: int
        :param timeout: timeout in seconds
        :type wait_disappear: bool
        :param wait_disappear: if True, return -2 as soon as the object appears,
            so that a disappearance check can run afterwards
        """

        #cv2.imwrite()

        #self._robot_manager.write_log_message("wait method: " + self.get_name(), "ERROR", False)
        #self._robot_manager.write_log_message("wait method: " + self.get_name(), "ERROR", False)
        #sss = self._robot_manager.get_suite_name()
        #ttt = self._robot_manager.get_testcase_name()

        #self._robot_manager.method1().method2()

        timeout_value = 15

        if timeout == -1:
            timeout_value = self._configReader.get_finder_wait_timeout()
        else:
            timeout_value = timeout

        self._objects_found = []

        self._heartbeat_images = []
        self._find_thread_images = []

        self._flag_thread_started = False
        self._flag_thread_have_to_exit = False

        self._heartbeat_images_copy = []

        time_elapsed = 0.0
        #time_of_last_change = 0.0
        self._time_checked_before_exit_start = None

        self._object_is_found_flag = False

        #screenCapture = ScreenManager()
        thread_interval = self._info_manager.get_info("FINDER THREAD INTERVAL") #self._configReader.get_finder_thread_interval()
        #thread_interval = 0.5
        check_diff_interval = self._info_manager.get_info("CHECK DIFF INTERVAL")

        img1 = self._cacheManager.GetLastObjFoundFullImg()

        if img1 is None:
            img1 = self._screen_capture.grab_desktop(self._screen_capture.get_gray_mat)

        thread_t0 = time.time()
        time_before_loop = time.time()
        while True:
            #txx = time.time()
            try:

                if len(self._objects_found) > 0 and self._flag_thread_started is False:
                    #do analysis cjecl_time()
                    """
                    print "len main:", len(self._objects_found)

                    print "main x, y, w, h:", self._objects_found[0][0].x, self._objects_found[0][0].y, self._objects_found[0][0].width, self._objects_found[0][0].height


                    if self._objects_found[0][1] is not None:
                        print "len secodn:", len(self._objects_found[0][1])
                        for sub_obj in self._objects_found[0][1]:
                            print "sub x, y, w, h:", sub_obj.x, sub_obj.y, sub_obj.width, sub_obj.height
                    """

                    self._last_thread_image = self._uncompress_image(self._find_thread_images[-1][1])

                    #time.sleep(3600)

                    if wait_disappear is False:
                        self._log_manager.save_objects_found(self._name, self.get_source_image_gray(), self._objects_found, [x[1] for x in self._sub_components])

                    if wait_disappear is True:
                        self._heartbeat_images_copy = copy.deepcopy(self._heartbeat_images)
                        self._last_thread_image_copy = copy.deepcopy(self._last_thread_image)
                        #self._timer_for_disappear = self._heartbeat_images[-1][0]
                        #self._find_thread_images_copy = copy.deepcopy(self._find_thread_images)
                        return -2
                    else:
                        self._object_is_found_flag = True
                        self._last_thread_image_copy = copy.deepcopy(self._last_thread_image)
                        return self._get_performance()


                if time_elapsed > timeout_value and self._flag_thread_started is False:
                    self._last_thread_image = self._uncompress_image(self._find_thread_images[-1][1])
                    #from alyvix.finders.cv.rectfinder import RectFinder
                    #from alyvix.finders.cv.imagefinder import ImageFinder
                    #from alyvix.finders.cv.textfinder import TextFinder
                    from alyvix.finders.cv.objectfinder import ObjectFinder

                    #if not isinstance(self, ObjectFinder):
                    self._log_manager.save_timedout_objects(self._name + "_timedout", self.get_source_image_gray(), self._timedout_main_components, self._timedout_sub_components, self._main_extra_img_log, self._sub_extra_imgages_log)
                    #else:
                    if isinstance(self, ObjectFinder):

                        #self._log_manager.save_timedout_objects(self._name + "_timedout", self._last_thread_image, self._main_component[0]._timedout_main_components, self._main_component[0]._timedout_sub_components, self._main_component[0]._main_extra_img_log, self._main_component[0]._sub_extra_imgages_log, True, self._main_component[0]._name)

                        if len(self._main_component[0]._objects_found) == 0:
                            self._log_manager.save_timedout_objects(self._name + "_timedout", self._last_thread_image, self._main_component[0]._timedout_main_components, self._main_component[0]._timedout_sub_components, self._main_component[0]._main_extra_img_log, self._main_component[0]._sub_extra_imgages_log, True, self._main_component[0]._name)

                        for t_sub in self._sub_components:
                            self._log_manager.save_timedout_objects(self._name + "_timedout", self._last_thread_image, t_sub[0]._timedout_main_components, t_sub[0]._timedout_sub_components, t_sub[0]._main_extra_img_log, t_sub[0]._sub_extra_imgages_log, True, t_sub[0]._name)

                    return -1

                t0 = time.time()

                #cv2.imwrite('img2.png', img2)

                #if time.time() - thread_t0 >= thread_interval:
                if time_elapsed < timeout_value and time.time() - thread_t0 >= thread_interval and self._flag_thread_started is False:
                    thread_t0 = time.time()

                    self._flag_thread_started = True

                    """
                    folder = 'c:\\log\\buffer_images'
                    for the_file in os.listdir(folder):
                        file_path = os.path.join(folder, the_file)
                        try:
                            if os.path.isfile(file_path):
                                os.unlink(file_path)
                        except Exception, e:
                            print e
                    """


                    #for i in range(len(self._find_thread_images)):
                        #cv2.imwrite("c:\\log\\buffer_images\\_old_" + str(self._find_thread_images[i][0]) + ".png", self._uncompress_image(self._find_thread_images[i][1]))


                    self._find_thread_images = copy.deepcopy(self._heartbeat_images)
                    self._heartbeat_images = []

                    self.set_source_image_color(img2_color)
                    self.set_source_image_gray(img2_gray)
                    if self._log_manager.is_log_enable() is True:
                        self._log_manager.delete_all_items(keep_items=20, exclude_item="difference")
                    worker = Thread(target=self.find)
                    worker.setDaemon(True)
                    worker.start()

                # grab a fresh desktop frame on every polling cycle and keep a compressed gray
                # copy in the heartbeat buffer; it is swapped into _find_thread_images before
                # each new worker run and provides the screenshot logged on timeout
                img2_color = self._screen_capture.grab_desktop(self._screen_capture.get_color_mat)
                img2_gray = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY)
                self._heartbeat_images.append((time_elapsed, self._compress_image(img2_gray)))


                t1 = time.time() - t0
                time_sleep = check_diff_interval - t1
                if time_sleep < 0:
                    time_sleep = 0

                time.sleep(time_sleep)

                time_elapsed = time.time() - time_before_loop
                #print time_elapsed

            except Exception, err:
                #print str(err) + " on line " + str(sys.exc_traceback.tb_lineno)
                self._log_manager.save_exception("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))
                return None
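
The loop above interleaves screen capture with a search that runs in a background thread. The following stripped-down sketch is illustrative only and not Alyvix code (the PollingSketch class, its find() stand-in and its timings are invented for the example); it shows the same pattern with the standard library: a polling loop that keeps running while a daemon worker performs the slow work, guarded by a started flag.

import time
from threading import Thread

class PollingSketch(object):

    def __init__(self):
        self._flag_thread_started = False

    def find(self):
        # stand-in for the real image search
        time.sleep(1)
        self._flag_thread_started = False

    def run(self, timeout=5, interval=2):
        t0 = time.time()
        while time.time() - t0 < timeout:
            if self._flag_thread_started is False:
                self._flag_thread_started = True
                worker = Thread(target=self.find)
                worker.setDaemon(True)
                worker.start()
            # the loop keeps polling (capturing, in the real code) while the worker runs
            time.sleep(interval)

PollingSketch().run()
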
Example #18
    def find(self):

        self._temp_scraped_string = []

        self._info_manager.set_info('last log image order', 0)
        self._info_manager.set_info('LOG OBJ FINDER COLOR COUNTER', 0)

        time_before_find = time.time()

        for sub_object in self._sub_components:
            sub_object[0]._objects_found_of_sub_object_finder = []

        self._timedout_main_components = []
        self._timedout_sub_components = []

        self._objects_found = []

        main_object = self._main_component[0]

        if not isinstance(main_object, ObjectFinder):
            #main_object[1] = [1,1]
            if self._main_component[1] is not None:
                roi = Roi(self._main_component[1])
            else:
                roi = None

            main_component = main_object._main_component[0]
            main_object._main_component = (main_component, roi)

            """
            if isinstance(main_object, RectFinder):
                main_rect = main_object._main_rect[0]
                main_object._main_rect = (main_rect, roi)

            elif isinstance(main_object, ImageFinder):
                main_template = main_object._main_template[0]
                main_object._main_template = (main_template, roi)

            elif isinstance(main_object, TextFinder):
                main_text = main_object._main_text[0]
                main_object._main_text = (main_text, roi)
            """

        source_img_auto_set = False

        # no source frame was supplied by the caller: grab the desktop now and remember that it
        # was auto-set, so it can be released again at the end of this find()
        if self._source_image_color is None or self._source_image_gray is None:

            screen_capture = ScreenManager()
            src_img_color = screen_capture.grab_desktop(screen_capture.get_color_mat)
            self.set_source_image_color(src_img_color)
            src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
            self.set_source_image_gray(src_img_gray)
            #main_object.set_source_image_color(self._source_image_color)
            #main_object.set_source_image_gray(self._source_image_gray)
            source_img_auto_set = True

        main_object.set_source_image_color(self._source_image_color)
        main_object.set_source_image_gray(self._source_image_gray)

        objects_found = []

        main_object.find()

        #self._info_manager.set_info(1)

        cnt = 0
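        # the main component has been searched first; now require, for every main match, that
        # each sub component is also found in its region relative to that match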
        for object in main_object._objects_found:

            self._info_manager.set_info('LOG OBJ FINDER COLOR COUNTER', 1)

            #self._info_manager.set_info('LOG OBJ FINDER COLOR COUNTER', 0)


            """
            if self._flag_thread_have_to_exit is True:
                main_object._flag_thread_have_to_exit = False
                main_object._flag_thread_started = False
                self._flag_thread_have_to_exit = False
                self._flag_thread_started = False
                return []
            """

            object_found = []
            object_found.append([])
            object_found.append([])

            x = object[0].x
            y = object[0].y
            w = object[0].width
            h = object[0].height

            sub_objects_len = len(self._sub_components)

            self._timedout_main_components.append(MatchResult((x, y, w, h)))

            if sub_objects_len == 0:
                #good_points.append((x, y, w, h))

                main_object_result = MatchResult((x, y, w, h))
                object_found[0] = main_object_result

                object_found[1] = None
                objects_found.append(object_found)
                self._main_indexes_to_keep.append(cnt)
            else:

                total_sub_object_found = 0

                sub_objects_found = []
                timed_out_objects = []
                cnt_sub_obj = 0

                for sub_object in self._sub_components:

                    """
                    if self._flag_thread_have_to_exit is True:
                        main_object._flag_thread_have_to_exit = False
                        main_object._flag_thread_started = False
                        self._flag_thread_have_to_exit = False
                        self._flag_thread_started = False
                        return []
                    """

                    #sub_object._objects_found = []

                    # sub components flagged as scrapers also return the text they read from the
                    # screen; plain sub components only return match coordinates
                    if self._sub_components_scraper[cnt_sub_obj] == True:
                        (scraped_string, sub_template_coordinates) = self.find_sub_object((x, y), sub_object, scraper=True)
                        self._temp_scraped_string.append(scraped_string)

                    else:
                        sub_template_coordinates = copy.deepcopy(self.find_sub_object((x, y), sub_object))

                    self._info_manager.set_info('LOG OBJ FINDER COLOR COUNTER',
                                                self._info_manager.get_info('LOG OBJ FINDER COLOR COUNTER') + 1)

                    if self._info_manager.get_info('LOG OBJ FINDER COLOR COUNTER') >= \
                            len(self._info_manager.get_info('LOG OBJ FINDER FILL COLOR')):
                        self._info_manager.set_info('LOG OBJ FINDER COLOR COUNTER', 0)

                    if sub_template_coordinates is not None:
                        sub_objects_found.append(sub_template_coordinates)
                        timed_out_objects.append((sub_template_coordinates, sub_object[1]))
                        total_sub_object_found = total_sub_object_found + 1
                    else:
                        timed_out_objects.append((None, sub_object[1]))

                    if total_sub_object_found == sub_objects_len:
                        #good_points.append((x, y, w, h))

                        main_object_result = MatchResult((x, y, w, h))
                        object_found[0] = main_object_result

                        object_found[1] = sub_objects_found

                        objects_found.append(object_found)
                        self._main_indexes_to_keep.append(cnt)

                        if len(self._temp_scraped_string) > 0:
                            #self._scraped_text = self._temp_scraped_string[cnt]
                            print "text from scraper: " + self._temp_scraped_string[cnt]
                            self._index_of_obj_with_scraped_found.append(cnt)

                    cnt_sub_obj += 1

                self._timedout_sub_components.append(timed_out_objects)
            #self._log_manager.save_object_image("img_" + str(cnt) + ".png")
            cnt = cnt + 1

        if len(objects_found) > 0:
            self._info_manager.set_info('LOG OBJ IS FOUND', True)
            self._objects_found = copy.deepcopy(objects_found)
            main_object.rebuild_result(self._main_indexes_to_keep)
            self.rebuild_result_for_sub_component(self._main_indexes_to_keep)
            self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)

        #self._source_image_color = None
        #self._source_image_gray = None

        if source_img_auto_set is True:
            self._source_image_gray = None
            self._source_image_color = None
            source_img_auto_set = False

        """
        if self._flag_check_before_exit is True:
            self._flag_checked_before_exit = True
            #print "self._flag_checked_before_exit = True"
            #print self._time_checked_before_exit_start
        """

        self._flag_thread_started = False
        main_object._flag_thread_started = False

        if self._calc_last_finder_time is True:
            self._last_finder_time = time.time() - time_before_find
            self._calc_last_finder_time = False

        return self._objects_found
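
A minimal usage sketch of the flow above, not part of the original example: it assumes the same imports as the example (cv2 and ScreenManager) and a hypothetical, already-configured finder instance named finder (for instance a RectFinder from alyvix.finders.cv.rectfinder whose main and sub components were set elsewhere). The set_source_image_* calls mirror what find() does on its own when no source frame is supplied.

screen = ScreenManager()
frame_color = screen.grab_desktop(screen.get_color_mat)
frame_gray = cv2.cvtColor(frame_color, cv2.COLOR_BGR2GRAY)

finder.set_source_image_color(frame_color)
finder.set_source_image_gray(frame_gray)

matches = finder.find()  # list of [MatchResult, sub matches or None]
for main_match, sub_matches in matches or []:
    print main_match.x, main_match.y, main_match.width, main_match.height
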
Example #19
    def find(self):
        """
        find the main rectangle and its sub rectangles in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list that contains the x, y, width and height of the rectangle(s) found
        """
        try:
            self._timedout_main_components = []
            self._timedout_sub_components = []

            self._main_extra_img_log = None
            self._sub_extra_imgages_log = []

            self._objects_found = []

            source_img_auto_set = False

            if self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            self.__find_log_folder = datetime.datetime.now().strftime("%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0

            main_rect = self._main_component[0]
            roi = self._main_component[1]

            if roi is not None:
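                # clamp the ROI to the source image bounds so the slicing below never goes out
                # of range; the original x1/y1 are kept as offsets to map matches back to
                # full-screen coordinates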

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width, channels = self._source_image_color.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_color[y1:y2, x1:x2]
            else:
                source_image = self._source_image_color

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            blue, green, red = cv2.split(source_image)
            # Run canny edge detection on each channel
            blue_edges = self.__median_canny(blue, 0.2, 0.3)
            green_edges = self.__median_canny(green, 0.2, 0.3)
            red_edges = self.__median_canny(red, 0.2, 0.3)

            # Join edges back into image
            edges = blue_edges | green_edges | red_edges
            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "source_img.png", source_image)
                self._log_manager.save_image(self.__find_log_folder, "edges.png", edges)

            #self._rect_extra_timedout_image = edges.copy()
            if roi is not None:
                self._main_extra_img_log = (edges.copy(), (x1, y1, x2, y2))
            else:
                self._main_extra_img_log = (edges.copy(), None)

            #edges = self.__median_canny(self._source_image, 0.2, 0.3)

            #self._timed_out_images.append(source_image.copy())

            # find the contours
            contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            #self._log_manager.save_image(self.__find_log_folder, "canny.png", edges)

            # build the accepted size window for the main rectangle: an exact width/height pins
            # min and max to that value and any tolerance widens the window around it
            if main_rect.width != 0 and main_rect.height != 0:
                main_rect.min_width = main_rect.max_width = main_rect.width
                main_rect.min_height = main_rect.max_height = main_rect.height

            if main_rect.width_tolerance != 0 and main_rect.width != 0 and main_rect.height != 0:
                main_rect.min_width = main_rect.min_width - main_rect.width_tolerance
                main_rect.max_width = main_rect.max_width + main_rect.width_tolerance

            if main_rect.height_tolerance != 0 and main_rect.width != 0 and main_rect.height != 0:
                main_rect.min_height = main_rect.min_height - main_rect.height_tolerance
                main_rect.max_height = main_rect.max_height + main_rect.height_tolerance

            cnt = 0

            #print main_rect.min_width, main_rect.max_width, main_rect.min_height, main_rect.max_height
            # For each contour, find the bounding rectangle and draw it
            for c in reversed(contours):

                object_found = []
                object_found.append([])
                object_found.append([])
                self.__timed_out_sub_extra_images = []

                x, y, w, h = cv2.boundingRect(c)
                x = offset_x + x
                y = offset_y + y

                #print x, y, w, h

                if(w >= main_rect.min_width and w <= main_rect.max_width and
                        h >= main_rect.min_height and h <= main_rect.max_height):

                    is_already_found = False

                    # contours often produce several nearly identical bounding boxes, so skip any
                    # candidate whose origin falls inside the tolerance region of a rectangle that
                    # has already been analyzed
                    for point_already_analyzed in analyzed_points:

                        tolerance_region_w = (((main_rect.min_width + main_rect.max_width)/2)/2)  + (20 * self._scaling_factor)
                        tolerance_region_h = (((main_rect.min_height + main_rect.max_height)/2)/2) + (20 * self._scaling_factor)

                        #tolerance_region = 20 * self._scaling_factor

                        if (x >= point_already_analyzed[0] - tolerance_region_w and
                                    x <= point_already_analyzed[0] + tolerance_region_w) and\
                                (y >= point_already_analyzed[1] - tolerance_region_h and
                                    y <= point_already_analyzed[1] + tolerance_region_h):

                            is_already_found = True

                    if is_already_found == False:

                        analyzed_points.append((x, y, w, h))

                        self._timedout_main_components.append(MatchResult((x, y, w, h)))

                        #self._log_manager.set_main_object_points((x, y, w, h))
                        if self._log_manager.is_log_enable() is True:
                            img_copy = source_image.copy()
                            cv2.rectangle(img_copy, ((x-offset_x), (y-offset_y)), ((x-offset_x)+w, (y-offset_y)+h), (0, 0, 255), 2)
                            self._log_manager.save_image(self.__find_log_folder, "object_found.png", img_copy)

                        sub_templates_len = len(self._sub_components)

                        if sub_templates_len == 0:
                            #good_points.append((x, y, w, h))

                            main_object_result = MatchResult((x, y, w, h))
                            object_found[0] = main_object_result

                            object_found[1] = None
                            objects_found.append(object_found)
                        else:

                            total_sub_template_found = 0

                            sub_objects_found = []
                            timed_out_objects = []

                            for sub_rect in self._sub_components:

                                sub_template_coordinates = self._find_sub_rect((x, y), sub_rect)

                                if sub_template_coordinates is not None:
                                    sub_objects_found.append(sub_template_coordinates)
                                    total_sub_template_found = total_sub_template_found + 1
                                    timed_out_objects.append((sub_template_coordinates, sub_rect[1]))
                                else:
                                    timed_out_objects.append((None, sub_rect[1]))

                                if total_sub_template_found == sub_templates_len:


                                    #good_points.append((x, y, w, h))

                                    main_object_result = MatchResult((x, y, w, h))
                                    object_found[0] = main_object_result

                                    object_found[1] = sub_objects_found

                                    objects_found.append(object_found)

                            self._timedout_sub_components.append(timed_out_objects)
                            self._sub_extra_imgages_log.append(self.__timed_out_sub_extra_images)

                        #self._log_manager.save_object_image("img__result" + str(cnt) + ".png")
                cnt = cnt + 1

            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                if self._is_object_finder is True:
                    self._objects_found_of_sub_object_finder.extend(copy.deepcopy(objects_found))
                #gray_source_img = cv2.cvtColor(self._source_image, cv2.COLOR_BGR2GRAY)
                self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)

            #time.sleep(40)

            if source_img_auto_set is True:
                self._source_image_color = None
                self._source_image_gray = None
                source_img_auto_set = False

            self._flag_thread_started = False

            return self._objects_found

        except Exception, err:
            self._log_manager.save_exception("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))
            self._flag_thread_started = False
            return None
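
To illustrate the rectangle-candidate step above in isolation, here is a small self-contained sketch that is not part of the original code. The median_canny helper is a hypothetical stand-in for the private __median_canny method, whose body is not shown on this page, and the synthetic frame replaces a real desktop grab; the per-channel edge OR, findContours and boundingRect calls match the ones used in find().

import cv2
import numpy

# hypothetical stand-in for the private __median_canny helper: Canny thresholds derived
# from the channel's median (the exact formula of the original helper is not shown here)
def median_canny(channel, low_factor, high_factor):
    median = numpy.median(channel)
    return cv2.Canny(channel, int(low_factor * median), int(high_factor * median))

# synthetic 200x120 frame with one drawn rectangle instead of a real screenshot
frame = numpy.zeros((120, 200, 3), dtype=numpy.uint8)
cv2.rectangle(frame, (40, 30), (140, 90), (0, 200, 255), 2)

blue, green, red = cv2.split(frame)
edges = median_canny(blue, 0.2, 0.3) | median_canny(green, 0.2, 0.3) | median_canny(red, 0.2, 0.3)

contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in reversed(contours):
    x, y, w, h = cv2.boundingRect(c)
    print "candidate rectangle:", x, y, w, h
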
Example #20
    def find(self):
        """
        find the main template and its sub templates in the source image.

        :rtype: list[[MatchResult, list[MatchResult]]]
        :return: a list that contains the x, y, width and height of the main template(s) found
        """
        try:

            #x = 1 / 0

            #print "main comp:",self._main_component

            source_img_auto_set = False

            self._objects_found = []

            if self._source_image_gray is None or self._source_image_color is None:
                screen_capture = ScreenManager()
                src_img_color = screen_capture.grab_desktop(screen_capture.get_color_mat)
                self.set_source_image_color(src_img_color)
                src_img_gray = cv2.cvtColor(src_img_color, cv2.COLOR_BGR2GRAY)
                self.set_source_image_gray(src_img_gray)
                source_img_auto_set = True

            self.__find_log_folder = datetime.datetime.now().strftime("%H_%M_%S") + "_" + "searching"

            offset_x = 0
            offset_y = 0


            main_template = self._main_component[0]
            #print "main templ:", main_template
            roi = self._main_component[1]

            if roi is not None:

                y1 = roi.y
                y2 = y1 + roi.height
                x1 = roi.x
                x2 = x1 + roi.width

                offset_x = x1
                offset_y = y1

                source_img_height, source_img_width = self._source_image_gray.shape

                if y1 < 0:
                    y1 = 0
                elif y1 > source_img_height:
                    y1 = source_img_height

                if y2 < 0:
                    y2 = 0
                elif y2 > source_img_height:
                    y2 = source_img_height

                if x1 < 0:
                    x1 = 0
                elif x1 > source_img_width:
                    x1 = source_img_width

                if x2 < 0:
                    x2 = 0
                elif x2 > source_img_width:
                    x2 = source_img_width

                #print x1,x2,y1,y2
                source_image = self._source_image_gray[y1:y2, x1:x2]
            else:
                source_image = self._source_image_gray


            if self._log_manager.is_log_enable() is True:
                self._log_manager.save_image(self.__find_log_folder, "source_img.png", source_image)
                self._log_manager.save_image(self.__find_log_folder, "main_template.png", main_template.image_data)

            objects_found = []
            analyzed_points = []
            self._objects_found = []

            w, h = main_template.image_data.shape[::-1]
            src_w, src_h = source_image.shape[::-1]
            tpl_w = w
            tpl_h = h

            # the (possibly ROI-cropped) source is smaller than the template in at least one
            # dimension: matchTemplate cannot run, so reset the thread flags and give up early
            if src_h < tpl_h or src_w < tpl_w:
                self._flag_thread_have_to_exit = False
                self._flag_thread_started = False
                self._source_image_gray = None
                self._source_image_color = None
                return []

            # slide the template over the source with normalized cross-correlation and keep
            # every location whose score reaches the template threshold
            res = cv2.matchTemplate(source_image, main_template.image_data, cv2.TM_CCOEFF_NORMED)

            loc = numpy.where(res >= main_template.threshold)

            cnt = 0
            for point in zip(*loc[::-1]):

                object_found = []
                object_found.append([])
                object_found.append([])

                if self._flag_thread_have_to_exit is True:
                    self._flag_thread_have_to_exit = False
                    self._flag_thread_started = False
                    self._source_image_gray = None
                    self._source_image_color = None
                    return []

                x = offset_x + point[0]
                y = offset_y + point[1]

                is_already_found = False

                for point_already_analyzed in analyzed_points:

                    tolerance_region = 20

                    if (x >= point_already_analyzed[0] - tolerance_region and
                                x <= point_already_analyzed[0] + tolerance_region) and\
                            (y >= point_already_analyzed[1] - tolerance_region and
                                     y <= point_already_analyzed[1] + tolerance_region):

                        is_already_found = True
                        #print point[0],point_already_analyzed[0],point[1],point_already_analyzed[1]

                if is_already_found == False:

                    analyzed_points.append((x, y, w, h))

                    #self._log_manager.set_main_object_points((x, y, w, h))
                    if self._log_manager.is_log_enable() is True:
                        img_copy = source_image.copy()
                        cv2.rectangle(img_copy, ((x-offset_x), (y-offset_y)), ((x-offset_x)+w, (y-offset_y)+h), (0, 0, 255), 2)
                        self._log_manager.save_image(self.__find_log_folder, "object_found.png", img_copy)

                    sub_templates_len = len(self.__sub_components)

                    if sub_templates_len == 0:
                        main_object_result = MatchResult((x, y, w, h))
                        object_found[0] = main_object_result
                        object_found[1] = None
                        objects_found.append(object_found)
                    else:
                        #print sub_templates_len
                        total_sub_template_found = 0
                        sub_objects_found = []
                        for sub_template in self.__sub_components:

                            if self._flag_thread_have_to_exit is True:
                                self._flag_thread_have_to_exit = False
                                self._flag_thread_started = False
                                self._source_image_gray = None
                                self._source_image_color = None
                                return []

                            sub_template_coordinates = self._find_sub_template((x, y), sub_template)

                            if sub_template_coordinates is not None:
                                sub_objects_found.append(sub_template_coordinates)
                                total_sub_template_found = total_sub_template_found + 1

                            if total_sub_template_found == sub_templates_len:
                                #good_points.append((x, y, w, h))

                                main_object_result = MatchResult((x, y, w, h))
                                object_found[0] = main_object_result

                                object_found[1] = sub_objects_found

                                objects_found.append(object_found)

                    #self._log_manager.save_object_image("img_" + str(cnt) + ".png")
                cnt = cnt + 1

            if len(objects_found) > 0:
                self._objects_found = copy.deepcopy(objects_found)
                self._cacheManager.SetLastObjFoundFullImg(self._source_image_gray)
            self._flag_thread_started = False

            if source_img_auto_set is True:
                self._source_image_gray = None
                self._source_image_color = None
                source_img_auto_set = False

            if self._flag_check_before_exit is True:
                self._flag_checked_before_exit = True

            return self._objects_found
        except Exception, err:
            self._log_manager.save_exception("ERROR", "an exception has occurred: " + str(err) + " on line " + str(sys.exc_traceback.tb_lineno))

            self._flag_thread_have_to_exit = False
            self._flag_thread_started = False
            self._source_image_gray = None
            self._source_image_color = None
            return []
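
To make the core matching step of the example above easier to follow in isolation, here is a small self-contained sketch that is not part of the original code (the synthetic frame, the pasted patch and the 0.9 threshold are invented for the illustration); it applies the same cv2.matchTemplate / numpy.where thresholding used in find().

import cv2
import numpy

numpy.random.seed(0)

# synthetic gray frame with a distinctive 30x30 patch pasted at (x=100, y=60)
frame = numpy.random.randint(0, 50, (200, 200)).astype(numpy.uint8)
patch = numpy.random.randint(200, 256, (30, 30)).astype(numpy.uint8)
frame[60:90, 100:130] = patch
template = patch.copy()

# normalized cross-correlation, then keep every location above the threshold
res = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.9
loc = numpy.where(res >= threshold)

# zip(*loc[::-1]) yields (x, y) points, mirroring the loop over matches in find()
for point in zip(*loc[::-1]):
    print "match at x=%d y=%d" % (point[0], point[1])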