Example #1
def fast_template_matching(img, tmpl, max_level):
    pyr_img = build_pyramid(img, max_level)
    pyr_tmpl = build_pyramid(tmpl, max_level)

    results = []
    for level in range(max_level,-1,-1):
        ref = pyr_img[level]
        tpl = pyr_tmpl[level]
        
        if level == max_level:
            results.append(cv2.matchTemplate(ref, tpl, cv2.TM_CCOEFF_NORMED))
        else:
            mask = cv2.pyrUp(results[-1])
            (_, maxval, _, maxloc) = cv2.minMaxLoc(mask)
            if maxval < 0.5:
                break
            # print(maxloc)
            # integer division keeps the slice indices below ints under Python 3
            mask_h, mask_w = mask.shape
            mask_w = mask_w // 50
            mask_h = mask_h // 50
            tpl_h, tpl_w = tpl.shape
            y = maxloc[1] - mask_h // 2
            x = maxloc[0] - mask_w // 2
            w = mask_w + tpl_w
            h = mask_h + tpl_h
            res = np.zeros(ref.shape, np.float32)
            if x+w > ref.shape[1] or y+h > ref.shape[0] or x < 0 or y < 0:
                # Out of bounds
                return (0,(0,0))
            
            res[y:y+mask_h+1,x:x+mask_w+1] = cv2.matchTemplate(ref[y:y+h,x:x+w], tpl, cv2.TM_CCOEFF_NORMED)
            results.append(res)

    (_, maxval, _, maxloc) = cv2.minMaxLoc(results[-1])
    return maxval, maxloc
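The build_pyramid helper is not shown in this example; a minimal sketch consistent with the indexing above (level 0 is the full-resolution image, each higher level is halved with cv2.pyrDown) could be:

import cv2

def build_pyramid(image, max_level):
    # Level 0 is the original image; each further level halves the resolution.
    pyramid = [image]
    for _ in range(max_level):
        pyramid.append(cv2.pyrDown(pyramid[-1]))
    return pyramid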
Example #2
def findscale(img):
    exito = 0
    #path = 'ima/img ('+str(i)+').jpg'
    #img = cv2.imread(path)
    tmp_l = cv2.imread('tmp_l.jpg')
    tmp_b = cv2.imread('tmp_b.jpg')
    tmp_b_n = cv2.imread('tmp_n.jpg')
    
    #Convert to grayscale (assuming tmp_l is the template to match)
    img_v = img
    tmp = cv2.cvtColor(tmp_l, cv2.COLOR_BGR2GRAY)
    img_g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
       
    #Apply erosion
    kernel = np.ones((2,2),np.uint8)
    num = 4
    img_g = cv2.erode(img_g,kernel,iterations = num)
    
    #Try to find the target region
    d = cv2.matchTemplate(img_g, tmp, cv2.TM_SQDIFF_NORMED)
    mn,_,mnLoc,_ = cv2.minMaxLoc(d)
    
    '''if (mn > 0.0243):
        d = cv2.matchTemplate(img_g, tmp2, cv2.TM_SQDIFF_NORMED)
        mn,_,mnLoc,_ = cv2.minMaxLoc(d)
    if (mn < 0.036):
        exito = exito + 1'''
    
    #Draw the rectangle
    MPx,MPy = mnLoc
    trows,tcols = tmp.shape[:2]
    cv2.rectangle(img_v, (MPx,MPy),(MPx+tcols,MPy+trows),(0,0,255),2)
    
    return img_v
Example #3
def detectCard(card):
    crop = card[0:50, 0:20]
    gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    black = cv2.inRange(crop, (0,0,0), (50, 50, 50))
    color = 0
    if (np.sum(black) > 5000):
        color = 0
    else:
        color = 1
    symbol0 = ''
    symbol0v = 0
    symbol1 = ''
    max_value = 0
    results = []
    for symbol in symbols1:
        res = cv2.matchTemplate(gray, symbol, cv2.TM_CCOEFF_NORMED)
        minv, maxv, minloc, maxloc = cv2.minMaxLoc(res)
        results.append(maxv)
    max_value = max(results)
    if max_value >= symbols1_threshold:
        symbol1 = symbols1_name[results.index(max_value)]
    results = []
    for symbol in symbols0:
        res = cv2.matchTemplate(gray, symbol, cv2.TM_CCOEFF_NORMED)
        minv, maxv, minloc, maxloc = cv2.minMaxLoc(res)
        results.append(maxv)
    max_value = max(results)
    if max_value >= symbols0_threshold:
        symbol0 = symbols_name[results.index(max_value)]
        symbol0v = symbols_value[results.index(max_value)]
#    if color == 0 and (symbol1 == 'D' or symbol1 == 'H'):
#        symbol1 = ''
#    elif color == 1 and (symbol1 == 'S' or symbol1 == 'C'):
#        symbol1 = ''
    return symbol0, symbol1, color, symbol0v
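The module-level names used above (symbols0, symbols1, symbols1_name, symbols_name, symbols_value and the two thresholds) are defined elsewhere in the project; a plausible setup, with hypothetical file names and thresholds, might be:

import cv2

# Hypothetical template tables for detectCard(); file names and thresholds are assumptions.
symbols1_name = ['S', 'H', 'D', 'C']                       # suit labels
symbols1 = [cv2.imread('suit_%s.png' % n, 0) for n in symbols1_name]
symbols1_threshold = 0.8

symbols_name = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
symbols_value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
symbols0 = [cv2.imread('rank_%s.png' % n, 0) for n in symbols_name]
symbols0_threshold = 0.8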
Example #4
    def find_best(self, roi=None):
        source_img = self.source_img
        target_img = self.target_img

        if roi:
            source_img = source_img.crop(roi)

        if target_img.is_same_color():
            if target_img.is_black():
                source_img = source_img.invert()
                target_img = target_img.invert()

            result = cv2.matchTemplate(source_img.source,
                                       target_img.source,
                                       cv2.TM_SQDIFF_NORMED)

            result = np.ones(result.shape, np.float32) - result
        else:
            result = cv2.matchTemplate(source_img.source,
                                       target_img.source,
                                       cv2.TM_CCOEFF_NORMED)

        self.cache_result = result
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(self.cache_result)

        return maxVal, maxLoc
Example #5
def myfindChessboardCorners(im,dim):
    gr=30
    patern=np.zeros((gr,gr),dtype='uint8')
    patern[:gr//2,:gr//2]=255
    patern[gr//2:,gr//2:]=255
    m1=cv2.matchTemplate(im,patern,cv2.TM_CCORR_NORMED)
    patern=np.ones((gr,gr),dtype='uint8')*255
    patern[:gr//2,:gr//2]=0
    patern[gr//2:,gr//2:]=0
    m2=cv2.matchTemplate(im,patern,cv2.TM_CCORR_NORMED)
    #m=np.bitwise_or(m1>0.9,m2>0.9)
    #import pdb;pdb.set_trace()
    thresh=0.95
    labels=ndimage.label(np.bitwise_or(m1>thresh,m2>thresh))
    if labels[1]!=dim[0]*dim[1]:
        return False,[]
    objs=ndimage.find_objects(labels[0])
    corners=[]
    for xx,yy in objs:
        xpos=(xx.start+xx.stop)/2.0#+gr/2-0.5
        ypos=(yy.start+yy.stop)/2.0#+gr/2-0.5
        se=5
        #import pdb;pdb.set_trace()
        xs,ys=int(xpos),int(ypos)  # slice indices must be integers
        minVal, maxVal, minLoc, maxLoc=cv2.minMaxLoc(m2[xs-se:xs+se,ys-se:ys+se])
        if maxVal<thresh:
            minVal, maxVal, minLoc, maxLoc=cv2.minMaxLoc(m1[xs-se:xs+se,ys-se:ys+se])
        xpos+=-se+maxLoc[0]+gr/2-0.5
        ypos+=-se+maxLoc[1]+gr/2-0.5
        
        #xpos=xx.start+gr/2
        #ypos=yy.start+gr/2
        corners.append((ypos,xpos) )
    return True,np.array(corners)
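A usage sketch, assuming a grayscale board image and a 7x7 grid of inner corners (both placeholders); note the function also needs scipy's ndimage:

import cv2
from scipy import ndimage  # required by myfindChessboardCorners

im = cv2.imread('board.png', 0)              # grayscale chessboard image
found, corners = myfindChessboardCorners(im, (7, 7))
if found:
    print(corners.shape)                     # one (x, y) pair per inner corner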
Example #6
    def isFillChanged(self, imageA, imageB):
        
        fillChanged = False    

        template = cv2.imread('fill.png',0)
        template = 255 - template

        imageA = cv2.imread(imageA,0)
        imageA = 255 - imageA
        
        imageB = cv2.imread(imageB,0)
        imageB = 255 - imageB
        

        res = cv2.matchTemplate(imageA,template,cv2.TM_CCORR_NORMED) # method 3
        min_val, max_val_A, min_loc, max_loc = cv2.minMaxLoc(res)

        res = cv2.matchTemplate(imageB,template,cv2.TM_CCORR_NORMED) # method 3
        min_val, max_val_B, min_loc, max_loc = cv2.minMaxLoc(res)
        
        if (max_val_A >= 0.7 and max_val_B < 0.7):
            fillChanged = True

        if (max_val_A < 0.7 and max_val_B >= 0.7):
            fillChanged = True

        return fillChanged
Example #7
def detector(image):
    img_rgb = image
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template1 = cv2.imread('template.jpg',0)
    template2 = cv2.imread('template_line.png',0)
    template3 = cv2.imread('template_line_horiz.png',0)
    #w, h = template.shape[::-1]
    counter = 0
    res1 = cv2.matchTemplate(img_gray,template1,cv2.TM_CCOEFF_NORMED)
    res2 = cv2.matchTemplate(img_gray,template2,cv2.TM_CCOEFF_NORMED)
    res3 = cv2.matchTemplate(img_gray,template3,cv2.TM_CCOEFF_NORMED)
    threshold = 0.5
    posloc1 = np.where( res1 >= threshold)
    for pt in zip(*posloc1[::-1]):
        #cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        counter = 1
    posloc2 = np.where( res2 >= threshold)
    for pt in zip(*posloc2[::-1]):
        #cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        counter = 2
    posloc3 = np.where( res3 >= threshold)
    for pt in zip(*posloc3[::-1]):
        #cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        counter = 3
    return counter
Example #8
def GetEyeCorners(orig_img, leftTemplate, rightTemplate,pupilPosition=None):
	if leftTemplate != [] and rightTemplate != []:
		ccnorm_left = cv2.matchTemplate(orig_img, leftTemplate, cv2.TM_CCOEFF_NORMED)
		ccnorm_right = cv2.matchTemplate(orig_img, rightTemplate, cv2.TM_CCOEFF_NORMED)

		minVal, maxVal, minLoc, maxloc_left_from = cv2.minMaxLoc(ccnorm_left)
		minVal, maxVal, minLoc, maxloc_right_from = cv2.minMaxLoc(ccnorm_right)

		l_h, l_w = leftTemplate.shape[:2]  # shape is (rows, cols) = (height, width)
		max_loc_left_from_x = maxloc_left_from[0]
		max_loc_left_from_y = maxloc_left_from[1]

		max_loc_left_to_x = max_loc_left_from_x + l_w
		max_loc_left_to_y = max_loc_left_from_y + l_h

		maxloc_left_to = (max_loc_left_to_x, max_loc_left_to_y)

		r_h, r_w = rightTemplate.shape[:2]
		max_loc_right_from_x = maxloc_right_from[0]
		max_loc_right_from_y = maxloc_right_from[1]

		max_loc_right_to_x = max_loc_right_from_x + r_w
		max_loc_right_to_y = max_loc_right_from_y + r_h
		maxloc_right_to = (max_loc_right_to_x, max_loc_right_to_y)

		return (maxloc_left_from, maxloc_left_to, maxloc_right_from, maxloc_right_to)
Example #9
    def match_template(self, cv_image):
        frame = np.array(cv_image, dtype=np.uint8)

        # grey = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        # edges = cv2.Sobel(grey, cv.CV_32F, 1, 1)

        W, H = frame.shape[1], frame.shape[0]
        w, h = self.template.shape[1], self.template.shape[0]
        width = W - w + 1
        height = H - h + 1

        result_array = np.zeros((height, width), dtype=np.float32)

        cv2.matchTemplate(frame, self.template, cv2.TM_CCOEFF_NORMED, result_array)

        (min_score, max_score, minloc, maxloc) = cv2.minMaxLoc(result_array)

        # if max_score > 0.7:
        # return None
        (x, y) = maxloc

        match_box = (x, y, w, h)
        cv2.imshow("Match Result", result_array)
        # cv.Rectangle(self.marker_image, (x, y), (x + w, y + h),(255, 255, 0), 3, 0)
        return match_box
Example #10
def find_hostiles(screen):
    img_rgb = cv2.imread(screen)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    neut_img = cv2.imread('neut.png', 0)
    red_img = cv2.imread('red.png', 0)
    neut2_img = cv2.imread('neut2.png', 0)

    w, h = red_img.shape[::-1]

    neut_match = cv2.matchTemplate(img_gray, neut_img, cv2.TM_CCOEFF_NORMED)
    neut2_match = cv2.matchTemplate(img_gray, neut2_img, cv2.TM_CCOEFF_NORMED)
    red_match = cv2.matchTemplate(img_gray, red_img, cv2.TM_CCOEFF_NORMED)

    threshold = 0.99

    loc_neut = numpy.where(neut_match >= threshold)
    loc_neut2 = numpy.where(neut2_match >= threshold)
    loc_red = numpy.where(red_match >= threshold)

    total_match = len(loc_neut[0]) + len(loc_red[0]) + len(loc_neut2[0])

    if total_match > 0:
        for pt in zip(*loc_red[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        for pt in zip(*loc_neut[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        for pt in zip(*loc_neut2[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)

        cv2.imwrite('res.png', img_rgb)

    return total_match
Example #11
 def __GetEyeCorners(self, grayscale, leftTemplate, rightTemplate, pupilPosition=None):
     """Given two templates and the pupil center returns the eye corners position."""
     corners = []
     
     if leftTemplate != [] and rightTemplate != []:
         # Template match the templates on the image
         ccnormed_left = cv2.matchTemplate(grayscale, leftTemplate, cv2.TM_CCOEFF_NORMED)
         ccnormed_right = cv2.matchTemplate(grayscale, rightTemplate, cv2.TM_CCOEFF_NORMED)
         
         #cv2.imshow("Left Template", ccnormed_left)
         #cv2.imshow("Right Template", ccnormed_right)
         
         # Get upper left corner of the templates
         minVal, maxVal, minLoc, maxLoc_left_from = cv2.minMaxLoc(ccnormed_left)
         minVal, maxVal, minLoc, maxLoc_right_from = cv2.minMaxLoc(ccnormed_right)
         
         # Calculate lower right corner of the templates
         maxLoc_left_to = (maxLoc_left_from[0] + leftTemplate.shape[1], maxLoc_left_from[1] + leftTemplate.shape[0])
         maxLoc_right_to = (maxLoc_right_from[0] + rightTemplate.shape[1], maxLoc_right_from[1] + rightTemplate.shape[0])
         
         corners.append(maxLoc_left_from)
         corners.append(maxLoc_left_to)
         corners.append(maxLoc_right_from)
         corners.append(maxLoc_right_to)
     return corners
Example #12
def best_size_offset(image, template, width_nom, threshold, audible = False, fast = False):
    import numpy as np
    import cv2
    
    offsets = np.linspace(-0.3, 0.3, 25)
    best_offset = 0
    most_matches = 0
    n_zeros = 0
    
    for size_off in offsets:
        img3 = cv2.resize(image, (0,0), fx=width_nom + size_off, fy=width_nom + size_off)
        img3 = cv2.blur(img3, (2,2))
        res = cv2.matchTemplate(img3, template, cv2.TM_CCOEFF_NORMED)
        good_matches = sum(sum(res > threshold))
        if good_matches > most_matches:
            most_matches = good_matches
            best_offset = size_off
            n_zeros = 0
        if (most_matches > 0) & (good_matches == 0):
            n_zeros += 1
        if n_zeros > 2:
            break
        if audible:
            print("Sizing offset: " + str(size_off) + " :: " + str(good_matches))
    
    if audible:
        print("--------------------------------")
        print("Best offset: " + str(best_offset) + " (" + str(most_matches) + ")")
        print("--------------------------------")
    
    if fast:
        print(round(best_offset, 4))
        return(round(best_offset, 4))

    small_step_offsets = np.linspace(best_offset - 0.015, best_offset + 0.015, 13)
    best_offset = 0
    most_matches = 0

    for size_off in small_step_offsets:
        img3 = cv2.resize(image, (0,0), fx=width_nom + size_off, fy=width_nom + size_off)
        img3 = cv2.blur(img3, (2,2))
        res = cv2.matchTemplate(img3, template, cv2.TM_CCOEFF_NORMED)
        good_matches = sum(sum(res > threshold))
        if good_matches > most_matches:
            most_matches = good_matches
            best_offset = size_off
        if audible:
            print("Sizing offset: " + str(size_off) + " :: " + str(good_matches))

    if audible:
        print("--------------------------------")
        print("Best offset: " + str(best_offset) + " (" + str(most_matches) + ")")
        print("--------------------------------")
        
    print(round(best_offset, 4))
    return(round(best_offset, 4))
Example #13
def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):
    '''
    Locate image position with cv2.matchTemplate

    Use pixel match to find pictures.

    Args:
        im_source(string): source image (the haystack)
        im_search(string): the picture to search for (the needle)
        threshold: matches whose similarity falls below this value are ignored
        maxcnt: maximum number of matches to return (0 means unlimited)
        rgb: match the B, G and R channels separately and combine with fixed weights
        bgremove: run Canny edge extraction first to suppress the background

    Returns:
        A tuple of found [(point, score), ...]

    Raises:
        IOError: when file read error
    '''
    # method = cv2.TM_CCORR_NORMED
    # method = cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCOEFF_NORMED

    if rgb:
        s_bgr = cv2.split(im_search) # Blue Green Red
        i_bgr = cv2.split(im_source)
        weight = (0.3, 0.3, 0.4)
        resbgr = [0, 0, 0]
        for i in range(3): # bgr
            resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)
        res = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    else:
        s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)
        i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)
        # edge extraction (implements the background-removal option)
        if bgremove:
            s_gray = cv2.Canny(s_gray, 100, 200)
            i_gray = cv2.Canny(i_gray, 100, 200)

        res = cv2.matchTemplate(i_gray, s_gray, method)
    w, h = im_search.shape[1], im_search.shape[0]

    result = []
    while True:
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        if DEBUG:
            print('templmatch_value(thresh:%.1f) = %.3f' % (threshold, max_val))
        if max_val < threshold:
            break
        # calculate the middle point of the match
        middle_point = (top_left[0] + w // 2, top_left[1] + h // 2)
        result.append((middle_point, max_val))
        if maxcnt and len(result) >= maxcnt:
            break
        # floodfill the already found area
        cv2.floodFill(res, None, max_loc, (-1000,), max_val-threshold+0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)
    return result
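Despite the docstring's "string" types, the function operates on image arrays, so the pictures are loaded first; a usage sketch with placeholder file names:

import cv2

source = cv2.imread('screen.png')   # haystack
needle = cv2.imread('button.png')   # template to find

# Up to 5 matches scoring at least 0.8; each hit is (center_point, score).
for (cx, cy), score in find_all_template(source, needle, threshold=0.8, maxcnt=5):
    print('match at (%d, %d) with score %.3f' % (cx, cy, score))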
Example #14
def GetEyeCorners(img, leftTemplate, rightTemplate,pupilPosition=None):
    sliderVals = getSliderVals()
    matchLeft = cv2.matchTemplate(img,leftTemplate,cv2.TM_CCOEFF_NORMED)
    matchRight = cv2.matchTemplate(img,rightTemplate,cv2.TM_CCOEFF_NORMED)
    matchListRight = np.nonzero(matchRight > (sliderVals['templateThr']*0.01))
    matchListLeft =  np.nonzero(matchLeft > (sliderVals['templateThr']*0.01))
    matchList = (matchListLeft,matchListRight)
    return matchList
Example #15
def findPicture(screenshot,template, tolerance,allConfigs, multiple = False):
    #This function will work with color images 3 channels minimum
    #The template can have an alpha channel and we will extract it to have the mask

    logging.debug('Tolerance to check is %f' , tolerance)

    logging.debug('*************Start of findPicture')

    h = template.shape[0]
    w = template.shape[1]
    
    #We will now extract the alpha channel
    tmpl = extractAlpha(template)
        
    # the method used for comparison, can be ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR','cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
    meth = 'cv2.TM_CCORR_NORMED'
    method = eval(meth)

    # Apply template Matching
    if tmpl['res']:
        res = cv2.matchTemplate(screenshot,tmpl['image'],method, mask = tmpl['mask'])
    else:
        res = cv2.matchTemplate(screenshot,tmpl['image'],method)
        
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
        best_val = 1 - min_val
    else:
        top_left = max_loc
        best_val = max_val
    #We need to ensure we found at least one match otherwise we return false
    if best_val >= tolerance:
        if multiple:
            #We need to find all the time the image is found
            all_matches = getMulti(res, float(tolerance),int(w),int(h))
        else:
            bottom_right = (top_left[0] + w, top_left[1] + h)
            center = (top_left[0] + (w // 2), top_left[1] + (h // 2))
            all_matches = [{'top_left':top_left,'bottom_right':bottom_right,'center':center,'tolerance':best_val}]

        #point will be in the form: [{'tolerance': 0.9889718890190125, 'center': (470, 193), 'bottom_right': (597, 215), 'top_left': (343, 172)}]
        
        logging.debug('The points found will be:')
        logging.debug(all_matches)
        logging.debug('*************End of findPicture')
        return {'res': True,'best_val':best_val,'points':all_matches}
    else:
        bottom_right = (top_left[0] + w, top_left[1] + h)
        center = (top_left[0] + (w // 2), top_left[1] + (h // 2))
        all_matches = [{'top_left':top_left,'bottom_right':bottom_right,'center':center,'tolerance':best_val}]

        logging.debug('Could not find a value above tolerance')
        logging.debug('*************End of findPicture')
        return {'res': False,'best_val':best_val,'points':all_matches}
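extractAlpha is a project helper not shown here; a minimal sketch consistent with how it is used above (a dict with 'res', 'image' and, when the template has an alpha channel, 'mask') might be:

import cv2

def extractAlpha(template):
    # Sketch only: split off the alpha channel to use as a matchTemplate mask.
    if template.shape[2] == 4:                    # BGRA input
        bgr = template[:, :, :3]
        alpha = template[:, :, 3]
        mask = cv2.merge([alpha, alpha, alpha])   # 3-channel mask for matchTemplate
        return {'res': True, 'image': bgr, 'mask': mask}
    return {'res': False, 'image': template, 'mask': None}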
Example #16
def temp_match(input, template, max_level):

    results = []

    input = iu.get_image(input)

    source_pyr = buildPyramid(input, max_level)
    template_pyr = buildPyramid(template, max_level)

    for lvl in range(0, int(max_level), 1):

        curr_image = source_pyr[lvl]
        curr_template = template_pyr[lvl]

        dX = curr_image.shape[1] + 1 - curr_template.shape[1]
        dY = curr_image.shape[0] + 1 - curr_template.shape[0]

        result = np.zeros([dY, dX])


        #On the first level performs regular template matching.
        if lvl == 0:
            result = cv.matchTemplate(curr_image, curr_template,
                                      cv.TM_CCORR_NORMED)

        #On every other level, perform pyramid transformation and template
        #matching on the predefined ROI areas, obtained using the result of the
        #previous level.
        else:
            mask = cv.pyrUp(r)

            mask8u = cv.inRange(mask, 0, 255)
            # findContours returns (contours, hierarchy) on OpenCV >= 4
            contours, _ = cv.findContours(mask8u, cv.RETR_EXTERNAL,
                                          cv.CHAIN_APPROX_NONE)

            #Uses contours to define the region of interest and perform TM on
            #the areas.

            for contour in contours:
                x, y, w, h = cv.boundingRect(contour)
                tpl_X = curr_template.shape[1]
                tpl_Y = curr_template.shape[0]

                #result = cv.matchTemplate(curr_image, curr_template,
                #                          cv.TM_CCORR_NORMED)

                result[y:y+h+1, x:x+w+1] = cv.matchTemplate(
                                curr_image[y:y+h+tpl_Y, x:x+w+tpl_X],
                                curr_template, cv.TM_CCORR_NORMED)

        T, r = cv.threshold(result, 0.94, 1., cv.THRESH_TOZERO)
        cv.imshow("test", r)
        cv.waitKey()
        results.append(r)
    return results
Example #17
def motion(image, ref, focus=(333, 666, 333, 666), maxaccel=0, delta=(0,0), antishake=2):
    """
    ref画像の,focusで指定された領域内の画像と同じ画像を,image内でさがして,その変位を返す.
    maxaccelとdeltaが指定されている場合は,探索範囲を絞り高速にマッチングできる.
    """
    logger = logging.getLogger()
    hi,wi = ref.shape[0:2]
    wmin = wi*focus[0]//1000
    wmax = wi*focus[1]//1000
    hmin = hi*focus[2]//1000
    hmax = hi*focus[3]//1000
    template = ref[hmin:hmax,wmin:wmax,:]
    h,w = template.shape[0:2]

    # Apply template Matching
    if maxaccel == 0:
        res = cv2.matchTemplate(image,template,cv2.TM_SQDIFF_NORMED)
        #loc is given by x,y
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        return min_loc[0] - wmin, min_loc[1] - hmin
    else:
        #use delta here
        roix0 = wmin + delta[0] - maxaccel
        roiy0 = hmin + delta[1] - maxaccel
        roix1 = wmax + delta[0] + maxaccel
        roiy1 = hmax + delta[1] + maxaccel
        affine = np.matrix(((1.0,0.0,-roix0),(0.0,1.0,-roiy0)))
        logger.debug("maxaccel:{0} delta:{1}".format(maxaccel,delta))
        crop = cv2.warpAffine(image, affine, (roix1-roix0,roiy1-roiy0))
        #imageh,imagew = image.shape[0:2]
        #if roix0 < 0 or roix1 >= imagew or roiy0 < 0 or roiy1 >= imageh:
        #    print(roix0,roix1,roiy0,roiy1,imagew,imageh)
        #    return None
        #crop = image[roiy0:roiy1, roix0:roix1, :]
        res = cv2.matchTemplate(crop,template,cv2.TM_SQDIFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        #loc is given by x,y

        #Test: isn't it just a background?
        roix02 = wmin - antishake
        roiy02 = hmin - antishake
        roix12 = wmax + antishake
        roiy12 = hmax + antishake
        crop = image[roiy02:roiy12, roix02:roix12, :]
        res = cv2.matchTemplate(crop,template,cv2.TM_SQDIFF_NORMED)
        min_val2, max_val2, min_loc2, max_loc2 = cv2.minMaxLoc(res)
        #loc is given by x,y
        if min_val <= min_val2:
            return (min_loc[0] + roix0 - wmin, min_loc[1] + roiy0 - hmin)
        else:
            return (min_loc2[0] + roix02 - wmin, min_loc2[1] + roiy02 - hmin)
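A usage sketch for motion() with placeholder frames; the first call searches the whole image, the second narrows the search around the previous displacement:

import cv2

ref = cv2.imread('frame_000.png')   # placeholder file names
img = cv2.imread('frame_001.png')

dx, dy = motion(img, ref)
dx, dy = motion(img, ref, maxaccel=5, delta=(dx, dy))
print(dx, dy)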
Example #18
    def read_time(self,path):

        img = cv2.imread(path,0) # total image

        x_min = 2195
        x_max = 2208
        y_min = 783
        y_max = 794
        minute_img = img[y_min:y_max,x_min:x_max]
        
        x_min = 2216
        x_max = 2229
        second1_img= img[y_min:y_max,x_min:x_max]

        x_min = 2232
        x_max = 2245
        second2_img= img[y_min:y_max,x_min:x_max]

        minute  = None
        second1 = None
        second2 = None

        minute_globalMax  = 0
        second1_globalMax = 0
        second2_globalMax = 0

        for i in range(0,10):
            minute_result  = cv2.matchTemplate(minute_img,self.numbers[i],cv2.TM_CCOEFF_NORMED)
            second1_result = cv2.matchTemplate(second1_img,self.numbers[i],cv2.TM_CCOEFF_NORMED)
            second2_result = cv2.matchTemplate(second2_img,self.numbers[i],cv2.TM_CCOEFF_NORMED)

            minute_min_val, minute_max_val, a, b = cv2.minMaxLoc(minute_result)
            second1_min_val, second1_max_val, a, b = cv2.minMaxLoc(second1_result)
            second2_min_val, second2_max_val, a, b = cv2.minMaxLoc(second2_result)

            if minute_max_val > minute_globalMax:
                minute_globalMax = minute_max_val
                minute = i

            if second1_max_val > second1_globalMax:
                second1_globalMax = second1_max_val
                second1 = i

            if second2_max_val > second2_globalMax:
                second2_globalMax = second2_max_val
                second2 = i

        #print("Time---> "+str(minute)+":"+str(second1)+str(second2))

        return [minute,second1,second2]
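self.numbers must hold ten digit templates small enough to fit the 13x11 crops above; a plausible (hypothetical) initialization:

import cv2

class TimeReader:
    def __init__(self):
        # Hypothetical: grayscale digit templates named 0.png .. 9.png.
        self.numbers = [cv2.imread('digits/%d.png' % i, 0) for i in range(10)]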
Example #19
	def findMarker(self):
		"""	
		returns category of the postcard, if false no marker was found
		apply function on binaryImage image
		"""

		#find squares as possible marker regions
		invertedImage = invertImage(self.binaryImage);
		height, width = invertedImage.shape

		if cv2.__version__[0] == '3':
			_, contours, _ = cv2.findContours(invertedImage, cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_SIMPLE);
		else:
			contours, _ = cv2.findContours(invertedImage, cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_SIMPLE);

		# find possible candidate Regions
		markerCandidates = []
		#contourImg = np.zeros((height,width), np.uint8)
		for contour in contours:
			epsilon = 0.1*cv2.arcLength(contour,True)
			approx = cv2.approxPolyDP(contour,epsilon,True)
			area = cv2.contourArea(approx)
			if len(approx) == 4 and area > PATTERN_MIN_SIZE and area < PATTERN_MAX_SIZE:
				#cv2.drawContours(contourImg , [approx], 0, 255, thickness=-1)
				markerCandidates.append(approx);

		# match marker patterns on each candidate region
		results = []
		for candidate in markerCandidates:
			x,y,w,h = cv2.boundingRect(candidate)
			roi = self.binaryImage[y:y+h, x:x+w]
			vals = []
			for i,marker in enumerate(self.markers):
				resizedPattern = cv2.resize(marker.pattern, (w,h), interpolation= cv2.INTER_NEAREST)
				flippedPattern = cv2.flip(resizedPattern, -1)
				mat = cv2.matchTemplate(roi,resizedPattern,cv2.TM_SQDIFF_NORMED)
				vals.append({ 'marker': i, 'flipped' : False, 'value' : mat[0,0] })
				mat = cv2.matchTemplate(roi,flippedPattern,cv2.TM_SQDIFF_NORMED)
				vals.append({ 'marker': i, 'flipped' : True, 'value' : mat[0,0] })

			# find minimum
			minVal = min(vals, key=lambda x: x['value'])
			results.append(minVal)

		#return minimum
		if (len(results) < 1):
			return (-1,False,1.0)

		minResult = min(results, key=lambda x: x['value'])
		return (minResult['marker'], minResult['flipped'], minResult['value'])
Example #20
def match(img):
    
    # img is assumed to be grayscale, matching the templates converted below
    tmp_n = cv2.imread('tmp_n.jpg')
    tmp_p = cv2.imread('tmp_p.jpg')
    tmp_g = cv2.imread('tmp_g.jpg')
    tmp_n = cv2.cvtColor(tmp_n, cv2.COLOR_BGR2GRAY)
    tmp_p = cv2.cvtColor(tmp_p, cv2.COLOR_BGR2GRAY)
    tmp_g = cv2.cvtColor(tmp_g, cv2.COLOR_BGR2GRAY)
    
    bad = True
    
    while(bad):
        d1 = cv2.matchTemplate(img, tmp_n, cv2.TM_SQDIFF_NORMED)
        d2 = cv2.matchTemplate(img, tmp_p, cv2.TM_SQDIFF_NORMED)
        d3 = cv2.matchTemplate(img, tmp_g, cv2.TM_SQDIFF_NORMED)
        
        mn1,_,mnLoc1,_ = cv2.minMaxLoc(d1)
        mn2,_,mnLoc2,_ = cv2.minMaxLoc(d2)
        mn3,_,mnLoc3,_ = cv2.minMaxLoc(d3)
        
        mn = min(mn1,mn2,mn3)
        
        if mn == mn1:
            MPx,MPy = mnLoc1
            trows,tcols = tmp_n.shape[:2]
        elif mn == mn2:
            MPx,MPy = mnLoc2
            trows,tcols = tmp_p.shape[:2]
        else:
            MPx,MPy = mnLoc3
            trows,tcols = tmp_g.shape[:2]    
           
        h,w = img.shape[:2]
        # Print it, for comparison
        print(mn)
           
        if mn >= 0.24 and bad == True:
            # crop the image to get closer to the result
            crop_img = img[h-70:h,0:w-70] # Crop from x, y, w, h -> 100, 200, 300, 400
            # NOTE: it's img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
            bad = False
  
        else:
            bad = False
     
    # Draw the rectangle 
    cv2.rectangle(img, (MPx,MPy),(MPx+tcols,MPy+trows),(0,0,255),2)
    
    return MPx,MPy,mn
Example #21
def testCorrelation(format, formatMask, path, output):
    dataset = pd.read_csv(path)
    idxCls = dataset['idx']
    fnList = dataset['path']
    masks = list(map(lambda x: imread(formatMask.format(x)), fnList))
    im = masks[0]

    white = np.nonzero(im)
    left = min(white[0])
    right = max(white[0])
    top = min(white[1])
    bottom = max(white[1])

    for mask in masks :
        if np.sum(mask) == 0:
            mask[len(mask) - 1][len(mask[0]) - 1] = 1

    left = list(map(lambda x: min(np.nonzero(x)[1]), masks))
    right = list(map(lambda x: max(np.nonzero(x)[1]), masks))
    top =  list(map(lambda x: min(np.nonzero(x)[0]), masks))
    bottom = list(map(lambda x: max(np.nonzero(x)[0]), masks))

    images = list(map(lambda x:  imread(format.format(x)), fnList))
    images_croped = [None] * len(images)   # list, since each entry is a 2-D array
    for i in range(0, len(images)):
        images_croped[i] = images[i][top[i] : bottom[i], left[i] : right[i]] #Y[[0,3],:][:,[0,3]]
        if len(images[i]) > 0 and len(images[i][0]) > 0 :
            images_croped[i] = np.pad(images[i], ((0,512 - len(images[i])),(0, 512 - len(images[i][0]))), mode='constant', constant_values=0)
    correlations = np.empty([len(images), len(images)])
    max_correlations = np.empty([len(images), len(images)])
    for i in range(0, len(images)):
        for j in range(0, len(images)):
            if i == j :
                correlations[i][j] = 0.
            elif len(images[i]) > 0 and len(images[j]) > 0 : 
                res = cv2.matchTemplate(images[i], images_croped[j], cv2.TM_CCORR_NORMED)
                correlations[i][j] = np.max(res)
                max_correlations[i][j] = np.max(res)

    with open(output + 'correlation_distances.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerows(max_correlations)

    with open(output + 'correlation_matrixes.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerows(correlations)

    with open(output + 'correlation_real.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(idxCls)
Example #22
def select_roi(img_rgb,img_gray,img_board,templates):
    temp_count = 0
    h_orig, w_orig = img_rgb.shape[0], img_rgb.shape[1]   # shape is (rows, cols)
    
    hb, wb = img_board.shape[0], img_board.shape[1]
    
    res_b = cv2.matchTemplate(img_gray,img_board,cv2.TM_CCOEFF_NORMED)
    threshold_b = 0.25
    
    loc_b = np.where( res_b >= threshold_b)
    board_x = 0
    board_y = 0
    for ptb in zip(*loc_b[::-1]):
        cv2.rectangle(img_rgb, ptb, (ptb[0] + wb, ptb[1] + hb), (255,0,0), 1)
        board_x = ptb[0]
        board_y = ptb[1]
        
    for i, template in enumerate(templates):
        w, h = template[0].shape[::-1]
        res = cv2.matchTemplate(img_gray,template[0],cv2.TM_CCOEFF_NORMED)
        threshold = 0.948
    
        loc = np.where( res >= threshold)
        
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 1)
            #print pt[0] , pt[1]
            centerx = pt[0] + w/2
            centery = pt[1] + h/2
            r = -1
            c = -1
            if centerx <= wb/8 + board_x:
                c = 1
            if centery <= hb/8 + board_y:
                r = 1
            for col in range(2,9):
                if centerx <= col*wb/8.0 + board_x and centerx >= (col-1)*wb/8.0 + board_x:
                    c = col
            for row in range(2,9):
                if centery <= row*hb/8.0 + board_y and centery >= (row-1)*hb/8.0 + board_y:
                    r = row
            if (r != -1 and c != -1):
                chessboard[r-1][c-1] = template[1]
            
            temp_count+=1
            
    #cv2.imwrite('res.png',img_rgb)
    #print chessboard
    return img_rgb , temp_count , chessboard
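chessboard is a module-level 8x8 grid that select_roi() fills in; a plausible (hypothetical) initialization:

# Hypothetical 8x8 board state consumed by select_roi(); '-' marks an empty square.
chessboard = [['-' for _ in range(8)] for _ in range(8)]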
Example #23
def train_capture(minutes):
	if minutes==0:
		while True:
			ts=datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
			tslist=ts.split(":")
			minute=int(tslist[1])
			second=int(tslist[2])
			lastmin=lmin(tslist, minute)
			lastsec=lsec(tslist, second)
			tss=tsdelay(tslist, lastmin, lastsec)
			ts1="file-"+tss[0]+"*"
			ts2="file-"+tss[1]+"*"
			ts1files=getfiles(ts1)
			ts2files=getfiles(ts2)
			img1=cv2.imread(ts1files[0])
			img2=cv2.imread(ts2files[0])
			compare=cv2.matchTemplate(img1,img2,cv2.TM_CCORR_NORMED)
			diff=np.amax(compare)
			diffs.append(diff)
			t=thres(diffs)
			result=train(diff, t)
			results.append(result)
			movefile(result, ts1files[0], ts2files[0], dest)
	else:
		timeout=time.time()+60*minutes
		while True:
			if time.time()>timeout:
				break
			ts=datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
			tslist=ts.split(":")
			minute=int(tslist[1])
			second=int(tslist[2])
			lastmin=lmin(tslist, minute)
			lastsec=lsec(tslist, second)
			tss=tsdelay(tslist, lastmin, lastsec)
			ts1="file-"+tss[0]+"*"
			ts2="file-"+tss[1]+"*"
			ts1files=getfiles(ts1)
			ts2files=getfiles(ts2)
			img1=cv2.imread(ts1files[0])
			img2=cv2.imread(ts2files[0])
			compare=cv2.matchTemplate(img1,img2,cv2.TM_CCORR_NORMED)
			diff=np.amax(compare)
			diffs.append(diff)
			t=thres(diffs)
			result=train(diff, t)
			results.append(result)
			movefile(result, ts1files[0], ts2files[0], dest)
Example #24
def template_matching():
    img = cv2.imread('messi.jpg',0)
    img2 = img.copy()
    template = cv2.imread('face.png',0)
    w, h = template.shape[::-1]

    # All the 6 methods for comparison in a list
    methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
            'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

    for meth in methods:
        img = img2.copy()
        method = eval(meth)

        # Apply template Matching
        res = cv2.matchTemplate(img,template,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)

        cv2.rectangle(img,top_left, bottom_right, 255, 2)

        plt.subplot(121),plt.imshow(res,cmap = 'gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(img,cmap = 'gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.suptitle(meth)

        plt.show()
Example #25
File: img.py Project: ftyszyx/tools
    def getMultiTemplePos(self,srcPicPath,templePicPath):
        print("srcpath",srcPicPath,"temppath",templePicPath)
        img_src=cv2.imread(srcPicPath)
        img_src_gray=cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
        srcw,srch=img_src_gray.shape[::-1]
        print("get pic:",srcw,srch)
        img_temple=cv2.imread(templePicPath)
        img_temple_gray=cv2.cvtColor(img_temple, cv2.COLOR_BGR2GRAY)
        templew,templeh=img_temple_gray.shape[::-1]
        res = cv2.matchTemplate(img_src_gray,img_temple_gray,cv2.TM_CCOEFF_NORMED) 
        # print("get temple",res)
        # cv2.imshow('src',img_src_gray)
        # cv2.imshow('temple',img_temple_gray)
        # cv2.waitKey(0)

        threshold = 0.7 
        loc = np.where( res >= threshold)
        print(loc)
        # zipres=zip(*loc[::-1])
        # print("zipres",zipres)
        # if len(zipres)==0:
        #     return False,None,None,None
        # else:
        #     return True,zipres[0],templew,templeh
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_src, pt, (pt[0] + templew, pt[1] + templeh),(7,249,151), 2)   
        cv2.imshow('Detected',img_src)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #26
def matchImage(image,template):
    height, width = template.shape
    match = ocv.matchTemplate(image,template,matching_method)
    min_val, max_val, min_loc, max_loc = ocv.minMaxLoc(match)
    top_left = max_loc
    bottom_right = (top_left[0] + width, top_left[1]+height)
    return [top_left, bottom_right, width, height]
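This snippet relies on a module-level alias ocv and a matching_method constant; a minimal setup consistent with the top_left = max_loc logic above (which presumes a max-is-best method) would be:

import cv2 as ocv

# Any method where the maximum marks the best match fits the logic above.
matching_method = ocv.TM_CCOEFF_NORMED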
Example #27
def onmouse(event, x, y, flags, param):
    global drag_start, sel
    if event == cv.EVENT_LBUTTONDOWN:
        drag_start = x, y
        sel = 0,0,0,0
    elif event == cv.EVENT_LBUTTONUP:
        if sel[2] > sel[0] and sel[3] > sel[1]:
            patch = gray[sel[1]:sel[3],sel[0]:sel[2]]
            result = cv.matchTemplate(gray,patch,cv.TM_CCOEFF_NORMED)
            result = np.abs(result)**3
            val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO)
            result8 = cv.normalize(result,None,0,255,cv.NORM_MINMAX,cv.CV_8U)
            cv.imshow("result", result8)
        drag_start = None
    elif drag_start:
        #print flags
        if flags & cv.EVENT_FLAG_LBUTTON:
            minpos = min(drag_start[0], x), min(drag_start[1], y)
            maxpos = max(drag_start[0], x), max(drag_start[1], y)
            sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
            img = cv.cvtColor(gray, cv.COLOR_GRAY2BGR)
            cv.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)
            cv.imshow("gray", img)
        else:
            print "selection is complete"
            drag_start = None
Example #28
    def TemplateMatching(self, img, tmp):
        '''
            Run template matching between the input image and the template image.
        '''
        # When matching on edges:
        # gimg = cv2.Canny(img, threshold1= 100, threshold2= 200,apertureSize = 3)
        # tmp = cv2.Canny(tmp, threshold1= 100, threshold2= 200,apertureSize = 3)
        
        # Plain grayscale matching:
        gimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        tmp = cv2.cvtColor(tmp,cv2.COLOR_BGR2GRAY)

        gimg2 = gimg
        
        

        rows, cols = tmp.shape[:2]
        
        methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
        'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
        # methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR','cv2.TM_CCORR_NORMED']
        # run once for each method
        for i, meth in enumerate(methods):
            gimg = gimg2
            method = eval(meth)

            # Apply template Matching
            res = cv2.matchTemplate(gimg,tmp,method)

            # minimum value, maximum value, and their locations
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

            # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
            # if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                # top_left = min_loc
            # else:
                # top_left = max_loc
            
            # draw color
            # if i == 3:
                # color = [0,0,0]
            # else:
                # color = [0,0,0]
                # color[i] = 255
            color = 255
            # rectangle result
            top_left = max_loc
            bottom_right = (top_left[0] + cols, top_left[1] + rows)
            


            cv2.rectangle(img,(top_left[0],top_left[1]), bottom_right, color, 2)
            cv2.putText(img,meth,(top_left[0],top_left[1]-5),cv2.FONT_HERSHEY_SIMPLEX,0.3,color)
            cv2.imshow(meth,res/np.amax(res))
            cv2.imshow('Srcimg',img)
            cv2.imshow('template',tmp)
            cv2.imshow('GrayImg',gimg)

            print(max_val, min_val)
Example #29
File: img.py Project: ftyszyx/tools
 def getOneTemplePos(self,srcPicPath,templePicPath):
     # print(srcPicPath,templePicPath)
     img_src=cv2.imread(srcPicPath)
     img_src_gray=cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
     srcw,srch=img_src_gray.shape[::-1]
     print("img_src gray",srcw,srch)
     img_temple=cv2.imread(templePicPath)
     img_temple_gray=cv2.cvtColor(img_temple, cv2.COLOR_BGR2GRAY)
     templew,templeh=img_temple_gray.shape[::-1]
     print("temple gray",templew,templeh)
     # cv2.imshow('rgb',img_src)
     # cv2.imshow('gray',img_src_gray)
     # cv2.imshow('template',img_temple_gray)
     # cv2.waitKey(0)
     # cv2.destroyAllWindows()
     res = cv2.matchTemplate(img_src_gray,img_temple_gray,method) 
     min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
     print(min_val, max_val, min_loc, max_loc)
     # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
     if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
         top_left = min_loc
     else:
         top_left = max_loc
     bottom_right = (top_left[0] + templew, top_left[1] + templeh)
     cv2.rectangle(img_src,top_left, bottom_right, 255, 2)
     print(top_left, bottom_right)
Example #30
def normCrossCorrelation(img1, img2, pt0, pt1, status, winsize, method=cv2.TM_CCOEFF_NORMED):
    """
    **SUMMARY**
    
    Calculates normalized cross correlation for every point.
    
    **PARAMETERS**
    
    img1 - Image 1.
    img2 - Image 2.
    pt0 - vector of points of img1
    pt1 - vector of points of img2
    status - Switch which point pairs should be calculated.
             if status[i] == 1 => match[i] is calculated.
             else match[i] = 0.0
    winsize- Size of quadratic area around the point
             which is compared.
    method - Specifies the way how image regions are compared. see cv2.matchTemplate
    
    **RETURNS**
    
    match - Output: Array will contain ncc values.
            0.0 if not calculated.
 
    """
    nPts = len(pt0)
    match = np.zeros(nPts)
    for i in np.argwhere(status):
        i = i[0]
        patch1 = cv2.getRectSubPix(img1,(winsize,winsize),tuple(pt0[i]))
        patch2 = cv2.getRectSubPix(img2,(winsize,winsize),tuple(pt1[i]))
        match[i] = cv2.matchTemplate(patch1,patch2,method)
    return match
Example #31
    def start_tracking(self):
        i = 0
        for f in range(self.frames_count):
            timer = cv2.getTickCount()
            ret, self.frame = self.cap.read()
            if not ret:
                print("End!")
                break
            print("Processing Frame {}".format(i))
            img_raw = self.frame
            image = cv2.resize(img_raw.copy(),
                               self.video_size,
                               interpolation=cv2.INTER_CUBIC)

            if i == 0:  # the target only needs to be selected on the first frame
                while (True):
                    img_first = image.copy()
                    if self.track_window:
                        cv2.rectangle(
                            img_first,
                            (self.track_window[0], self.track_window[1]),
                            (self.track_window[2], self.track_window[3]),
                            self.box_color, 1)
                    elif self.selection:
                        cv2.rectangle(img_first,
                                      (self.selection[0], self.selection[1]),
                                      (self.selection[2], self.selection[3]),
                                      self.box_color, 1)
                    cv2.imshow(self.windowName, img_first)

                    if cv2.waitKey(self.speed) == 13:  # press Enter to start tracking
                        break

                if self.tracker_type == 'Dlib_Tracker':

                    self.tracker.start_track(
                        image,
                        dlib.rectangle(self.track_window[0],
                                       self.track_window[1],
                                       self.track_window[2],
                                       self.track_window[3]))

                elif self.tracker_type == 'CamShift':

                    tracker_box = (self.track_window[0], self.track_window[1],
                                   self.track_window[2] - self.track_window[0],
                                   self.track_window[3] - self.track_window[1])
                    roi = image[self.track_window[1]:self.track_window[3],
                                self.track_window[0]:self.track_window[2]]
                    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
                    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                                       np.array((180., 255., 255.)))
                    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180],
                                            [0, 180])
                    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
                    term_crit = (cv2.TERM_CRITERIA_EPS
                                 | cv2.TERM_CRITERIA_COUNT, 10, 1)

                elif self.tracker_type == 'Template_Matching':
                    '''
                        1. Squared difference matching:         method = CV_TM_SQDIFF
                        2. Normalized squared difference:       method = CV_TM_SQDIFF_NORMED
                        3. Cross-correlation matching:          method = CV_TM_CCORR
                        4. Normalized cross-correlation:        method = CV_TM_CCORR_NORMED
                        5. Correlation-coefficient matching:    method = CV_TM_CCOEFF
                        6. Normalized correlation coefficient:  method = CV_TM_CCOEFF_NORMED

                        cv2.matchTemplate() strictly requires the template and the
                        image to share the same data type (CV_8U or CV_32F).

                    '''
                    method = cv2.TM_CCOEFF_NORMED
                    template = image[self.track_window[1]:self.track_window[3],
                                     self.track_window[0]:self.track_window[2]]
                    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
                    template = template.astype(np.float32)

                else:  # one of OpenCV's five built-in trackers
                    ret = self.tracker.init(
                        image, (self.track_window[0], self.track_window[1],
                                self.track_window[2] - self.track_window[0],
                                self.track_window[3] - self.track_window[1]))

            # After the target has been selected, tracking begins once the first frame ends
            if self.tracker_type == 'Dlib_Tracker':

                self.tracker.update(image)
                tracker_box = self.tracker.get_position()
                x, y, w, h = tracker_box.left(), tracker_box.top(
                ), tracker_box.width(), tracker_box.height()

            elif self.tracker_type == 'CamShift':

                hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
                ret, tracker_box = cv2.CamShift(dst, tracker_box, term_crit)
                x, y, w, h = tracker_box

            elif self.tracker_type == 'Template_Matching':

                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                gray = gray.astype(np.float32)
                res = cv2.matchTemplate(gray, template, method)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
                w, h = template.shape[::-1]

                # For CV_TM_SQDIFF and CV_TM_SQDIFF_NORMED the best match is the minimum (0)
                # and larger values mean a worse match; the remaining methods are the opposite

                if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                    x = min_loc[0]
                    y = min_loc[1]
                else:
                    x = max_loc[0]
                    y = max_loc[1]

            else:  # one of OpenCV's five built-in trackers

                ret, tracker_box = self.tracker.update(image)
                x, y, w, h = tracker_box

            self.drawing(image, x, y, w, h, timer)
            cv2.imshow(self.windowName, image)

            if cv2.waitKey(self.speed) == 27:  # press Esc to quit
                break

            i += 1

            if i == self.frames_count:
                cv2.imwrite('Video/track_result.jpg', image)

        cv2.destroyAllWindows()
Example #32
def _match_template(image, template, mask, method, roi_mask, level, imwrite):

    ddebug("Level %d: image %s, template %s" %
           (level, image.shape, template.shape))

    heatmap_shape = (image.shape[0] - template.shape[0] + 1,
                     image.shape[1] - template.shape[1] + 1)
    NO_MATCH = {
        cv2.TM_SQDIFF: template.size * (255**2),
        cv2.TM_SQDIFF_NORMED: 1,
        cv2.TM_CCORR_NORMED: 0,
        cv2.TM_CCOEFF_NORMED: 0,
    }
    matches_heatmap = numpy.full(heatmap_shape,
                                 NO_MATCH[method],
                                 dtype=numpy.float32)

    if roi_mask is None:
        rois = [  # Initial region of interest: The whole image.
            _Rect(0, 0, matches_heatmap.shape[1], matches_heatmap.shape[0])
        ]
    else:
        rois = [
            _Rect(*x) for x in cv2_compat.find_contour_boxes(
                roi_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        ]

    if get_debug_level() > 1:
        source_with_rois = image.copy()
        for roi in rois:
            r = roi
            t = _Size(*template.shape[:2])
            s = _Size(*source_with_rois.shape[:2])
            cv2.rectangle(source_with_rois, (max(0, r.x), max(0, r.y)),
                          (min(s.w - 1, r.x + r.w + t.w - 1),
                           min(s.h - 1, r.y + r.h + t.h - 1)), (0, 255, 255),
                          thickness=1)
        imwrite("source_with_rois", source_with_rois)

    if mask is not None:
        kwargs = {"mask": mask}
    else:
        kwargs = {}  # For OpenCV < 3.0.0
    for roi in rois:
        r = roi.expand(_Size(*template.shape[:2])).shrink(_Size(1, 1))
        ddebug("Level %d: Searching in %s" % (level, roi))
        cv2.matchTemplate(image[r.to_slice()], template, method,
                          matches_heatmap[roi.to_slice()], **kwargs)

    if method == cv2.TM_SQDIFF:
        # OpenCV's SQDIFF_NORMED normalises by the pixel intensity across
        # the reference image and the source image patch. This doesn't work
        # at all for completely black images, and it exaggerates
        # differences for dark images. With SQDIFF we do our own
        # normalisation based solely on the number of pixels in the sum.
        # We still get a number between 0 - 1.

        if mask is not None:
            # matchTemplateMask normalises the source & template image to [0,1].
            # https://github.com/opencv/opencv/blob/3.2.0/modules/imgproc/src/templmatch.cpp#L840-L917
            scale = max(1, numpy.count_nonzero(mask))
        else:
            scale = template.size * (255**2)
    else:
        scale = 1

    if method in (cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF_NORMED):
        matches_heatmap = 1 - matches_heatmap

    imwrite("source", image)
    imwrite("template", template)
    imwrite("mask", mask)
    imwrite("source_matchtemplate", matches_heatmap, scale=scale)

    return matches_heatmap, scale
Example #33
# Template for matching the little player piece
temp1 = cv2.imread('temp_player.jpg', 0)
w1, h1 = temp1.shape[::-1]
# Template for matching the game-over screen
temp_end = cv2.imread('temp_end.jpg', 0)
# Template for matching the small white center dot
temp_white_circle = cv2.imread('temp_white_circle.jpg', 0)
w2, h2 = temp_white_circle.shape[::-1]

# Loop until the game ends in failure
for i in range(10000):
    get_screenshot()
    img_rgb = cv2.imread('jump.png', 0)

    # If the "play again" template is matched in the screenshot, stop the loop
    res_end = cv2.matchTemplate(img_rgb, temp_end, cv2.TM_CCOEFF_NORMED)
    if cv2.minMaxLoc(res_end)[1] > 0.95:
        print('Game over!')
        break

    # Template-match the position of the player piece in the screenshot
    res1 = cv2.matchTemplate(img_rgb, temp1, cv2.TM_CCOEFF_NORMED)
    min_val1, max_val1, min_loc1, max_loc1 = cv2.minMaxLoc(res1)
    center1_loc = (max_loc1[0] + 39, max_loc1[1] + 189)

    # First try to match the small center dot in the screenshot;
    # if the score does not reach 0.95, use edge detection to find the top edge of the block instead
    res2 = cv2.matchTemplate(img_rgb, temp_white_circle, cv2.TM_CCOEFF_NORMED)
    min_val2, max_val2, min_loc2, max_loc2 = cv2.minMaxLoc(res2)
    if max_val2 > 0.95:
        print('Found the white dot! accuracy++ score++')
Example #34
import cv2
import numpy as np

# Load input image and convert to grayscale
image = cv2.imread('WaldoBeach.jpg')
cv2.imshow('Where is Waldo?', image)
cv2.waitKey(0)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Load Template image
template = cv2.imread('waldo.jpg', 0)

result = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

#Create Bounding Box
top_left = max_loc
bottom_right = (top_left[0] + 50, top_left[1] + 50)
cv2.rectangle(image, top_left, bottom_right, (0, 0, 255), 5)

cv2.imshow('Where is Waldo?', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #35
import cv2
import numpy as np

img_rgb = cv2.imread('opencv-template-matching-python-tutorial.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

template = cv2.imread('opencv-template-for-matching.jpg', 0)
w, h = template.shape[::-1]

res = cv2.matchTemplate(
    img_gray, template,
    cv2.TM_CCOEFF_NORMED)  # the last argument selects the matching method
threshold = 0.9
loc = np.where(res >= threshold)

for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 255, 255), 2)

cv2.imshow('Detected', img_rgb)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #36
 def tapOnCmp(self, img, rect=(0, 0, 1920, 1080), delta=.05):
     return (lambda loc: loc[0] < delta and (base.touch(
         (rect[0] + loc[2][0] + (img.shape[1] >> 1), rect[1] + loc[2][1] +
          (img.shape[0] >> 1))), fuse.reset())[1])(cv2.minMaxLoc(
              cv2.matchTemplate(self.im[rect[1]:rect[3], rect[0]:rect[2]],
                                img, cv2.TM_SQDIFF_NORMED)))
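The one-liner above is hard to follow; an equivalent, unrolled sketch (assuming base.touch taps a screen coordinate and fuse.reset clears a failure counter, as the surrounding project suggests):

 def tapOnCmp_unrolled(self, img, rect=(0, 0, 1920, 1080), delta=.05):
     # Match the template inside the given region of the current screenshot.
     roi = self.im[rect[1]:rect[3], rect[0]:rect[2]]
     min_val, _, min_loc, _ = cv2.minMaxLoc(
         cv2.matchTemplate(roi, img, cv2.TM_SQDIFF_NORMED))
     if min_val >= delta:      # TM_SQDIFF_NORMED: smaller is better
         return False
     # Tap the center of the match, offset back into full-screen coordinates.
     base.touch((rect[0] + min_loc[0] + (img.shape[1] >> 1),
                 rect[1] + min_loc[1] + (img.shape[0] >> 1)))
     return fuse.reset()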
Example #37
 def compare(self, img, rect=(0, 0, 1920, 1080), delta=.05):
     return delta > cv2.minMaxLoc(
         cv2.matchTemplate(self.im[rect[1]:rect[3], rect[0]:rect[2]], img,
                           cv2.TM_SQDIFF_NORMED))[0] and fuse.reset()
Example #38
def match_image(largeImg, smallImg, threshold=0.1, debug=False):
    """
    Finds smallImg in largeImg using template matching
    Adjust threshold for the precision of the match (between 0 and 1, the lowest being more precise)

    Returns:
        tuple (x, y) of the center of the match if it's found, False otherwise.
    """

    method = cv2.TM_SQDIFF_NORMED

    small_image = _to_cv2_img(smallImg)
    large_image = _to_cv2_img(largeImg)

    if (small_image is None) or (large_image is None):
        print("Error: large_image or small_image is None")
        return False

    h, w = small_image.shape[:-1]

    if debug:
        print("large_image:", large_image.shape)
        print("small_image:", small_image.shape)

    try:
        result = cv2.matchTemplate(large_image, small_image, method)
    except cv2.error as e:
        # The image was not found. like, not even close. :P
        print(e)
        return False

    # We want the minimum squared difference
    mn, _, mnLoc, _ = cv2.minMaxLoc(result)

    if mn >= threshold:
        if debug:
            cv2.imshow("output", large_image)
            cv2.waitKey(0)
        return False

    # Extract the coordinates of our best match
    x, y = mnLoc

    if debug:
        print(f"Match at ({x}, {y}) relative to region")
        # Draw the rectangle:
        # Get the size of the template. This is the same size as the match.
        trows, tcols = small_image.shape[:2]

        # If I don't call this a get a TypeError :P
        large_image = np.array(large_image)
        # Draw the rectangle on large_image
        cv2.rectangle(large_image, (x, y), (x + tcols, y + trows), (0, 0, 255), 2)

        # Display the original image with the rectangle around the match.
        cv2.imshow("output", large_image)

        # The image is only displayed if we call this
        cv2.waitKey(0)

    # Return coordinates to center of match
    return (x + (w // 2), y + (h // 2))
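_to_cv2_img is a project helper not shown here; a minimal sketch, assuming it accepts either a file path or an already-loaded BGR array:

import cv2
import numpy as np

def _to_cv2_img(img):
    # Sketch: accept a file path or an existing array; return None on failure.
    if isinstance(img, str):
        return cv2.imread(img, cv2.IMREAD_COLOR)
    if isinstance(img, np.ndarray):
        return img
    return None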
Example #39
import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread(r"..\lena.jpg", 0)
template = cv2.imread(r"..\lena_eyes.png", 0)
tw, th = template.shape[::-1]
rv = cv2.matchTemplate(img, template, cv2.TM_CCOEFF)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(rv)
topLeft = maxLoc
bottomRight = (topLeft[0] + tw, topLeft[1] + th)
cv2.rectangle(img, topLeft, bottomRight, 252, 2)

plt.subplot(1, 2, 1)
plt.imshow(rv, cmap='gray')
plt.title('Matching Result')
plt.xticks([])
plt.yticks([])

plt.subplot(1, 2, 2)
plt.imshow(img, cmap='gray')
plt.title('Detected Point')
plt.xticks([])
plt.yticks([])

plt.show()
Example #40
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('70.png', 0)
img2 = img.copy()

template = cv.imread('template_real.png', 0)
w, h = template.shape[::-1]

# All the 6 methods for comparison in a list
methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
            'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']

for meth in methods:
    img = img2.copy()
    method = eval(meth)
    # Apply template Matching
    res = cv.matchTemplate(img,template,method)
    min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv.rectangle(img,top_left, bottom_right, 255, 2)
    plt.subplot(121),plt.imshow(res,cmap = 'gray')
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122),plt.imshow(img,cmap = 'gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
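If only one method is needed, the comparison loop above collapses to a few lines (a sketch reusing the same img2/template/w/h variables; with TM_SQDIFF_NORMED lower is better, so the best match is min_loc):

res = cv.matchTemplate(img2, template, cv.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)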
Example #41
0
    gameObjects.append(GameObject(name, threshold, imgDirPath))

print("Number of processors: ", mp.cpu_count())

startTime = int(time.time())
x = 1

while int(time.time()) < startTime + 5:
    screenshotColor = get_color_screenshot()

    for obj in gameObjects:
        patch = cv2.imread(obj.imagePath)
        mask = cv2.imread(obj.imageMaskPath)
        c, w, h = patch.shape[::-1]

        res = cv2.matchTemplate(screenshotColor, patch, cv2.TM_CCORR_NORMED,
                                None, mask)
        loc = numpy.where(res >= obj.threshold)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(screenshotColor, pt, (pt[0] + w, pt[1] + h),
                          (0, 0, 255), 2)
            print(obj.objName + " identified")
            log.write(obj.objName + " identified\n")

        cv2.imwrite(imgExpPath + "screenshot_" + str(x) + ".png",
                    screenshotColor)

    print("screenshot captured " + str(x))
    log.write("screenshot captured " + str(x) + "\n\n")
    x += 1
Example #42
0
import glob
import os

import cv2

target_folder = "D:/Research/ModeTransformation/Data/05_10_2018/"
filename_mask = "mode01.png"                               # "I*A*P*.png"
mode_image = "mode10.png"

images_list = glob.glob(os.path.join(target_folder, filename_mask))     # Get image list to process
mode_fullname = glob.glob(os.path.join(target_folder, mode_image))      # Get the full name for the mode image

mode = cv2.imread(mode_fullname[0], cv2.IMREAD_UNCHANGED)
w, h = mode.shape[::-1]
print("Processing: ", os.path.join(target_folder, filename_mask))


for item in images_list:
    image = cv2.imread(item, cv2.IMREAD_UNCHANGED)
    result = cv2.matchTemplate(image, mode, cv2.TM_CCORR_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    print(max_val)

# All the 6 methods for comparison in a list
# methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
#             'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
#
# image = cv2.imread(images_list[0], cv2.IMREAD_UNCHANGED)

# for meth in methods:
#
#     img = image.copy()
#     method = eval(meth)
#     # Apply template Matching
#     res = cv2.matchTemplate(img,mode,method)
Example #43
0
    def find_in_scaling_range(cls,
                              image,
                              similarity=DEFAULT_SIMILARITY,
                              lowerEnd=0.8,
                              upperEnd=1.2):
        """Finds the location of the image on the screen. First the image is searched at its default scale,
        and if it isn't found, it will be resized using values inside the range provided until a match that satisfy
        the similarity value is found. If the image isn't found even after it has been resized, the method returns None.

        Args:
            image (string): Name of the image.
            similarity (float, optional): Defaults to DEFAULT_SIMILARITY.
                Percentage in similarity that the image should at least match
            lowerEnd (float, optional): Defaults to 0.8.
                Lowest scaling factor used for resizing.
            upperEnd (float, optional): Defaults to 1.2.
                Highest scaling factor used for resizing.

        Returns:
            Region: Coordinates of where the image appears.
        """
        template = cv2.imread('assets/{}/{}.png'.format(cls.assets, image), 0)
        # first try with default size
        width, height = template.shape[::-1]
        match = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)
        _, value, _, location = cv2.minMaxLoc(match)
        if (value >= similarity):
            return Region(location[0], location[1], width, height)

        # resize and match using threads

        # change scaling factor if the boss icon searched is small
        # (some events have a shipgirl as boss fleet, with a small boss icon at her bottom right)
        if cls.small_boss_icon and image == 'enemy/fleet_boss':
            lowerEnd = 0.4
            upperEnd = 0.6

        # preparing interpolation methods
        middle_range = (upperEnd + lowerEnd) / 2.0
        if lowerEnd < 1 and upperEnd > 1 and middle_range == 1:
            l_interpolation = cv2.INTER_AREA
            u_interpolation = cv2.INTER_CUBIC
        elif upperEnd < 1 and lowerEnd < upperEnd:
            l_interpolation = cv2.INTER_AREA
            u_interpolation = cv2.INTER_AREA
        elif lowerEnd > 1 and upperEnd > lowerEnd:
            l_interpolation = cv2.INTER_CUBIC
            u_interpolation = cv2.INTER_CUBIC
        else:
            l_interpolation = cv2.INTER_NEAREST
            u_interpolation = cv2.INTER_NEAREST

        results_list = []
        regions_detected = []
        count = 0
        loop_limiter = (middle_range - lowerEnd) * 100

        # creating and launching worker processes
        pool = ThreadPool(processes=4)

        while (upperEnd > lowerEnd) and (count < loop_limiter):
            l_result = pool.apply_async(
                cls.resize_and_match,
                (template, lowerEnd, similarity, l_interpolation))
            u_result = pool.apply_async(
                cls.resize_and_match,
                (template, upperEnd, similarity, u_interpolation))
            cls.script_sleep(0.01)
            lowerEnd += 0.02
            upperEnd -= 0.02
            count += 1
            results_list.append(l_result)
            results_list.append(u_result)

        # closing pool and waiting for results
        pool.close()
        pool.join()

        # extract regions from async_result
        for async_result in results_list:
            region = async_result.get()
            if region is not None:
                regions_detected.append(region)

        if (len(regions_detected) > 0):
            return regions_detected[0]
        else:
            return None
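resize_and_match is referenced above but not shown here; a minimal sketch consistent with how it is called (template, scale factor, similarity threshold, interpolation flag, with `screen` and `Region` as used above) might look like:

    @classmethod
    def resize_and_match(cls, template, scale, similarity, interpolation):
        # Hypothetical helper: resize the template once and run one match pass.
        resized = cv2.resize(template, None, fx=scale, fy=scale,
                             interpolation=interpolation)
        width, height = resized.shape[::-1]
        match = cv2.matchTemplate(screen, resized, cv2.TM_CCOEFF_NORMED)
        _, value, _, location = cv2.minMaxLoc(match)
        if value >= similarity:
            return Region(location[0], location[1], width, height)
        return None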
Example #44
0
                                 cv2.DIST_MASK_PRECISE)  #(432,768)
    distborder = cv2.copyMakeBorder(dist, borderSize, borderSize, borderSize,
                                    borderSize,
                                    cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED,
                                    0)  #(512,848)

    gap = 10
    kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                        (2 * (borderSize - gap) + 1, 2 *
                                         (borderSize - gap) + 1))  #(61,61)
    kernel2 = cv2.copyMakeBorder(kernel2, gap, gap, gap, gap,
                                 cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED,
                                 0)  #(81,81)
    distTempl = cv2.distanceTransform(kernel2, cv2.DIST_L2,
                                      cv2.DIST_MASK_PRECISE)  #(81,81)
    nxcor = cv2.matchTemplate(distborder, distTempl,
                              cv2.TM_CCOEFF_NORMED)  #(432,768)

    mn, mx, _, _ = cv2.minMaxLoc(nxcor)  #mn=-0.51,mx=0.945935
    th, peaks = cv2.threshold(nxcor, mx * 0.5, 255,
                              cv2.THRESH_BINARY)  #th=0.47,peaks=(432,768)
    peaks8u = cv2.convertScaleAbs(peaks)  #(432,768)
    _, contours, hierarchy = cv2.findContours(peaks8u, cv2.RETR_CCOMP,
                                              cv2.CHAIN_APPROX_SIMPLE)
    peaks8u = cv2.convertScaleAbs(peaks)  # to use as mask

    # plot reference lines (entrance and exit lines)
    coordYEntranceLine = (height // 2) + OffsetRefLines
    coordYMiddleLine = (height // 2)
    coordYExitLine = (height // 2) - OffsetRefLines
    cv2.line(frame40, (0, coordYEntranceLine), (width, coordYEntranceLine),
             (255, 0, 250), 2)
Example #45
0
def battle():
    turn, stage, stageTurn, servant = 0, 0, 0, [0, 1, 2]
    while True:
        if Check(.1).isTurnBegin():
            turn += 1
            stage, stageTurn, skill, newPortrait = (
                lambda chk: (lambda x: [x, stageTurn + 1 if stage == x else 1])
                (chk.getStage()) + [chk.isSkillReady(),
                                    chk.getPortrait()])(Check(.35))
            if turn == 1: stageTotal = check.getStageTotal()
            else:
                servant = (lambda m, p: [
                    m + p.index(i) + 1 if i in p else servant[i]
                    for i in range(3)
                ])(max(servant), [
                    i for i in range(3) if servant[i] < 6
                    and cv2.matchTemplate(newPortrait[i], portrait[i],
                                          cv2.TM_SQDIFF_NORMED)[0][0] > .03
                ])
            if stageTurn == 1 and dangerPos[stage - 1]:
                doit(('\x69\x68\x67\x66\x65\x64'[dangerPos[stage - 1] - 1],
                      '\xDC'), (250, 500))
            portrait = newPortrait
            logger.info(f'{turn} {stage} {stageTurn} {servant}')
            for i, j in (
                (i, j) for i in range(3) if servant[i] < 6 for j in range(3)
                    if skill[i][j] and skillInfo[servant[i]][j][0]
                    and min(skillInfo[servant[i]][j][0], stageTotal) << 8
                    | skillInfo[servant[i]][j][1] <= stage << 8 | stageTurn):
                doit(('ASD', 'FGH', 'JKL')[i][j], (300, ))
                if skillInfo[servant[i]][j][2]:
                    doit('234'[skillInfo[servant[i]][j][2] - 1], (300, ))
                sleep(2.3)
                while not Check(0, .2).isTurnBegin():
                    pass
            for i in (i for i in range(3)
                      if stage == min(masterSkill[i][0], stageTotal)
                      and stageTurn == masterSkill[i][1]):
                doit(('Q', 'WER'[i]), (300, 300))
                if masterSkill[i][2]:
                    if i == 2 and masterSkill[2][3]:
                        doit(('TYUIOP'[masterSkill[2][2] - 1],
                              'TYUIOP'[masterSkill[2][3] - 1], 'Z'),
                             (300, 300, 300))
                    else:
                        doit('234'[masterSkill[i][2] - 1], (300, ))
                sleep(2.3)
                while not Check(0, .2).isTurnBegin():
                    pass
            doit(' ', (2250, ))
            doit((lambda c, h: [
                '678'[i]
                for i in sorted((i for i in range(3) if h[i]),
                                key=lambda x: -houguInfo[servant[x]][1])
            ] + [
                '12345'[i]
                for i in sorted(range(5),
                                key=(lambda x: c[x] << 1 & 2 | c[x] >> 1 & 1)
                                if any(h) else
                                (lambda x: -1 if c[x] != -1 and c.count(c[x])
                                 >= 3 else c[x] << 1 & 2 | c[x] >> 1 & 1))
            ])(Check().getABQ(), [
                servant[i] < 6 and j and houguInfo[servant[i]][0]
                and stage >= min(houguInfo[servant[i]][0], stageTotal)
                for i, j in zip(range(3), check.isHouguReady())
            ]), (270, 270, 270, 270, 10000))
        elif check.isBattleFinished():
            logger.info('Battle Finished')
            return True
        elif check.isBattleFailed():
            logger.warning('Battle Failed')
            return False
Example #46
0
import cv2
import numpy as np
import matplotlib
matplotlib.use('TkAgg')

from matplotlib import pyplot as plt


img_rgb = cv2.imread('buguan.png')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

template = cv2.imread('buguanta.jpg',0)
w, h = template.shape[::-1]
 
method = cv2.TM_CCOEFF_NORMED

# Apply template Matching
res = cv2.matchTemplate(img_gray,template,method)

threshold = 0.8
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)

cv2.imwrite('res.png', img_rgb)
Example #47
0
 def getPartyIndex(self):
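     # x-offset of the best match selects the tab; presumably each tab is 37 px wide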
     return cv2.minMaxLoc(
         cv2.matchTemplate(self.im[58:92, 768:1152], IMG_PARTYINDEX,
                           cv2.TM_SQDIFF_NORMED))[2][0] // 37 + 1
Example #48
0
tries = 0



small = cv2.resize(img_gray.copy(), (0,0), fx=scale, fy=scale)
found = False


while not found and threshold >= 0.2:

	while scale <= 1 and not found:
		small = cv2.resize(img_gray.copy(), (0,0), fx=scale, fy=scale)
		try:
			if resize_flag == 1:
				res = cv2.matchTemplate(small, resized_temp, cv2.TM_CCOEFF_NORMED)
			else:
				res = cv2.matchTemplate(small, template, cv2.TM_CCOEFF_NORMED)
		except cv2.error:
			# bail out of the scale loop; res would be undefined below
			print('size issues')
			break

		loc = np.where( res >= threshold)
		resized = cv2.resize(img_rgb.copy(), (0,0), fx=scale, fy=scale)
		tries+=1
		for pt in zip(*loc[::-1]):
			found = True
			
			if resize_flag == 1:
				cv2.rectangle(resized, pt, ((pt[0] + int(w*ratio)), pt[1] + int(h*ratio)), (255,0,1), 2)
Example #49
0
 def select(self, img, rect=(0, 0, 1920, 1080)):
     # Return the index of the template with the lowest (best) SQDIFF score
     scores = [
         cv2.minMaxLoc(
             cv2.matchTemplate(self.im[rect[1]:rect[3], rect[0]:rect[2]], i,
                               cv2.TM_SQDIFF_NORMED))[0] for i in img
     ]
     return scores.index(min(scores))
Example #50
0
#!/usr/bin/env python

# ..
import numpy as np
import cv2

IMG_NAME = 'naturmort2'

# .. read image
img = cv2.imread('/data/%s.jpg' % IMG_NAME, 1)
template = cv2.imread('/data/%s_tpl.jpg' % IMG_NAME, 1)
h, w = template.shape[:-1]
# ========================

# 1. match template
res = cv2.matchTemplate(img.copy(), template, cv2.TM_CCOEFF)

# 2. calculate rectangle coordinates
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print(top_left, bottom_right)

# 3. to paint blue rectangle
cv2.rectangle(img, top_left, bottom_right, 255, 2)

# ========================
# ..write
cv2.imwrite('%s.png' % IMG_NAME, img)
Example #51
0
                resizeratio = wratio

            if resizeratio > 0:
                newheight = float(h) / resizeratio
                newwidth = float(w) / resizeratio

            resized_t = cv.resize(ctemp, (int(newwidth), int(newheight)),
                                  interpolation=cv.INTER_CUBIC)
            # cv2.imshow("Resized", resized_t)
            cv.imwrite(str(pid) + "rz.png", resized_t)

            nt = cv.imread(str(pid) + "rz.png", 0)
            wt, ht = nt.shape[::-1]

            # Apply template Matching 1
            res1 = cv.matchTemplate(img_gray, nt, roiMatchMethod1)
            min_val1, max_val1, min_loc1, max_loc1 = cv.minMaxLoc(res1)
            print "min_val1 : " + str(min_val1) + " max_val1 : " + str(
                max_val1) + " min_loc1 : " + str(
                    min_loc1) + " max_loc1 : " + str(max_loc1)
            loc1 = np.where(res1 >= threshold1)
            zip_loc1 = zip(*loc1[::-1])

            # Apply template Matching 2
            res2 = cv.matchTemplate(img_gray, nt, roiMatchMethod2)
            min_val2, max_val2, min_loc2, max_loc2 = cv.minMaxLoc(res2)
            print "min_val2 : " + str(min_val2) + " max_val2 : " + str(
                max_val2) + " min_loc2 : " + str(
                    min_loc2) + " max_loc2 : " + str(max_loc2)

            if max_val2 >= threshold2:
Example #52
0
 def score_images(self, im1, im2, method="orb"):
     """Score the similarity between two images according to differents methods.
        im1 = framed photo ; im2 = db_im"""
     score = 0
     if method == "ssim":
         im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         score = ssim(cv2.resize(im1, (im2.shape[1], im2.shape[0]), interpolation = cv2.INTER_AREA), im2, multichannel=False)
     if method == "hist_inter":
         #crop_zone = (20,35,203,171)
         #im1 = im1[20:203, 35:171]
         #im2 = im2[20:203, 35:171]
         # photo_hist = cv2.calcHist([cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0,256])
         # gray_card_im = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # image_hist = cv2.calcHist([gray_card_im], [0], None, [256], [0,256])
         score = 0
         r = range(0,im1.shape[-1])
         for i in r:
             photo_hist = cv2.calcHist([im1], [i], None, [256], [0,256])
             image_hist = cv2.calcHist([im2], [i], None, [256], [0,256])
             score += cv2.compareHist(photo_hist, image_hist, method = cv2.HISTCMP_INTERSECT)
     if method == "cor":
         im1 = cv2.resize(im1, (im2.shape[1], im2.shape[0]))
         # im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         # im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         score = cv2.matchTemplate(im2, im1, cv2.TM_CCOEFF_NORMED)
         # score = self.correlation_coefficient(im1, im2)
     if method == "diff":
         im1 = cv2.resize(im1, (im2.shape[1], im2.shape[0]))
         diff = im1 - im2
         matrix = np.array(diff)
         flat = matrix.flatten()
         numchange = np.count_nonzero(flat)
         score = 100 * float(numchange) / float(len(flat))
     if method == "hog":
         im1 = cv2.resize(im1, (im2.shape[1], im2.shape[0]))
         im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # H1 = feature.hog(im1, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=True)
         # H2 = feature.hog(im2, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=True)
         # score = cv2.compareHist(np.float32(H1), np.float32(H2), method = cv2.HISTCMP_BHATTACHARYYA)
         im1 = np.float32(im1) / 255.0
         im2 = np.float32(im2) / 255.0
         # Calculate gradient 
         im1_gx = cv2.Sobel(im1, cv2.CV_32F, 1, 0, ksize=1)
         im1_gy = cv2.Sobel(im1, cv2.CV_32F, 0, 1, ksize=1)
         im2_gx = cv2.Sobel(im2, cv2.CV_32F, 1, 0, ksize=1)
         im2_gy = cv2.Sobel(im2, cv2.CV_32F, 0, 1, ksize=1)
         # Calculate gradient magnitude and direction (in degrees)
         mag1, angle1 = cv2.cartToPolar(im1_gx, im1_gy, angleInDegrees=True)
         mag2, angle2 = cv2.cartToPolar(im2_gx, im2_gy, angleInDegrees=True)
         # Compute correlation between angles
         # (h, w) = angle1.shape[:2]
         # print(h)
         # print(w)
         score = np.nanmin(1 - scipy.spatial.distance.cdist(angle1, angle2, "cosine"))
         # score = self.ccoeff_normed(angle1, angle2)
     if method == "orb":
         # See https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
         im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         im2 = cv2.imread(im2, 0)
         #im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # Initiate ORB detector
         orb = cv2.ORB_create()
         # find the keypoints and descriptors with ORB
         kp1, des1 = orb.detectAndCompute(im1,None)
         kp2, des2 = orb.detectAndCompute(im2,None)
         logging.info(des1.shape)
         logging.info(des2.shape)
         logging.info(type(des1))
         logging.info(type(des2))
         # create BFMatcher object
         bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
         # Match descriptors.
         matches = bf.match(des1,des2)
         matches = sorted(matches, key = lambda x:x.distance)
         score = sum(m.distance for m in matches[:20])
         # for m in matches[:20]:
             # score += m.distance
     return score
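A short usage sketch (image paths and the host class name `Matcher` are placeholders; note that for method="orb" this implementation expects im2 to be a file path, unlike the other methods):

# Hypothetical usage of score_images with two of the supported methods.
m = Matcher()  # placeholder name for the class defining score_images
photo = cv2.imread("photo.jpg")
card = cv2.imread("card.jpg")
print(m.score_images(photo, card, method="cor"))
print(m.score_images(photo, "card.jpg", method="orb"))  # orb reads im2 from disk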
Example #53
0
# Note how we are using strings; later we'll use eval() to turn each string into the actual cv2 constant

methods = [
    'cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
    'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'
]

for m in methods:
    # CREATE A COPY
    full_copy = full.copy()

    method = eval(m)

    # Template Matching

    res = cv2.matchTemplate(full_copy, face, method)

    min_val, max_val, min_location, max_location = cv2.minMaxLoc(res)

    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_location  # (x,y)
    else:
        top_left = max_location

    height, width, channels = face.shape

    bottom_right = (top_left[0] + width, top_left[1] + height)

    cv2.rectangle(full_copy, top_left, bottom_right, (255, 0, 0), 10)

    # PLOT AND SHOW THE IMAGES
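eval() is safe here only because the method strings are hard-coded. If you prefer to avoid eval() entirely, the same lookup can be done with getattr (a small sketch, assuming the same list of 'cv2.TM_*' strings):

    # Equivalent, eval-free lookup of the matching-method constant
    method = getattr(cv2, m.split('.')[-1])  # 'cv2.TM_CCOEFF' -> cv2.TM_CCOEFF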
Example #54
0
# Read the main image
img_rgb = cv2.imread('./Test/ScreenInput.png', cv2.IMREAD_COLOR)

# Convert it to grayscale
# img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

# Read the template
template = cv2.imread('./Test/QuadradoClickTeste.png', cv2.IMREAD_COLOR)

# template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)

# Store the height and width of the template in h and w
h, w = template.shape[:-1]

# Perform match operations.
res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)

# Specify a threshold
threshold = 0.7

# Store the coordinates of matched area in a numpy array
loc = np.where(res >= threshold)

# Draw a rectangle around the matched region.
for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)

# Show the final image with the matched area.
#cv2.imshow('Detected',img_rgb)

cv2.imwrite('./Test/encontrado.png', img_rgb)
Example #55
0

pic = im1
pic2 = cv.imread("bumps/a9.tif")
picGood = cv.imread("bumps/good5.tif")


gray_img = cv.cvtColor(pic, cv.COLOR_BGR2GRAY)
gray_img2 = cv.cvtColor(pic2, cv.COLOR_BGR2GRAY)
gray_imgGood = cv.cvtColor(picGood, cv.COLOR_BGR2GRAY)


template = cv.imread("bumps/good4.tif", cv.IMREAD_GRAYSCALE)
w, h = template.shape[::-1]

result = cv.matchTemplate(gray_img, template, cv.TM_CCOEFF_NORMED)
result2 = cv.matchTemplate(gray_img2, template, cv.TM_CCOEFF_NORMED)
resultGood = cv.matchTemplate(gray_imgGood, template, cv.TM_CCOEFF_NORMED)


print(result)
print(result2)
print(resultGood)



loc = np.where(result >= 0.82)


for pt in zip(*loc[::-1]):
    cv.rectangle(pic, pt, (pt[0] + 200, pt[1] + 200), (0, 255, 0), 3)
Example #56
0
def matchResImgInWindow(handle, imgName, threshold=0.8, mult=True):
    # Get the target image; the file name's last four fields encode its percent coordinates
    tmp = imgName.split(".")
    fSplit = tmp[0].split("_")
    fLen = len(fSplit)
    targetImgPerLeftX = int(fSplit[fLen-4])
    targetImgPerLeftY = int(fSplit[fLen-3])
    targetImgPerRightX = int(fSplit[fLen-2])
    targetImgPerRightY = int(fSplit[fLen-1])

    imgPath = path.getResDirPath()+imgName
    if not os.path.exists(path.getProjectPath()):
        os.makedirs(path.getProjectPath())
    targetImg = Image.open(imgPath)

    targetImgWidth = targetImg.size[0]
    targetImgHeight = targetImg.size[1]

    perSizeW = (targetImgPerRightX - targetImgPerLeftX) * 0.01
    resWinWidth = int(targetImgWidth / perSizeW)
    perSizeH = (targetImgPerRightY - targetImgPerLeftY) * 0.01
    resWinHeight = int(targetImgHeight / perSizeH)



    # Template image, converted to grayscale
    temImg = cv2.cvtColor(numpy.asarray(targetImg), cv2.COLOR_RGB2GRAY)
    
    targetImg.close()

    wLeft, wTop, wRight, wBottom = appGetWindowRect(handle)
    winImg = ImageGrab.grab(bbox=(wLeft, wTop, wRight, wBottom))

    winNowW = wRight - wLeft
    winNowH = wBottom - wTop


    # Scale the screenshot down to the resource image's reference resolution
    toMatchWinImgSrc = cv2.cvtColor(numpy.asarray(winImg), cv2.COLOR_RGB2GRAY)

    
    toMatchWinImg = cv2.resize(
        toMatchWinImgSrc, (resWinWidth, resWinHeight), interpolation=cv2.INTER_AREA)
    winImg.close()


    scaleValueW = winNowW / resWinWidth
    scaleValueH = winNowH / resWinHeight


    res = cv2.matchTemplate(toMatchWinImg, temImg, cv2.TM_CCOEFF_NORMED)

    xyList = []
    if mult:
        loc = numpy.where(res >= threshold)

        for pt in zip(*loc[::-1]):
            x = wLeft + int((pt[0] + (targetImgWidth >> 1)) * scaleValueW)
            y = wTop + int((pt[1] + (targetImgHeight >> 1)) * scaleValueH)
            xyList.append((x, y))

    else:  # single best match; noticeably less accurate
        _, _, _, maxLoc = cv2.minMaxLoc(res)  # location of the best match
        x = wLeft + int((maxLoc[0] + (targetImgWidth >> 1)) * scaleValueW)
        y = wTop + int((maxLoc[1] + (targetImgHeight >> 1)) * scaleValueH)
        xyList.append((x, y))


    print(xyList[:10])
    return xyList
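A usage sketch (the window handle and the percent-coordinate file name are illustrative):

# Hypothetical call: the trailing fields encode left/top/right/bottom percents.
hits = matchResImgInWindow(hwnd, "button_10_20_30_40.png", threshold=0.85)
for x, y in hits:
    print("match centred at", x, y)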
Example #57
0
abs_pos = 0
images = []
poss=[]
print "ready"
for i,frame in enumerate(camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)):
    image = frame.array
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)[:,::-1]
    thre = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,2)
    print(i, end=' ')
    if i == 1:
        pre_thre = thre[90:150,:]
        images.append(gray)
        poss.append(abs_pos)
        cv2.imshow("Frame", thre)
    elif i>1:
        res = cv2.matchTemplate(thre,pre_thre,cv2.TM_CCOEFF)
        pre_thre = thre[90:150,:]
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        bottom_right = (max_loc[0] + 320, max_loc[1] + 60)
        abs_pos += max_loc[1]-90
        images.append(gray)
        poss.append(abs_pos)
        print(abs_pos)
        #cv2.rectangle(thre,max_loc, bottom_right, 128, 2)
        cv2.imshow("Frame", thre)
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    if i == 120:
        break  # assumption: stop after 120 frames
Example #58
0
import cv2
import numpy as np

#load image
image = cv2.imread("tabriz.jpg")

#load template image & convert to grayscale
template=cv2.imread("b.jpg" , cv2.IMREAD_GRAYSCALE)

# reduce image & template to 50% of their original pixel size
image=cv2.resize(image,(0,0), fx=0.5 ,fy=0.5)
template=cv2.resize(template,(0,0), fx=0.5 ,fy=0.5)

#convert image to grayscale
gray_image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)

#extract size of template
w, h = template.shape[::-1]

#use method cv2.TM_CCOEFF_NORMED for template matching
result = cv2.matchTemplate(gray_image,template,cv2.TM_CCOEFF_NORMED)
location=np.where(result>=0.7)

#draw a rectangle around each match
for point in zip(*location[: : -1]):
    cv2.rectangle(image,point,(point[0]+w , point[1]+h),(205,0,0) , 1)

#show the final image
cv2.imshow("image",image)

cv2.waitKey(0)
Example #59
0
    # Process each digit in this group
    for c in digitCnts:
        # Bounding box of the current digit, resized to the template size
        (x, y, w, h) = cv2.boundingRect(c)
        roi = group[y:y + h, x:x + w]
        roi = cv2.resize(roi, (57, 88))
        cv_show('roi', roi)

        # Matching scores
        scores = []

        # Score the ROI against every template digit
        for (digit, digitROI) in digits.items():
            # Template matching
            result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF)
            (_, score, _,
             _) = cv2.minMaxLoc(result)  # minMaxLoc returns min/max values and their locations
            scores.append(score)

        # Keep the best-scoring digit
        groupOutput.append(str(np.argmax(scores)))  # np.argmax: index of the max score

    # Draw the result
    cv2.rectangle(image, (gX - 5, gY - 5), (gX + gW + 5, gY + gH + 5),
                  (0, 0, 255), 1)
    cv2.putText(image, "".join(groupOutput), (gX, gY - 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)

    # Final result
Example #60
0
import cv2
from matplotlib import pyplot as plt

# Load the images in grey scale
originalImg = cv2.imread('../data/messi5.jpg', 0)
template = cv2.imread('../data/messi_face.jpg', 0)
w, h = template.shape[::-1]

# Compare all the methods
methods = [
    'cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
    'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'
]

for method in methods:
    # Match the template
    methodCode = eval(method)
    result = cv2.matchTemplate(originalImg, template, methodCode)
    minValue, maxValue, minLoc, maxLoc = cv2.minMaxLoc(result)

    # If the methodCode is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum position
    if methodCode in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        topLeftCorner = minLoc
    else:
        topLeftCorner = maxLoc
    bottomRightCorner = (topLeftCorner[0] + w, topLeftCorner[1] + h)

    # Draw the square in a copy of the original image
    img = originalImg.copy()
    cv2.rectangle(img, topLeftCorner, bottomRightCorner, 255, 2)

    # Display the results
    plt.subplot(121)