Example 1
def fast_template_matching(img, tmpl, max_level):
    pyr_img = build_pyramid(img, max_level)
    pyr_tmpl = build_pyramid(tmpl, max_level)

    results = []
    for level in range(max_level,-1,-1):
        ref = pyr_img[level]
        tpl = pyr_tmpl[level]
        
        if level == max_level:
            results.append(cv2.matchTemplate(ref, tpl, cv2.TM_CCOEFF_NORMED))
        else:
            # upsample the coarser level's result to predict where to search
            mask = cv2.pyrUp(results[-1])
            (_, maxval, _, maxloc) = cv2.minMaxLoc(mask)
            if maxval < 0.5:
                break
            #print maxloc
            # shrink the search window to a small region around the predicted peak
            mask_h, mask_w = mask.shape
            mask_w = mask_w / 50
            mask_h = mask_h / 50
            tpl_h, tpl_w = tpl.shape
            y = maxloc[1] - mask_h/2
            x = maxloc[0] - mask_w/2
            w = mask_w + tpl_w
            h = mask_h + tpl_h
            res = np.zeros(ref.shape, np.float32)
            if x+w > ref.shape[1] or y+h > ref.shape[0] or x < 0 or y < 0:
                # Out of bounds
                return (0,(0,0))
            
            res[y:y+mask_h+1,x:x+mask_w+1] = cv2.matchTemplate(ref[y:y+h,x:x+w], tpl, cv2.TM_CCOEFF_NORMED)
            results.append(res)

    (_, maxval, _, maxloc) = cv2.minMaxLoc(results[-1])
    return maxval, maxloc
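
Example 1 calls a build_pyramid helper that is not shown. A minimal sketch, assuming it returns a list of images from the original (index 0) up to max_level halvings, could be:

import cv2

def build_pyramid(img, max_level):
    # level 0 is the original image; each further level halves the size
    pyramid = [img]
    for _ in range(max_level):
        pyramid.append(cv2.pyrDown(pyramid[-1]))
    return pyramid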
Example 2
def matchTemplate(img, tmpl, *args):
  imgs, tmpls, fctrs = [img], [tmpl], [1]
  idx = 0
  for arg in args:
    if isinstance(arg, (int,float)):
      fctrs[idx] = arg
    else:
      if idx > len(tmpls)-1:
        tmpls.append(arg)
      else:
        imgs.append(arg)
        fctrs.append(1)
        idx += 1
  res, maxVals = 0, []
  for i, t, f in zip(imgs, tmpls, fctrs):
    r = cv2.matchTemplate(i, t, cv2.TM_CCOEFF_NORMED)
    maxVals.append(cv2.minMaxLoc(r)[1])
    if len(imgs) > 1:
      r = normalize(r, max=1) * f
    res += r
  minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(res)
  tlx, tly = maxLoc
  br = (tlx+tmpl.shape[1], tly+tmpl.shape[0])
  minD = min(min(maxLoc), img.shape[1]-br[0], img.shape[0]-br[1])
  return res, tlx, tly, br, minD, maxVal, maxVals
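
Example 2 also relies on a normalize helper that is not shown. A minimal sketch consistent with its use here (rescale a response map so its peak equals max) might be:

def normalize(arr, max=1.0):
    # hypothetical helper: scale the response so its maximum equals `max`
    peak = arr.max()
    return arr * (max / peak) if peak else arr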
Example 3
	def box_sobel(self, img):
		SOBEL_K = 3
		K = 0.1

		sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=SOBEL_K).astype(np.float64)
		sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=SOBEL_K).astype(np.float64)

		fil, col = img.shape
		maskx = 1 + np.tile(np.arange(col), (fil,1))
		maskx_ = maskx[::,::-1]
		masky = 1 + np.tile(np.arange(fil).reshape((fil,1)), (1, col))
		masky_ = masky[::-1,::]

		distx  = np.absolute(sobelx) * np.exp(-maskx*K)
		distx_ = np.absolute(sobelx) * np.exp(-maskx_*K)
		disty  = np.absolute(sobely) * np.exp(-masky*K)
		disty_ = np.absolute(sobely) * np.exp(-masky_*K)

		_, _, _, x0 = cv2.minMaxLoc(distx)
		_, _, _, x1 = cv2.minMaxLoc(distx_)
		_, _, _, y0 = cv2.minMaxLoc(disty)
		_, _, _, y1 = cv2.minMaxLoc(disty_)

		x0y, x0x = x0
		x1y, x1x = x1
		y0y, y0x = y0
		y1y, y1x = y1
		box = (x0y, x1y+1, y0x, y1x+1)
		return box
Example 4
def myfindChessboardCorners(im,dim):
    gr=30
    patern=np.zeros((gr,gr),dtype='uint8')
    patern[:gr/2,:gr/2]=255
    patern[gr/2:,gr/2:]=255
    m1=cv2.matchTemplate(im,patern,cv2.TM_CCORR_NORMED)
    patern=np.ones((gr,gr),dtype='uint8')*255
    patern[:gr/2,:gr/2]=0
    patern[gr/2:,gr/2:]=0
    m2=cv2.matchTemplate(im,patern,cv2.TM_CCORR_NORMED)
    #m=np.bitwise_or(m1>0.9,m2>0.9)
    #import pdb;pdb.set_trace()
    tresh=0.95
    labels=ndimage.label(np.bitwise_or(m1>tresh,m2>tresh))
    if labels[1]!=dim[0]*dim[1]:
        return False,[]
    objs=ndimage.find_objects(labels[0])
    corners=[]
    for xx,yy in objs:
        xpos=(xx.start+xx.stop)/2.0#+gr/2-0.5
        ypos=(yy.start+yy.stop)/2.0#+gr/2-0.5
        se=5
        #import pdb;pdb.set_trace()
        minVal, maxVal, minLoc, maxLoc=cv2.minMaxLoc(m2[xpos-se:xpos+se,ypos-se:ypos+se])
        if maxVal<tresh:
            minVal, maxVal, minLoc, maxLoc=cv2.minMaxLoc(m1[xpos-se:xpos+se,ypos-se:ypos+se])
        xpos+=-se+maxLoc[0]+gr/2-0.5
        ypos+=-se+maxLoc[1]+gr/2-0.5
        
        #xpos=xx.start+gr/2
        #ypos=yy.start+gr/2
        corners.append((ypos,xpos) )
    return True,np.array(corners)
Example 5
def GetEyeCorners(orig_img, leftTemplate, rightTemplate,pupilPosition=None):
	if len(leftTemplate) and len(rightTemplate):
		ccnorm_left = cv2.matchTemplate(orig_img, leftTemplate, cv2.TM_CCOEFF_NORMED)
		ccnorm_right = cv2.matchTemplate(orig_img, rightTemplate, cv2.TM_CCOEFF_NORMED)

		minVal, maxVal, minLoc, maxloc_left_from = cv2.minMaxLoc(ccnorm_left)
		minVal, maxVal, minLoc, maxloc_right_from = cv2.minMaxLoc(ccnorm_right)

		# shape is (rows, cols) == (height, width)
		l_h, l_w = leftTemplate.shape
		max_loc_left_from_x = maxloc_left_from[0]
		max_loc_left_from_y = maxloc_left_from[1]

		max_loc_left_to_x = max_loc_left_from_x + l_w
		max_loc_left_to_y = max_loc_left_from_y + l_h

		maxloc_left_to = (max_loc_left_to_x, max_loc_left_to_y)

		r_h, r_w = rightTemplate.shape
		max_loc_right_from_x = maxloc_right_from[0]
		max_loc_right_from_y = maxloc_right_from[1]

		max_loc_right_to_x = max_loc_right_from_x + r_w
		max_loc_right_to_y = max_loc_right_from_y + r_h
		maxloc_right_to = (max_loc_right_to_x, max_loc_right_to_y)

		return (maxloc_left_from, maxloc_left_to, maxloc_right_from, maxloc_right_to)
Example 6
    def isFillChanged(self, imageA, imageB):
        
        fillChanged = False    

        template = cv2.imread('fill.png',0)
        template = 255 - template

        imageA = cv2.imread(imageA,0)
        imageA = 255 - imageA
        
        imageB = cv2.imread(imageB,0)
        imageB = 255 - imageB
        

        res = cv2.matchTemplate(imageA,template,cv2.TM_CCORR_NORMED) # method 3 == cv2.TM_CCORR_NORMED
        min_val, max_val_A, min_loc, max_loc = cv2.minMaxLoc(res)

        res = cv2.matchTemplate(imageB,template,cv2.TM_CCORR_NORMED) # method 3 == cv2.TM_CCORR_NORMED
        min_val, max_val_B, min_loc, max_loc = cv2.minMaxLoc(res)
        
        if (max_val_A >= 0.7 and max_val_B < 0.7):
            fillChanged = True

        if (max_val_A < 0.7 and max_val_B >= 0.7):
            fillChanged = True

        return fillChanged
Example 7
 def __GetEyeCorners(self, grayscale, leftTemplate, rightTemplate, pupilPosition=None):
     """Given two templates and the pupil center returns the eye corners position."""
     corners = []
     
     if len(leftTemplate) and len(rightTemplate):
         # Template match the templates on the image
         ccnormed_left = cv2.matchTemplate(grayscale, leftTemplate, cv2.TM_CCOEFF_NORMED)
         ccnormed_right = cv2.matchTemplate(grayscale, rightTemplate, cv2.TM_CCOEFF_NORMED)
         
         #cv2.imshow("Left Template", ccnormed_left)
         #cv2.imshow("Right Template", ccnormed_right)
         
         # Get upper left corner of the templates
         minVal, maxVal, minLoc, maxLoc_left_from = cv2.minMaxLoc(ccnormed_left)
         minVal, maxVal, minLoc, maxLoc_right_from = cv2.minMaxLoc(ccnormed_right)
         
         # Calculate lower right corner of the templates
         maxLoc_left_to = (maxLoc_left_from[0] + leftTemplate.shape[1], maxLoc_left_from[1] + leftTemplate.shape[0])
         maxLoc_right_to = (maxLoc_right_from[0] + rightTemplate.shape[1], maxLoc_right_from[1] + rightTemplate.shape[0])
         
         corners.append(maxLoc_left_from)
         corners.append(maxLoc_left_to)
         corners.append(maxLoc_right_from)
         corners.append(maxLoc_right_to)
     return corners
Example 8
 def match_template(self, img, template, threshold, multiple=False, mml=False):
     """Matches a template
     Converts it to grayscale
     Converts img to detect edges
     Checks against provided threshold if matched.
     Return None when below threshold"""
     img = img.convert('L')
     template = template.convert('L')
     if img.size[0] < template.size[0] or img.size[1] < template.size[1]:
         logger.error('Template cannot be larger than the image provided')
         return
     res = cv2.matchTemplate(np.array(img), np.array(template), cv2.TM_CCOEFF_NORMED)
     if mml:
         return cv2.minMaxLoc(res)
     elif not multiple:
         mml = cv2.minMaxLoc(res)
         logger.debug(f'Min Max Loc: {mml}')
         if mml[0] == 1:
             # todo fix mml
             logger.error(f'mml {mml} is broken with minimum being max :(')
             return False
         loc_found = mml[1] >= threshold
         logger.info(f'template match found: {loc_found} @ {mml[-1]} [{mml[1]:.2f} >= {threshold:.2f}]')
         return loc_found and list(mml[-1])
     else:
         locs = np.where(res >= threshold)
         locs = [list(l) for l in zip(*locs[::-1])]
         logger.info(f'template match found {len(locs):d} with threshold {threshold:.2f}')
         return list(map(list, locs))
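
Note that Example 8 expects PIL images (it calls img.convert('L') and reads img.size), not numpy arrays. A usage sketch, with hypothetical file names and a hypothetical matcher instance:

from PIL import Image

screen = Image.open('screen.png')   # hypothetical files
button = Image.open('button.png')
loc = matcher.match_template(screen, button, threshold=0.8)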
Example 9
def detectCard(card):
    crop = card[0:50, 0:20]
    gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    black = cv2.inRange(crop, (0,0,0), (50, 50, 50))
    color = 0
    if (np.sum(black) > 5000):
        color = 0
    else:
        color = 1
    symbol0 = ''
    symbol0v = 0
    symbol1 = ''
    max_value = 0
    results = []
    for symbol in symbols1:
        res = cv2.matchTemplate(gray, symbol, cv2.TM_CCOEFF_NORMED)
        minv, maxv, minloc, maxloc = cv2.minMaxLoc(res)
        results.append(maxv)
    max_value = max(results)
    if max_value >= symbols1_threshold:
        symbol1 = symbols1_name[results.index(max_value)]
    results = []
    for symbol in symbols0:
        res = cv2.matchTemplate(gray, symbol, cv2.TM_CCOEFF_NORMED)
        minv, maxv, minloc, maxloc = cv2.minMaxLoc(res)
        results.append(maxv)
    max_value = max(results)
    if max_value >= symbols0_threshold:
        symbol0 = symbols_name[results.index(max_value)]
        symbol0v = symbols_value[results.index(max_value)]
#    if color == 0 and (symbol1 == 'D' or symbol1 == 'H'):
#        symbol1 = ''
#    elif color == 1 and (symbol1 == 'S' or symbol1 == 'C'):
#        symbol1 = ''
    return symbol0, symbol1, color, symbol0v
Example 10
    def tplmatch(self, source_image, master_image, algo=None):
        """ テンプレートマッチング 処理 """
        if algo is None:
            algo = 5
        # 類似度判定アルゴリズム 解説# {{{
        # cv2.TM_SQDIFF    :輝度値の差の2乗の合計     小さいほど類似
        # cv2.TM_CCORR     :輝度値の相関               大きいほど類似
        # cv2.TM_CCOEFF    :輝度値の平均を引いた相関   大きいほど類似
        #                  (テンプレート画像と探索画像の明暗差に影響され難い)
        # cv2.TM_***_NORMED :上記それぞれの正規化版
        # }}} """
        ALGOS = ["cv2.TM_SQDIFF",
                 "cv2.TM_SQDIFF_NORMED",
                 "cv2.TM_CCORR",
                 "cv2.TM_CCORR_NORMED",
                 "cv2.TM_CCOEFF",
                 "cv2.TM_CCOEFF_NORMED"]

        cmt = cv2.matchTemplate
        match = cmt(source_image, master_image, eval(ALGOS[algo]))

        if ALGOS[algo] in ["cv2.TM_SQDIFF", "cv2.TM_CCORR", "cv2.TM_CCOEFF"]:
            # normalize the raw scores
            norm = self.cim.normalize(match)
            # min/max similarity values and their locations
            val_min, val_max, loc_min, loc_max = cv2.minMaxLoc(norm)
            val_min, val_max, loc_min, loc_max = cv2.minMaxLoc(norm)
        else:
            val_min, val_max, loc_min, loc_max = cv2.minMaxLoc(match)

        return match, val_min, val_max, loc_min, loc_max
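
The eval over a fixed table works here; a lookup that avoids eval, assuming the same ALGOS list as above, could be:

# e.g. "cv2.TM_CCOEFF_NORMED" -> cv2.TM_CCOEFF_NORMED, without eval
method = getattr(cv2, ALGOS[algo].split(".", 1)[1])
match = cv2.matchTemplate(source_image, master_image, method)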
Example 11
def img_properties(img,description=None):
    if description:
        print description.upper(), "......................."
    print type(img)
    print img.dtype
    print img.shape
    #print img   
    print cv2.minMaxLoc(img)
Example 12
def scaleImgValues(img):
	maxVal = cv2.minMaxLoc(img)[1]

	if maxVal>0:
		print 'values '+str(cv2.minMaxLoc(img))
		print 'coef '+str(256/maxVal)+' highest value '+str(maxVal*(256/maxVal))
		aux = np.array((255/maxVal)*1.1*img,np.float)
		img = np.array(np.clip(aux,0,255),np.uint8)
		print 'max value as numpy array '+str(img.max())
		print 'new values '+str(cv2.minMaxLoc(img))
	return img
Example 13
def _MinMaxLock2nd(arr,ex_size,is_min):
    if is_min: idx = 0
    else: idx = 1
    status = cv2.minMaxLoc(arr)
    pt1 = (max(status[2+idx][0]-ex_size[0]/2,0),
           max(status[2+idx][1]-ex_size[1]/2,0))
    pt2 = (min(status[2+idx][0]+ex_size[0]/2,arr.shape[1]),
           min(status[2+idx][1]+ex_size[1]/2,arr.shape[0]))
    mask = np.ones((arr.shape[0], arr.shape[1]), dtype=np.uint8) * 255
    # pt1/pt2 are (x, y); numpy indexing is [row, col] == [y, x]
    mask[pt1[1]:pt2[1], pt1[0]:pt2[0]] = 0
    status2 = cv2.minMaxLoc(arr, mask)
    return (status[0+idx],status2[0+idx],status[2+idx],status2[2+idx])
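
A usage sketch for Example 13, assuming res is a TM_CCOEFF_NORMED response map where larger values are better:

# best and second-best maxima, excluding a 20x20 window around the best one
best, second, best_loc, second_loc = _MinMaxLock2nd(res, (20, 20), is_min=False)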
Example 14
def motion(image, ref, focus=(333, 666, 333, 666), maxaccel=0, delta=(0,0), antishake=2):
    """
    ref画像の,focusで指定された領域内の画像と同じ画像を,image内でさがして,その変位を返す.
    maxaccelとdeltaが指定されている場合は,探索範囲を絞り高速にマッチングできる.
    """
    logger = logging.getLogger()
    hi,wi = ref.shape[0:2]
    wmin = wi*focus[0]//1000
    wmax = wi*focus[1]//1000
    hmin = hi*focus[2]//1000
    hmax = hi*focus[3]//1000
    template = ref[hmin:hmax,wmin:wmax,:]
    h,w = template.shape[0:2]

    # Apply template Matching
    if maxaccel == 0:
        res = cv2.matchTemplate(image,template,cv2.TM_SQDIFF_NORMED)
        #loc is given by x,y
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        return min_loc[0] - wmin, min_loc[1] - hmin
    else:
        #use delta here
        roix0 = wmin + delta[0] - maxaccel
        roiy0 = hmin + delta[1] - maxaccel
        roix1 = wmax + delta[0] + maxaccel
        roiy1 = hmax + delta[1] + maxaccel
        affine = np.matrix(((1.0,0.0,-roix0),(0.0,1.0,-roiy0)))
        logger.debug("maxaccel:{0} delta:{1}".format(maxaccel,delta))
        crop = cv2.warpAffine(image, affine, (roix1-roix0,roiy1-roiy0))
        #imageh,imagew = image.shape[0:2]
        #if roix0 < 0 or roix1 >= imagew or roiy0 < 0 or roiy1 >= imageh:
        #    print(roix0,roix1,roiy0,roiy1,imagew,imageh)
        #    return None
        #crop = image[roiy0:roiy1, roix0:roix1, :]
        res = cv2.matchTemplate(crop,template,cv2.TM_SQDIFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        #loc is given by x,y

        #Test: isn't it just a background?
        roix02 = wmin - antishake
        roiy02 = hmin - antishake
        roix12 = wmax + antishake
        roiy12 = hmax + antishake
        crop = image[roiy02:roiy12, roix02:roix12, :]
        res = cv2.matchTemplate(crop,template,cv2.TM_SQDIFF_NORMED)
        min_val2, max_val2, min_loc2, max_loc2 = cv2.minMaxLoc(res)
        #loc is given by x,y
        if min_val <= min_val2:
            return (min_loc[0] + roix0 - wmin, min_loc[1] + roiy0 - hmin)
        else:
            return (min_loc2[0] + roix02 - wmin, min_loc2[1] + roiy02 - hmin)
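
A usage sketch for Example 14, assuming frame0 and frame1 are consecutive video frames as BGR arrays:

# displacement of frame0's central region when searched for in frame1
dx, dy = motion(frame1, frame0, focus=(333, 666, 333, 666))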
Example 15
    def read_time(self,path):

        img = cv2.imread(path,0) # total image

        x_min = 2195
        x_max = 2208
        y_min = 783
        y_max = 794
        minute_img = img[y_min:y_max,x_min:x_max]
        
        x_min = 2216
        x_max = 2229
        second1_img= img[y_min:y_max,x_min:x_max]

        x_min = 2232
        x_max = 2245
        second2_img= img[y_min:y_max,x_min:x_max]

        minute  = None
        second1 = None
        second2 = None

        minute_globalMax  = 0
        second1_globalMax = 0
        second2_globalMax = 0

        for i in range(0,10):
            minute_result  = cv2.matchTemplate(minute_img,self.numbers[i],cv2.TM_CCOEFF_NORMED)
            second1_result = cv2.matchTemplate(second1_img,self.numbers[i],cv2.TM_CCOEFF_NORMED)
            second2_result = cv2.matchTemplate(second2_img,self.numbers[i],cv2.TM_CCOEFF_NORMED)

            minute_min_val, minute_max_val, a, b = cv2.minMaxLoc(minute_result)
            second1_min_val, second1_max_val, a, b = cv2.minMaxLoc(second1_result)
            second2_min_val, second2_max_val, a, b = cv2.minMaxLoc(second2_result)

            if minute_max_val > minute_globalMax:
                minute_globalMax = minute_max_val
                minute = i

            if second1_max_val > second1_globalMax:
                second1_globalMax = second1_max_val
                second1 = i

            if second2_max_val > second2_globalMax:
                second2_globalMax = second2_max_val
                second2 = i

        #print("Time---> "+str(minute)+":"+str(second1)+str(second2))

        return [minute,second1,second2]
Example 16
def match(img):
    
    tmp_n = cv2.imread('tmp_n.jpg')
    tmp_p = cv2.imread('tmp_p.jpg')
    tmp_g = cv2.imread('tmp_p.jpg')  # note: loads the same file as tmp_p, as in the original
    # cvtColor needs a color-conversion code; the original passed cv2.CV_32FC1 (a depth flag)
    tmp_n = cv2.cvtColor(tmp_n, cv2.COLOR_BGR2GRAY)
    tmp_p = cv2.cvtColor(tmp_p, cv2.COLOR_BGR2GRAY)
    tmp_g = cv2.cvtColor(tmp_g, cv2.COLOR_BGR2GRAY)
    
    bad = True
    
    while(bad):
        d1 = cv2.matchTemplate(tmp_n, img, cv2.cv.CV_TM_SQDIFF_NORMED)
        d2 = cv2.matchTemplate(tmp_p, img, cv2.cv.CV_TM_SQDIFF_NORMED)
        d3 = cv2.matchTemplate(tmp_g, img, cv2.cv.CV_TM_SQDIFF_NORMED)
        
        mn1,_,mnLoc1,_ = cv2.minMaxLoc(d1)
        mn2,_,mnLoc2,_ = cv2.minMaxLoc(d2)
        mn3,_,mnLoc3,_ = cv2.minMaxLoc(d3)
        
        mn = min(mn1,mn2,mn3)
        
        if mn == mn1:
            MPx,MPy = mnLoc1
            trows,tcols = tmp_n.shape[:2]
        elif mn == mn2:
            MPx,MPy = mnLoc2
            trows,tcols = tmp_p.shape[:2]
        else:
            MPx,MPy = mnLoc3
            trows,tcols = tmp_g.shape[:2]    
           
        h,w = img.shape[:2]
        # Print it, for comparison    
        print mn
           
        if mn >= 0.24 and bad == True:
            # crop the image to home in on the result
            crop_img = img[h-70:h,0:w-70] # Crop from x, y, w, h -> 100, 200, 300, 400
            # NOTE: it's img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
            bad = False
  
        else:
            bad = False
     
    # Draw the rectangle 
    cv2.rectangle(img, (MPx,MPy),(MPx+tcols,MPy+trows),(0,0,255),2)
    
    return MPx,MPy,mn
Example 17
def template_matching():
    img = cv2.imread('messi.jpg',0)
    img2 = img.copy()
    template = cv2.imread('face.png',0)
    w, h = template.shape[::-1]

    # All the 6 methods for comparison in a list
    methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
            'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

    for meth in methods:
        img = img2.copy()
        method = eval(meth)

        # Apply template Matching
        res = cv2.matchTemplate(img,template,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)

        cv2.rectangle(img,top_left, bottom_right, 255, 2)

        plt.subplot(121),plt.imshow(res,cmap = 'gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(img,cmap = 'gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.suptitle(meth)

        plt.show()
Example 18
    def TemplateMatching(self, img, tmp):
        '''
            Run template matching between the input image and the template image.
        '''
        # when matching on edge maps:
        # gimg = cv2.Canny(img, threshold1= 100, threshold2= 200,apertureSize = 3)
        # tmp = cv2.Canny(tmp, threshold1= 100, threshold2= 200,apertureSize = 3)

        # plain grayscale matching:
        gimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        tmp = cv2.cvtColor(tmp,cv2.COLOR_BGR2GRAY)

        gimg2 = gimg
        
        

        rows = len(tmp[:,0]) 
        cols = len(tmp[0]) 
        
        methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
        'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
        # methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR','cv2.TM_CCORR_NORMED']
        # run the match once per method
        for i, meth in enumerate(methods):
            gimg = gimg2
            method = eval(meth)

            # Apply template Matching
            res = cv2.matchTemplate(gimg,tmp,method)

            # minimum and maximum values and their locations
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

            # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
            # if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                # top_left = min_loc
            # else:
                # top_left = max_loc
            
            # draw color
            # if i == 3:
                # color = [0,0,0]
            # else:
                # color = [0,0,0]
                # color[i] = 255
            color = 255
            # rectangle result
            top_left = max_loc
            bottom_right = (top_left[0] + cols, top_left[1] + rows)
            


            cv2.rectangle(img,(top_left[0],top_left[1]), bottom_right, color, 2)
            cv2.putText(img,meth,(top_left[0],top_left[1]-5),cv2.FONT_HERSHEY_SIMPLEX,0.3,color)
            cv2.imshow(meth,res/np.amax(res))
            cv2.imshow('Srcimg',img)
            cv2.imshow('template',tmp)
            cv2.imshow('GrayImg',gimg)

            print max_val,min_val
Example 19
def scaleImg(wei):
    mn, mx, minPos, maxPos = cv2.minMaxLoc(wei)  # avoid shadowing the min/max builtins
    # print mn, mx
    wei = (wei - mn) * 255 / (mx - mn)
    wei = wei.astype(int)
    wei = cv2.convertScaleAbs(wei, alpha=1)
    return wei
Example 20
File: img.py Project: ftyszyx/tools
 def getOneTemplePos(self,srcPicPath,templePicPath):
     # print(srcPicPath,templePicPath)
     img_src=cv2.imread(srcPicPath)
     img_src_gray=cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
     srcw,srch=img_src_gray.shape[::-1]
     print("img_src gray",srcw,srch)
     img_temple=cv2.imread(templePicPath)
     img_temple_gray=cv2.cvtColor(img_temple, cv2.COLOR_BGR2GRAY)
     templew,templeh=img_temple_gray.shape[::-1]
     print("temple gray",templew,templeh)
     # cv2.imshow('rgb',img_src)
     # cv2.imshow('gray',img_src_gray)
     # cv2.imshow('template',img_temple_gray)
     # cv2.waitKey(0)
     # cv2.destroyAllWindows()
     res = cv2.matchTemplate(img_src_gray,img_temple_gray,method)  # `method`: a cv2.TM_* constant defined elsewhere
     min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
     print(min_val, max_val, min_loc, max_loc)
     # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
     if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
         top_left = min_loc
     else:
         top_left = max_loc
     bottom_right = (top_left[0] + templew, top_left[1] + templeh)
     cv2.rectangle(img_src,top_left, bottom_right, 255, 2)
     print(top_left, bottom_right)
Example 21
 def find_match(self, img_proc, entry_name):
     # load the match-table entry
     entry = self.tbl[entry_name]
     img_part = entry[0]
     (ymin, ymax, xmin, xmax) = entry[1]
     ytgt = entry[2][0] - ymin
     xtgt = entry[2][1] - xmin
     confid = entry[3]
     img_all = img_proc[ymin:ymax, xmin:xmax]
     # size check
     if img_all.shape[0] < img_part.shape[0] or img_all.shape[1] < img_part.shape[1]:
         # capture failure?
         print 'ERROR: Bad Capture.'
         return 0
     # run the match
     result = cv2.matchTemplate(img_part, img_all, cv2.TM_CCOEFF_NORMED)
     (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(result)
     (x, y) = maxLoc
     #(y, x) = np.unravel_index(result.argmax(), result.shape)
     if self.debug:
         print '/ Confid:', round(maxVal, 3), '/ Actual:', (y, x), '/ Target:', (ytgt, xtgt)
     # evaluate
     if maxVal > confid and abs(y - ytgt) < 10 and abs(x - xtgt) < 10:
         # confidence is at least the threshold, and x/y are within 10 pixels of the target
         return round(maxVal, 1)
     else:
         return 0
Example 22
    def match_template(self, cv_image):
        frame = np.array(cv_image, dtype=np.uint8)

        # grey = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        # edges = cv2.Sobel(grey, cv.CV_32F, 1, 1)

        w, h = self.template.shape[1], self.template.shape[0]

        # let cv2 allocate the response map instead of the legacy cv.CreateMat dance
        result_array = cv2.matchTemplate(frame, self.template, cv.CV_TM_CCOEFF_NORMED)

        (min_score, max_score, minloc, maxloc) = cv2.minMaxLoc(result_array)

        # if max_score > 0.7:
        # return None
        (x, y) = maxloc

        match_box = (x, y, w, h)
        cv2.imshow("Match Result", result_array)
        # cv.Rectangle(self.marker_image, (x, y), (x + w, y + h),(255, 255, 0), 3, 0)
        return match_box
Example 23
def isMatch(subPath, srcPath, threshold=0.01):
    '''
    check whether the subPath image exists in the srcPath image.
    @type subPath: string
    @params subPath: the path of the searched template. It must be no greater than the source image and have the same data type.
    @type srcPath: string
    @params srcPath: the path of the source image where the search is running.
    @type threshold: float
    @params threshold: the minimum value used to tighten or loosen the matching threshold. 0.01 means at most 1% difference; default is 0.01.
    @rtype: boolean
    @return: true if the sub image is found in the src image; false if it is not found or on any exception.
    '''
    for img in [subPath, srcPath]: assert os.path.exists(img) , 'No such image:  %s' % (img)
    method = cv2.cv.CV_TM_SQDIFF_NORMED #Parameter specifying the comparison method 
    try:
        subImg = cv2.imread(subPath) #Load the sub image
        srcImg = cv2.imread(srcPath) #Load the src image
        result = cv2.matchTemplate(srcImg, subImg, method) #comparison; the documented order is (image, template)
        minVal = cv2.minMaxLoc(result)[0] #Get the minimum squared difference
        if minVal <= threshold: #Compared with the expected similarity
            return True
        else:
            return False
    except:
        return False
Example 24
def getMatchedCenterOffset(subPath, srcPath, threshold=0.01, rotation=0):
    '''
    get the coordinate of the matched sub image's center point.
    @type subPath: string
    @params subPath: the path of the searched template. It must be no greater than the source image and have the same data type.
    @type srcPath: string
    @params srcPath: the path of the source image where the search is running.
    @type threshold: float
    @params threshold: the minimum value used to tighten or loosen the matching threshold. 0.01 means at most 1% difference;
                       default is 0.01.
    @type rotation: int
    @params rotation: the degree of rotation, clockwise by default. must be one of 0, 90, 180, 270
    @rtype: tuple
    @return: (x, y), the coordinate tuple of the matched sub image's center point. return None if the sub image is not found or on any exception.
    '''
    for img in [subPath, srcPath]: assert os.path.exists(img) , "No such image:  %s" % (img)
    method = cv2.cv.CV_TM_SQDIFF_NORMED #Parameter specifying the comparison method 
    try:
        subImg = cv2.imread(subPath) #Load the sub image
        srcImg = cv2.imread(srcPath) #Load the src image
        result = cv2.matchTemplate(srcImg, subImg, method) #comparison; the documented order is (image, template)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result) #Get the minimum squared difference
        if minVal <= threshold: #Compared with the expected similarity
            minLocXPoint, minLocYPoint = minLoc
            subImgRow, subImgColumn = subImg.shape[:2]
            centerPoint = (minLocXPoint + int(subImgColumn/2), minLocYPoint + int(subImgRow/2))
            #shape is (h, w) for grayscale images and (h, w, d) for color ones
            (height, width) = srcImg.shape[:2]

            return adaptRotation(coord=centerPoint, size=(height, width), rotation=rotation)
        else:
            return None    
    except Exception, e:
        return None
Example 25
 def normalizar(self, src):
     minn, maxx, dummy1, dummy2 = cv2.minMaxLoc(src)
     if maxx!=minn:
         dst = src/(maxx-minn) + minn/(minn-maxx)
     else:
         dst = src - minn
     return dst
Example 26
def compute_frame_likeness(path):
    file_list = os.listdir(path)
    file_list.sort()
    previous_image = cv2.imread(path + os.sep + file_list[0])
    current_label = file_list[0].partition('.')[0]

    output = ""
    likenesses = []

    # for each extracted frame: compute difference to previous frame
    for img_name in file_list[1:]:
        # frame might be first frame of next video
        if not img_name.partition('.')[0] == current_label:
            # output now holds all frame differences for this vid
            output += current_label + ";" + \
                ";".join(map(str, likenesses)) + "\r\n"
            print current_label + " done"
            current_label = img_name.partition('.')[0]
            likenesses = []

        # compute frame differences
        full_path = path + os.sep + img_name
        image = cv2.imread(full_path)
        try:
            result = cv2.matchTemplate(
                image, previous_image, cv2.TM_CCORR_NORMED)
        except cv2.error:
            continue

        previous_image = image
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
        likenesses.append(maxVal)

    # flush the final video's differences, which the loop above never emits
    output += current_label + ";" + ";".join(map(str, likenesses)) + "\r\n"
    return output
Example 27
def calcMatchTemplate(fimg1, fimg2, parMethod=cv2.TM_CCORR_NORMED):
    # kdif (downscale factor) and ksiz (template-to-frame size ratio) are module-level constants
    frm1=cv2.imread(fimg1, 0)
    frm2=cv2.imread(fimg2, 0)
    frm1=cv2.resize(frm1, (int(frm1.shape[1]/kdif), int(frm1.shape[0]/kdif)))
    frm2=cv2.resize(frm2, (int(frm2.shape[1]/kdif), int(frm2.shape[0]/kdif)))
    fsiz=np.array((frm1.shape[1], frm1.shape[0]))
    tsiz=np.floor(fsiz*ksiz).astype(int)
    p0=np.floor((fsiz-tsiz)/2).astype(int)
    frm2p=frm2[p0[1]:p0[1]+tsiz[1], p0[0]:p0[0]+tsiz[0]].copy()
    CC=cv2.matchTemplate(frm1, frm2p, parMethod)
    minVal,maxVal,minLoc,maxLoc = cv2.minMaxLoc(CC)
    dxy=p0-maxLoc
    # print maxVal
    # frm2_shift=np.roll(frm2, int(math.floor(-dxy[0])), 1)
    # frm2_shift=np.roll(frm2_shift, int(math.floor(-dxy[1])), 0)
    # tmp=np.zeros((frm1.shape[0], frm1.shape[1], 3), np.uint8)
    # tmp[:,:,2]=frm1
    # tmp[:,:,1]=frm2_shift
    # tmp[:,:,0]=0
    # cv2.imshow("win-frm1", frm1)
    # cv2.imshow("win-prt2", frm2p)
    # cv2.imshow("win-shift", tmp)
    # cv2.waitKey(0)
    # pl.imshow(CC)
    # pl.show()
    return dxy
Example 28
def verificar_url(url, template_path):
    """
    Verifica se a URL contem o template informado
    """
    nome_arquivo = url.replace('http:', '').replace('/', '') + '.png'
    template = cv2.imread(template_path, 0)
    imagem = screenshot_url(url)

    # implement in docopt first
    # imagem_tresh = cv2.adaptiveThreshold(imagem, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)
    # template_tresh = cv2.adaptiveThreshold(template, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)
    
    
    resultado = cv2.matchTemplate(imagem, template, cv2.cv.CV_TM_SQDIFF_NORMED)
    mn, _, mnLoc, _ = cv2.minMaxLoc(resultado)
    # If the best (minimum) difference is above the configured threshold,
    # the template was not found.
    # Draw a rectangle to mark the matched template.
    MPx, MPy = mnLoc
    trows,tcols = template.shape[:2]
    cv2.rectangle(imagem, (MPx,MPy),(MPx+tcols,MPy+trows),(0,0,255),2)
    cv2.imwrite(nome_arquivo, imagem)
    if mn > 0.01:
        return False
    return mn
Example 29
def matchImage(image,template):
    height, width = template.shape
    match = ocv.matchTemplate(image,template,matching_method)
    min_val, max_val, min_loc, max_loc = ocv.minMaxLoc(match)
    top_left = max_loc
    bottom_right = (top_left[0] + width, top_left[1]+height)
    return [top_left, bottom_right, width, height]
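
Example 29 assumes ocv is an alias for cv2 and that matching_method is defined at module level; since top_left is taken from max_loc, a larger-is-better method is implied:

import cv2 as ocv

# hypothetical module-level setting; any cv2.TM_* method where larger is better
matching_method = ocv.TM_CCOEFF_NORMED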
Example 30
def crosses(inputfile):
    im = cv2.imread(inputfile)
    gray = _gray(im)

    # NOW DETECT CROSSES
    # code based on http://nbviewer.ipython.org/5861365

    score_threshold = 0.954  # certainty there IS a cross

    cross1 = cv2.imread(CROSS)

    cross_count = 0
    cross_data = {}

    if cross1.shape[0] < im.shape[0] and cross1.shape[1] < im.shape[1]:
        graycross1 = cv2.cvtColor(cross1, cv2.COLOR_BGR2GRAY)  # imread returns BGR, not RGB
        match1 = cv2.matchTemplate(gray, graycross1, cv2.TM_CCORR_NORMED)
        min_score, max_score, (min_x, min_y), (max_x, max_y) = cv2.minMaxLoc(match1)

        if max_score >= score_threshold:
            # only testing 1 cross for now
            cross_count = 1
            corner_topL = (max_x, max_y)
            corner_botR = (corner_topL[0] + cross1.shape[1], corner_topL[1] + cross1.shape[0])
            cross_data = {"top_left": corner_topL, "bottom_right": corner_botR, "score": max_score}

    return {"count": cross_count, "data": cross_data}
Example 31
def pavlung(dirName, cla):
    #    cv2.namedWindow('image',cv2.WINDOW_NORMAL)
    print 'pav lung in:', dirName, 'class :', cla
    """ generate patches from lung"""
    lung_dir1 = os.path.join(dirName, lungmask)
    lung_dir = os.path.join(lung_dir1, lungmaskbmp)
    listlung = os.listdir(lung_dir)

    #    dblung_dir = os.path.join(dirName, dblung)
    dirdbname = os.path.join(dirName, bmpname)
    ldb = os.listdir(dirdbname)
    #    print ldb
    label = cla

    pxy = dimpavx * dimpavy * 255
    for n in listlung:
        print n
        tabp = np.zeros((dimtabx, dimtaby, 3), dtype='i')

        endnumslice = n.find('.bmp')
        posend = endnumslice
        #            print n
        while n.find('_', posend) == -1:
            posend -= 1
        debnumslice = posend + 1
        slicenumberl = int((n[debnumslice:endnumslice]))
        #            print slicenumberl
        for ld in ldb:
            endnumslice = ld.find('.bmp')
            posend = endnumslice
            while ld.find('-', posend) == -1:
                posend -= 1
            debnumslice = posend + 1
            slicenumbers = int((ld[debnumslice:endnumslice]))
            #                print slicenumbers, slicenumberl
            if slicenumbers == slicenumberl:
                #                    print slicenumbers
                filescan = os.path.join(dirdbname, ld)
                #                    print filescan
                break
#            print filescan
        lungfile = os.path.join(lung_dir, n)
        imglung = cv2.imread(lungfile, 1)
        #            print filescan

        img1 = cv2.imread(filescan, 1)

        img2 = cv2.medianBlur(imglung, 9)
        #            cv2.imshow('image',img2)
        #            cv2.waitKey(0)
        imgray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(imgray, 0, 255, 0)
        if label == 'nolung':
            thresh = cv2.bitwise_not(thresh)

#            cv2.imshow('image',thresh)
#            cv2.waitKey(0)
        atabf = np.nonzero(thresh)
        imagemax = cv2.countNonZero(imgray)

        if imagemax > 0:
            #            print thresh.size
            xmin = atabf[1].min()
            xmax = atabf[1].max()
            ymin = atabf[0].min()
            ymax = atabf[0].max()
        else:
            xmin = 0
            xmax = 20
            ymin = 0
            ymax = 20

        x = xmin
        nbp = 0
        while x <= xmax:
            y = ymin
            while y <= ymax:
                crop_img = thresh[y:y + dimpavy, x:x + dimpavx]
                #
                # convention img[y: y + h, x: x + w]
                #

                #                    cv2.imshow('image',crop_img)
                #                    cv2.waitKey(0)
                area = crop_img.sum()
                #
                targ = float(area) / pxy
                #                    print targ, area ,pxy
                if targ > thr:
                    #                        print targ, area ,pxy
                    crop_img_orig = img1[y:y + dimpavy, x:x + dimpavx]

                    imgray = cv2.cvtColor(crop_img_orig, cv2.COLOR_BGR2GRAY)
                    imagemax = cv2.countNonZero(imgray)
                    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray)
                    #                        print imagemax

                    #                        print dimpavx*dimpavy*thblack, dimpavx*dimpavy
                    if imagemax > dimpavx * dimpavy * thblack and min_val != max_val:
                        #                            print imagemax,dimpavx*dimpavy/2
                        nbp += 1
                        nampa = f + '_' + str(slicenumbers) + '_' + str(
                            nbp) + '.' + typei
                        #                            print nampa
                        fw = os.path.join(patchpath, label)
                        fxloca = os.path.join(fw, loca)
                        fw1 = os.path.join(fxloca, nampa)
                        #                            print fw1
                        #                            ooo
                        scipy.misc.imsave(fw1, imgray)
                        if normiInternal:
                            normpatch = normi(imgray)
                        else:
                            normpatch = cv2.equalizeHist(imgray)
#                            min_val, max_val, min_loc,max_loc = cv2.minMaxLoc(imgray)
#                            print (min_val, max_val,min_loc, max_loc)
#                            normpatch = cv2.equalizeHist(imgray)
#normalize patches and put in patches_norm
                        fw = os.path.join(patchNormpath, label)
                        fxloca = os.path.join(fw, loca)
                        fw1 = os.path.join(fxloca, nampa)
                        cv2.imwrite(fw1, normpatch)

                        #                print('pavage',i,j)
                        i = 0
                        # we draw the rectangle
                        col = classifc[label]
                        while i < dimpavx:
                            j = 0
                            while j < dimpavy:
                                if y + j < 512 and x + i < 512:
                                    tabp[y + j][x + i] = col
                                if i == 0 or i == dimpavx - 1:
                                    j += 1
                                else:
                                    j += dimpavy - 1
                            i += 1
                        # we clear the source region so it is not reused

                        thresh[y:y + dimpavy, x:x + dimpavx] = 0

                        y += dimpavy - 1


#                            cv2.imshow('image',imglung+tabp)
#                            cv2.waitKey(0)

                y += 1
            x += 1

        tabp = imglung + tabp

        mfl = open(jpegpath + '/' + f + '_' + str(slicenumbers) + '.txt', "w")
        mfl.write('#number of patches: ' + str(nbp) + '\n')
        mfl.close()
        scipy.misc.imsave(
            jpegpath + '/' + f + '_' + label + '_' + str(slicenumbers) +
            '.jpg', tabp)
Example 32
def register_image(color_img, temp_img, cv_type, manual, ecc_file_man,
                   ecc_file_in, img_out, path, ep_num):
    warp_matrix = []
    points_from_file = []
    if manual is True:
        # read in warp_matrix from manual selection
        ecc_file_man = os.path.join(path, ecc_file_man)

        file = open(ecc_file_man, "r")

        for item in file:
            points_from_file.append(float(item.strip()))

        file.close()
        # create CV_F32 (3, 3) or (2, 3)
        for i in range(0, len(points_from_file), 3):
            warp_matrix.append([
                points_from_file[i], points_from_file[i + 1],
                points_from_file[i + 2]
            ])

        warp_matrix = np.asarray(warp_matrix, dtype=np.float32)

    elif manual is False and ecc_file_in is not None:
        # if warp_matrix file specified read it in
        ecc_file_in = os.path.join(path, ecc_file_in)

        file = open(ecc_file_in, "r")

        for item in file:
            points_from_file.append(float(item.strip()))

        file.close()
        # create CV_F32 (3, 3) or (2, 3)
        for i in range(0, len(points_from_file), 3):
            warp_matrix.append([
                points_from_file[i], points_from_file[i + 1],
                points_from_file[i + 2]
            ])

        warp_matrix = np.asarray(warp_matrix, dtype=np.float32)
        # if the loaded matrix's shape does not match the cv2 motion type, fall back to identity
        if Warp[cv_type].value == cv.MOTION_HOMOGRAPHY and warp_matrix.shape[0] != 3:
            warp_matrix = np.eye(3, 3, dtype=np.float32)
        elif Warp[cv_type].value != cv.MOTION_HOMOGRAPHY and warp_matrix.shape[0] != 2:
            warp_matrix = np.eye(2, 3, dtype=np.float32)

    else:
        # create identity matrix for automatic registration
        if Warp[cv_type].value == cv.MOTION_HOMOGRAPHY:
            warp_matrix = np.eye(3, 3, dtype=np.float32)
        else:
            warp_matrix = np.eye(2, 3, dtype=np.float32)

    size = temp_img.shape

    iterations = 50

    criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, iterations,
                ep_num)

    im_aligned = np.zeros_like(color_img)

    template = cv.merge((temp_img, temp_img, temp_img))

    # since color image is getting aligned, you must align all channels
    for i in range(3):

        (_, warp_matrix) = cv.findTransformECC(template[:, :, i],
                                               color_img[:, :, i],
                                               warp_matrix,
                                               Warp[cv_type].value,
                                               criteria,
                                               inputMask=None,
                                               gaussFiltSize=5)

        if Warp[cv_type].value == cv.MOTION_HOMOGRAPHY:
            im_aligned[:, :, i] = cv.warpPerspective(
                color_img[:, :, i],
                warp_matrix, (size[1], size[0]),
                flags=cv.INTER_LINEAR + cv.WARP_INVERSE_MAP)
        else:
            im_aligned[:, :, i] = cv.warpAffine(
                color_img[:, :, i],
                warp_matrix, (size[1], size[0]),
                flags=cv.INTER_LINEAR + cv.WARP_INVERSE_MAP)

    gray_aligned = cv.cvtColor(im_aligned, cv.COLOR_BGR2GRAY)

    err_img = temp_img - gray_aligned

    abs_err = cv.absdiff(temp_img, gray_aligned)

    min_v, max_v, min_l, max_l = cv.minMaxLoc(abs_err)  # scale by the maximum absolute error, not the wrapped uint8 difference

    abs_err = abs_err * (255 / max_v)

    cv.imshow("Warped Image", im_aligned)
    cv.imshow("Error Image", abs_err)
    img_out = os.path.join(path, img_out)
    cv.imwrite(img_out, im_aligned)
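
Example 32 indexes a Warp enum that maps names to cv2 motion types; a minimal sketch consistent with Warp[cv_type].value (member names are hypothetical):

from enum import Enum
import cv2 as cv

class Warp(Enum):
    # hypothetical member names; only .value is used above
    translation = cv.MOTION_TRANSLATION
    euclidean = cv.MOTION_EUCLIDEAN
    affine = cv.MOTION_AFFINE
    homography = cv.MOTION_HOMOGRAPHY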
Example 33
def upperBody(im):
    if not os.path.isdir('model'):
        os.mkdir("model")
    # Load a Caffe Model
    protoFile = os.path.join(path,"model/pose_deploy_linevec_faster_4_stages.prototxt")
    weightsFile = os.path.join(path,"model/pose_iter_440000.caffemodel")

    # Specify number of points in the model
    nPoints = 18
    net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

    # Read Image
    # global im
    # im = cv2.imread(filename)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    inWidth = im.shape[1]
    inHeight = im.shape[0]

    # Convert image to blob
    netInputSize = (368, 368)
    inpBlob = cv2.dnn.blobFromImage(im, 1.0 / 255, netInputSize, (0, 0, 0), swapRB=True, crop=False)
    net.setInput(inpBlob)

    # Run Inference (forward pass)
    output = net.forward()

    # X and Y Scale
    scaleX = float(inWidth) / output.shape[3]
    scaleY = float(inHeight) / output.shape[2]

    # Empty list to store the detected keypoints
    points = []

    # Confidence threshold
    threshold = 0.1

    for i in range(nPoints):
        # Obtain probability map
        probMap = output[0, i, :, :]
        # print(probMap)

        # Find global maxima of the probMap.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

        # Scale the point to fit on the original image
        x = scaleX * point[0]
        y = scaleY * point[1]

        if prob > threshold:
            # Add the point to the list if the probability is greater than the threshold
            points.append((int(x), int(y)))
        else:
            points.append(None)

    maxX, maxY = 0, 0
    minX, minY = list(points[0])[0], list(points[0])[1]
    #TODO
    if points[8] or points[11]:  # upper-body joints detected
        for i, p in enumerate(points):
            if p:
                p1 = list(p)
                if i == 9 or i == 10 or i == 12 or i == 13:  # skip lower-body joints
                    continue
                # scan all joints for the min/max extents
                if p1[0] > maxX:
                    maxX = p1[0]
                if p1[1] > maxY:
                    maxY = p1[1]
                if p1[0] < minX:
                    minX = p1[0]
                if p1[1] < minY:  # the original never updated minY; likely an oversight
                    minY = p1[1]
        w = maxX - minX
    else:  # no upper-body joints detected
        h = inHeight
        w = inWidth
        return 0,0,w,h
    return minX, minY, w, maxY
Example 34
 def match(self, image):
     res = cv2.matchTemplate(self.image, np.array(image), cv2.TM_CCOEFF_NORMED)
     _, similarity, _, _ = cv2.minMaxLoc(res)
     return similarity > TEMPLATE_THRESHOLD
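
Example 34 compares against a module-level TEMPLATE_THRESHOLD that is not shown; for TM_CCOEFF_NORMED similarity a cutoff around 0.8 is a common, if hypothetical, choice:

TEMPLATE_THRESHOLD = 0.8  # hypothetical cutoff for TM_CCOEFF_NORMED scores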
Example 35
def call_detect():
    global finalImage
    global avgX
    global avgY
    global detected

    h, w = template.shape  # `template`, `tH`, `tW`, `original`, and `image` are module-level globals; `ssim` is scikit-image's structural similarity

    while True:
        #start = time.time()
        clone = image
        gray = cv2.cvtColor(clone, cv2.COLOR_BGR2GRAY)
        found = None
        # loop over the scales of the image
        for scale in np.linspace(2.4, 0.2, 10)[::-1]:
            # resize the image according to the scale, and keep track
            # of the ratio of the resizing
            resized = imutils.resize(gray, width=int(gray.shape[1] * scale))
            r = gray.shape[1] / float(resized.shape[1])
            # if the resized image is smaller than the template, then break
            # from the loop
            if resized.shape[0] < tH or resized.shape[1] < tW:
                break
            # detect edges in the resized, grayscale image and apply template
            # matching to find the template in the image

            edged = cv2.Canny(resized, 50, 200)
            result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF)
            (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)

            # if we have found a new maximum correlation value, then update
            # the bookkeeping variable
            if found is None or maxVal > found[0]:
                found = (maxVal, maxLoc, r)

                (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
                (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))

                resized = gray[startY:endY, startX:endX]

                resized = cv2.resize(resized, (w, h))

                if ssim(original, resized) > 0.2:
                    break

        # unpack the bookkeeping variable and compute the (x, y) coordinates
        # of the bounding box based on the resized ratio

        (_, maxLoc, r) = found
        (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
        (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))

        resized = gray[startY:endY, startX:endX]

        resized = cv2.resize(resized, (w, h))
        if ssim(original, resized) > 0.2:
            # draw a bounding box around the detected result and display the image
            cv2.rectangle(clone, (startX, startY), (endX, endY), (0, 0, 255), 2)
            finalImage = clone
            avgX = (startX + endX) / 2
            avgY = (startY + endY) / 2
            detected = True
        else:
            detected = False
Example 36
import cv2
import numpy as np

# find the target image inside the provided scene image using matchTemplate,
# which slides the template across the scene from the top corner
# down to the bottom corner
image = cv2.imread('WaldoBeach.jpg')
cv2.imshow('Where is waldo?', image)
cv2.waitKey(0)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

template = cv2.imread('waldo.jpg', 0)

result = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)

# creating the bounding box (a fixed 50x50 here; template.shape would track the template's real size)
topLeft = maxLoc
bottomRight = (topLeft[0] + 50, topLeft[1] + 50)
cv2.rectangle(image, topLeft, bottomRight, (0, 0, 255), 5)

cv2.imshow('Where is waldo?', image)
cv2.waitKey(0)
Example 37
        dft_filtered = cv2.mulSpectrums(dft_shifted, lp_filter, flags=0)

        # shift it back to the original quadrant ordering

        dft = np.fft.fftshift(dft_filtered)

        # recover the original image via the inverse DFT

        filtered_img = cv2.dft(dft, flags=cv2.DFT_INVERSE)

        # normalize the filtered image into 0 -> 255 (8-bit grayscale) so we
        # can see the output

        min_val, max_val, min_loc, max_loc = \
            cv2.minMaxLoc(filtered_img[:, :, 0])
        filtered_img_normalized = filtered_img[:, :, 0] * (
            1.0 / (max_val - min_val)) + ((-min_val) / (max_val - min_val))
        filtered_img_normalized = np.uint8(filtered_img_normalized * 255)

        # calculate the magnitude spectrum and log transform + scale it for
        # visualization

        magnitude_spectrum = np.log(
            cv2.magnitude(dft_filtered[:, :, 0], dft_filtered[:, :, 1]))

        # create an 8-bit image to put the magnitude spectrum into

        magnitude_spectrum_normalized = np.zeros((nheight, nwidth, 1),
                                                 np.uint8)
Example 38
 fimage = np.zeros(image.shape,np.float32)
 for r in xrange(image.shape[0]):
     for c in xrange(image.shape[1]):
         # multiply by (-1)^(r+c) so the spectrum ends up centered
         if (r+c)%2:
             fimage[r][c] = -1*image[r][c]
         else:
             fimage[r][c] = image[r][c]
 # steps 3 and 4: zero-pad and take the fast Fourier transform
 fImagefft2 = fft2Image(fimage)
 # Fourier amplitude spectrum
 amplitude = amplitudeSpectrum(fImagefft2)
 # grayscale display of the spectrum
 spectrum = graySpectrum(amplitude)
 cv2.imshow("originalSpectrum",spectrum)
 # locate the maximum of the amplitude spectrum
 minValue,maxValue,minLoc,maxLoc = cv2.minMaxLoc(amplitude)
 # display window for the low-pass-filtered spectrum
 cv2.namedWindow("lpFilterSpectrum",1)
 def nothing(*arg):
     pass
 # trackbar: low-pass filter type
 cv2.createTrackbar("lpType","lpFilterSpectrum",lpType,MAX_LPTYPE,nothing)
 # trackbar: cutoff frequency (radius)
 cv2.createTrackbar("radius","lpFilterSpectrum",radius,MAX_RADIUS,nothing)
 # low-pass filtering result
 result = np.zeros(spectrum.shape,np.float32)
 while True:
     # read the current cutoff radius and filter type
     radius = cv2.getTrackbarPos("radius","lpFilterSpectrum")
     lpType = cv2.getTrackbarPos("lpType","lpFilterSpectrum")
     # step 5: build the low-pass filter
Example 39
def main():
    ctx = POINTER(uvc_context)()
    dev = POINTER(uvc_device)()
    devh = POINTER(uvc_device_handle)()
    ctrl = uvc_stream_ctrl()

    res = libuvc.uvc_init(byref(ctx), 0)

    if res < 0:
        print("uvc_init error")
        exit(1)

    try:
        res = libuvc.uvc_find_device(ctx, byref(dev), PT_USB_VID, PT_USB_PID,
                                     0)
        if res < 0:
            print("uvc_find_device error")
            exit(1)

        try:
            res = libuvc.uvc_open(dev, byref(devh))
            if res < 0:
                print("uvc_open error")
                exit(1)

            print("device opened!")
            #
            # print_device_info(devh)
            # print_device_formats(devh)

            frame_formats = uvc_get_frame_formats_by_guid(
                devh, VS_FMT_GUID_Y16)

            if len(frame_formats) == 0:
                print("device does not support Y16")
                exit(1)

            libuvc.uvc_get_stream_ctrl_format_size(
                devh, byref(ctrl), UVC_FRAME_FORMAT_Y16,
                frame_formats[0].wWidth, frame_formats[0].wHeight,
                int(1e7 / frame_formats[0].dwDefaultFrameInterval))

            res = libuvc.uvc_start_streaming(devh, byref(ctrl),
                                             PTR_PY_FRAME_CALLBACK, None, 0)

            if res < 0:
                print("uvc_start_streaming failed: {0}".format(res))
                exit(1)

            try:
                startTime = time.time()
                while True:
                    #try:
                    data = q.get(True, 500)
                    dateTime = datetime.datetime.now()

                    if data is not None:

                        dataKelvin = cv2.resize(data[:, :], (640, 480))
                        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(
                            dataKelvin)
                        # img = raw_to_8bit(dataKelvin)
                        # displayTemperatureInCelcius(img, minVal, minLoc, (255, 0, 0))
                        # displayTemperatureInCelcius(img, maxVal, maxLoc, (0, 0, 255))
                        # cv2.imshow('MINTS Thermal', img)
                        # cv2.waitKey(1)

                        if ((time.time() - startTime) > 10):
                            startTime = time.time()
                            dataCelciusMultiplied = kelvinToCelcius(dataKelvin)
                            sensorDictionary = OrderedDict([
                                ("dateTime", str(dateTime)),
                                ("maxTemperature", ktoc(maxVal)),
                                ("minTemperature", ktoc(minVal)),
                                ("maxTempLocX", maxLoc[0]),
                                ("maxTempLocY", maxLoc[1]),
                                ("minTempLocX", minLoc[0]),
                                ("minTempLocY", minLoc[1])
                            ])
                            mSR.sensorFinisherSummaryOnly(
                                dateTime, "FLIR001", sensorDictionary)
                            # mSR.sensorFinisherThermal(dateTime,"FLIR001",dataCelciusMultiplied)
                            print(" ")
                            print(
                                "============== MINTS Thermal ==============")
                            print(" ")
                            print("Maximum Temperature Read:" +
                                  str(ktoc(maxVal)))
                            print("Maximum Temperature Location X:" +
                                  str(maxLoc[0]))
                            print("Maximum Temperature Location Y:" +
                                  str(maxLoc[1]))
                            print("Minimum Temperature Read:" +
                                  str(ktoc(minVal)))
                            print("Minimum Temperature Location X:" +
                                  str(minLoc[0]))
                            print("Minimum Temperature Location Y:" +
                                  str(minLoc[1]))
                            print(" ")
                            print(
                                "============== MINTS Thermal ==============")
                #
                #except:
                # time.sleep(10)
                # print("Thermal Loop Not Read")

            finally:
                libuvc.uvc_stop_streaming(devh)
            #
            # print("done")
        finally:
            libuvc.uvc_unref_device(dev)
    finally:
        libuvc.uvc_exit(ctx)
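
Example 39 leans on helpers from the PureThermal/libuvc capture samples (q, raw_to_8bit, kelvinToCelcius, mSR). The Y16 frames carry values in hundredths of a Kelvin, so a ktoc consistent with this usage would be:

def ktoc(val):
    # Y16 radiometric data is in centikelvin; convert to degrees Celsius
    return (val - 27315) / 100.0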
Example 40
method = cv2.TM_SQDIFF_NORMED

# Read the images from the file
large_image = cv2.imread('gui-test/screenshot.png')[745:1150, 75:1850]

champions = glob.glob('test/*.png')
champs = []

for champion in champions:
    small_image = cv2.imread(champion)

    # documented argument order is (image, template); this snippet relies on
    # OpenCV accepting the swapped order with the small "needle" image first
    result = cv2.matchTemplate(small_image, large_image, method)

    # We want the minimum squared difference
    mn, _, mnLoc, _ = cv2.minMaxLoc(result)
    MPx, _ = mnLoc
    slice_champ = champion[5:]
    name_champ = slice_champ.split("-")[0]
    champs.append((name_champ, mn, mnLoc))

team = sorted(champs, key=lambda x: x[1])[:10]
picks = sorted(team, key=lambda x: x[2])
#print(picks)
print([x[0] for x in picks])

# Display the original image
cv2.imshow('output', large_image)

# The image is only displayed if we call this
cv2.waitKey(0)
Example 41
        radius = cv2.getTrackbarPos("radius", windowName2)
        hp_filter = create_high_pass_filter(nwidth, nheight, radius)

        dft_filtered = cv2.mulSpectrums(dft_shifted, hp_filter, flags=0)

        # shift it back to the original quadrant ordering

        dft = np.fft.fftshift(dft_filtered)

        # recover the original image via the inverse DFT

        filtered_img = cv2.dft(dft, flags=cv2.DFT_INVERSE)

        # normalize the filtered image into 0 -> 255 (8-bit grayscale) so we can see the output

        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(filtered_img[:, :, 0])
        filtered_img_normalized = filtered_img[:, :, 0] * (
            1.0 / (maxVal - minVal)) + ((-minVal) / (maxVal - minVal))
        filtered_img_normalized = np.uint8(filtered_img_normalized * 255)

        # calculate the magnitude spectrum and log transform + scale it for visualization

        magnitude_spectrum = np.log(
            cv2.magnitude(dft_filtered[:, :, 0], dft_filtered[:, :, 1]))

        # create an 8-bit image to put the magnitude spectrum into

        magnitude_spectrum_normalized = np.zeros((nheight, nwidth, 1),
                                                 np.uint8)

        # normalize the magnitude spectrum into 0 -> 255 (8-bit grayscale) so we can see the output
Esempio n. 42
0
def detect(frame):
    global state, cnt, count
    pub = rospy.Publisher('/kinect2/openpose', String, queue_size=10)

    try:
        frame.shape
    except AttributeError:
        # no valid image yet
        return
    else:
        if state == 0:
            state = 2
    
    if state == 2:
        #detect the watch
        kp1, des1 = sift.detectAndCompute(template,None)
        kp2, des2 = sift.detectAndCompute(frame,None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1,des2,k=2)

        good = []

        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good)>MIN_MATCH_COUNT:
            
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
            
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            matches_Mask = mask.ravel().tolist()
            h,w = template.shape

            pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
            dst = cv2.perspectiveTransform(pts,M)
            cv2.polylines(frame,[np.int32(dst)],True,0,2, cv2.LINE_AA)
            #publish detected the watch
            pub.publish('has watch')
            state = 3
        else:
            print( "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT))
            #publish not detected the watch
            matches_Mask = None
        draw_params = dict(matchColor=(0,255,0), 
                           singlePointColor=None,
                           matchesMask=matches_Mask, 
                           flags=2)
        frame = cv2.drawMatches(template,kp1,frame,kp2,good,None,**draw_params)
    else:
        frameWidth = frame.shape[1]
        frameHeight = frame.shape[0]
        
        net.setInput(cv2.dnn.blobFromImage(frame, 1.0, (368, 368), (127.5, 127.5, 127.5), swapRB=True, crop=False))
        out = net.forward()
        out = out[:, :19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements

        assert(len(BODY_PARTS) == out.shape[1])

        points = []
        for i in range(len(BODY_PARTS)):
            # Slice heatmap of corresponding body's part.
            heatMap = out[0, i, :, :]

            # Originally, we try to find all the local maximums. To simplify a sample
            # we just find a global one. However only a single pose at the same time
            # could be detected this way.
            _, conf, _, point = cv2.minMaxLoc(heatMap)

            x = (frameWidth * point[0]) / out.shape[3]
            y = (frameHeight * point[1]) / out.shape[2]
            # Add a point if its confidence is higher than the threshold.
            points.append((int(x), int(y)) if conf > 0.2 else None)
        
        #falling down
        if state == 3:
            if points[1] is not None and points[8] is not None and points[11] is not None:
                #if points[8][1] - points[1][1] < frameHeight / 8 or points[11][1] - points[1][1] < frameHeight / 8 :
                # fall detected when the neck keypoint (index 1) drops into the lower 2/5 of the frame
                if points[1][1] > frameHeight / 5 * 3:
                    print('detected the falling down\n')
                    pub.publish('fall down')
        
        # draw the skeleton: connect each detected pair of points
        for pair in POSE_PAIRS:
            partFrom = pair[0]
            partTo = pair[1]
            assert(partFrom in BODY_PARTS)
            assert(partTo in BODY_PARTS)

            idFrom = BODY_PARTS[partFrom]
            idTo = BODY_PARTS[partTo]

            if points[idFrom] and points[idTo]:
                cv2.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
                cv2.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv2.FILLED)
                cv2.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv2.FILLED)
        
        # show the processing time for the frame
        t, _ = net.getPerfProfile()
        freq = cv2.getTickFrequency() / 1000
        cv2.putText(frame, '%.2fms' % (t / freq), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    count = 0
    #show the image
    cv2.imshow('OpenPose using Opencv2', frame)
    if cv2.waitKey(3) & 0xFF == ord('q'):
        print('quit')
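
This excerpt assumes several module-level names (sift, template, net, MIN_MATCH_COUNT, BODY_PARTS, POSE_PAIRS). A minimal sketch of the two pose tables, following the layout used by OpenCV's MobileNet OpenPose sample:

BODY_PARTS = {"Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
              "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
              "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
              "LEye": 15, "REar": 16, "LEar": 17, "Background": 18}

POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"],
              ["RShoulder", "RElbow"], ["RElbow", "RWrist"],
              ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
              ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"],
              ["Neck", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"],
              ["Neck", "Nose"], ["Nose", "REye"], ["REye", "REar"],
              ["Nose", "LEye"], ["LEye", "LEar"]]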
Esempio n. 43
0
def find_clefs_from_reference(df_0, df_1, img):

    if (df_1['x'].max() - df_1['x'].min() < 75):
        df_2 = df_0.loc[df_0['x0'] < (
            (df_1['x'].max() + df_1['width'].max()) * 1.1)].copy()
        df_2 = df_2.loc[df_2['x0'] > df_1['x'].min() * 0.9]
        df_2 = df_2.loc[df_2['width'] > df_1['width'].min() * 0.5]
        df_2 = df_2.loc[df_2['height'] < df_1['height'].max() * 1.3]
        df_2 = df_2.loc[df_2['ratio'] > 1]
        #        df_2=df_2.loc[df_2['area']>df_1['area'].min()*0.5]
        df_2 = df_2.sort_values(by=['y0', 'area'], ascending=[True, False])
        df_2['crossover'] = df_2['y1'].shift().fillna(-1)
        df_2['crossover'] = df_2['crossover'].subtract(df_2['y0'])
        df_2['co_scaler'] = df_2['height'].shift().fillna(1)
        df_2['crossover'] = df_2['crossover'].divide(df_2['co_scaler'])
        df_2['numrow'] = df_2.index.tolist()
        df_2 = df_2.reset_index(drop=True)
        container_index = []
        for index in range(len(df_2.index.tolist()) - 1):
            if df_2.iloc[index:index +
                         1, :]['crossover'].values[0] < 0 and df_2.iloc[
                             index + 1:index +
                             2, :]['crossover'].values[0] > 0.75:
                container_index.append(index)
                container_index.append(index + 1)
        locations = [
            vari for vari in df_2.copy().index.tolist()
            if vari not in container_index
        ]
        df_3 = df_2.iloc[locations, :].copy()
        for index in range(int(len(container_index) / 2)):
            df_4 = df_2.iloc[(index * 2):((index * 2) +
                                          2), :].copy().sort_values(
                                              by=['x0'], ascending=True)
            df_3 = df_3.append(df_4.iloc[:1, :])
        df_3 = df_3.sort_values(by=['y0'], ascending=True)
        for index in range(len(df_3.index.tolist())):
            info = df_3.iloc[index:, :]
            w, h = int(info['width'].tolist()[0] * 0.25), int(
                info['height'].tolist()[0] * 0.25)
            if w % 2 != 1:
                w = w - 1
            if h % 2 != 1:
                h = h - 1

            img_dup = img.copy()
            img_dup_blr = cv2.GaussianBlur(img_dup, (w, h), 0)
            template = img[info['y0'].tolist()[0]:info['y1'].tolist()[0],
                           info['x0'].tolist()[0]:info['x1'].tolist()[0]]
            template_blr = cv2.GaussianBlur(template, (w, h), 0)

            cv2.imshow('template', template)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

            res = cv2.matchTemplate(img_dup_blr, template_blr,
                                    cv2.TM_SQDIFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            resolution = 0.045
            match_locations = np.where(res <= resolution)
            #            resolution=resolution-0.005
            df_5 = pd.DataFrame(data={
                'x': match_locations[1],
                'y': match_locations[0]
            })
            df_6 = df_5.copy()
            df_6['delta_x'] = df_6['x'].diff().shift(1).fillna(
                df_6['x'].diff().shift(-1))
            df_6['delta_y'] = df_6['y'].diff().shift(1).fillna(
                df_6['y'].diff().shift(-1))
            df_6 = df_6.loc[df_6['delta_y'] > 1]
            df_6 = df_6.append(df_5.iloc[0:1, :])
            print(df_6)
            if len(df_6.index.tolist()) > 0:
                w, h = info['width'].tolist()[0], info['height'].tolist()[0]
                img_dup = img.copy()
                for index, row in df_6.iterrows():
                    cv2.rectangle(img_dup, (int(row['x']), int(row['y'])),
                                  (int(row['x'] + w), int(row['y'] + h)),
                                  [155, 155, 155], 2)


            cv2.imshow('thing', img_dup)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
    else:
        container_change = []
        for index in range(len(df_1.index.tolist())):
            val_0, val_1 = df_1.iloc[index:(
                index + 2), :]['x'].max(), df_1.iloc[index:(index +
                                                            2), :]['x'].min()
            val_2 = val_0 - val_1
            if val_2 > 75:
                container_change.append(index)
        for index_0 in range(len(container_change) + 1):
            if index_0 == (len(container_change)):
                df_2 = df_0.loc[df_0['y0'] > df_1.iloc[
                    container_change[index_0 - 1]:, :]['y'].tolist()[0] *
                                0.98].copy()
                df_2 = df_2.loc[
                    df_2['x0'] > df_1.iloc[container_change[index_0 - 1] +
                                           1:, :]['x'].tolist()[0] * 0.98]
                df_2 = df_2.loc[
                    df_2['x0'] < df_1.iloc[container_change[index_0 - 1] +
                                           1:, :]['x'].tolist()[0] * 1.02]
            else:
                if index_0 == 0:
                    df_2 = df_0.loc[df_0['y0'] < df_1.iloc[
                        (container_change[index_0] + 1):, :]['y'].tolist()[0] *
                                    1.02].copy()
                else:
                    df_2 = df_0.loc[df_0['y0'] > df_1.iloc[
                        (container_change[index_0] - 1):, :]['y'].tolist()[0] *
                                    0.98].copy()
                    df_2 = df_2.loc[df_2['y0'] < df_1.iloc[
                        (container_change[index_0] + 1):, :]['y'].tolist()[0] *
                                    1.02]
                df_2 = df_2.loc[df_2['x0'] > df_1.iloc[
                    container_change[index_0]:, :]['x'].tolist()[0] * 0.98]
                df_2 = df_2.loc[df_2['x0'] < df_1.iloc[
                    container_change[index_0]:, :]['x'].tolist()[0] * 1.1]
            df_2 = df_0.loc[df_0['x0'] < (
                (df_1['x'].max() + df_1['width'].max()) * 1.1)].copy()
            df_2 = df_2.loc[df_2['x0'] > df_1['x'].min() * 0.9]
            df_2 = df_2.loc[df_2['width'] > df_1['width'].min() * 0.5]
            df_2 = df_2.loc[df_2['height'] < df_1['height'].max() * 1.3]
            df_2 = df_2.loc[df_2['ratio'] > 1]
            df_2 = df_2.sort_values(by=['y0', 'area'], ascending=[True, False])
            df_2['crossover'] = df_2['y1'].shift().fillna(-1)
            df_2['crossover'] = df_2['crossover'].subtract(df_2['y0'])
            df_2['co_scaler'] = df_2['height'].shift().fillna(1)
            df_2['crossover'] = df_2['crossover'].divide(df_2['co_scaler'])
            df_2['numrow'] = df_2.index.tolist()
            df_2 = df_2.reset_index(drop=True)
            container_df = []
            for index_1 in range(len(df_2.index.tolist())):
                info = df_2.iloc[index_1:, :]
                w, h = int(info['width'].tolist()[0] * 0.25), int(
                    info['height'].tolist()[0] * 0.25)
                if w % 2 != 1:
                    w = w - 1
                if h % 2 != 1:
                    h = h - 1

                img_dup = img.copy()
                img_dup_blr = cv2.GaussianBlur(img_dup, (w, h), 0)
                template = img[info['y0'].tolist()[0]:info['y1'].tolist()[0],
                               info['x0'].tolist()[0]:info['x1'].tolist()[0]]
                template_blr = cv2.GaussianBlur(template, (w, h), 0)

                #                cv2.imshow('template',template)
                #                cv2.waitKey(0)
                #                cv2.destroyAllWindows()

                res = cv2.matchTemplate(img_dup_blr, template_blr,
                                        cv2.TM_SQDIFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

                resolution = 0.045

                match_locations = np.where(res <= resolution)
                df_3 = pd.DataFrame(data={
                    'x': match_locations[1],
                    'y': match_locations[0]
                })
                df_4 = df_3.copy()
                df_4['delta_x'] = df_4['x'].diff().shift(1).fillna(
                    df_4['x'].diff().shift(-1))
                df_4['delta_y'] = df_4['y'].diff().shift(1).fillna(
                    df_4['y'].diff().shift(-1))
                df_4 = df_4.loc[df_4['delta_y'] > 1]
                df_4 = df_4.append(df_3.iloc[0:1, :].copy())
                df_4['height'] = [int(info['height'].tolist()[0])] * len(
                    df_4.index.tolist())
                df_4['width'] = [int(info['width'].tolist()[0])] * len(
                    df_4.index.tolist())
                df_4['index_0'] = [index_0] * len(df_4.index.tolist())
                df_4['index_1'] = [index_1] * len(df_4.index.tolist())
                container_df.append(df_4.copy())
            for index_1 in range(len(container_df)):
                if index_1 == 0:
                    df_5 = container_df[index_1].copy()
                else:
                    df_5 = df_5.append(container_df[index_1].copy())
            if index_0 == 0:
                df_6 = df_5.copy()
            else:
                df_6 = df_6.append(df_5.copy())
        df_6 = df_6.sort_values(by=['y'], ascending=[True])
        df_6['delta_y'] = df_6['y'].diff().shift(1).fillna(
            df_6['y'].diff().shift(-1))
        df_7 = df_6.loc[df_6['delta_y'] > 1]
        df_7 = df_7.append(df_6.iloc[:1, :])
        df_7 = df_7.sort_values(by=['y'], ascending=[True])
        df_7['delta_y'] = df_7['y'].diff().shift(1).fillna(
            df_7['y'].diff().shift(-1))
        df_7 = df_7.loc[df_7['x'] <= df_1['x'].max() * 1.02]
        df_7 = df_7.loc[df_7['x'] >= df_1['x'].min() * 0.98]
        df_7 = df_7.reset_index(drop=True)
        df_7['delta_y'] = df_7['y'].diff().shift(1).fillna(
            df_7['y'].diff().shift(-1))
        df_7['delta_x'] = df_7['x'].diff().shift(1).fillna(
            df_7['x'].diff().shift(-1))
        if len(df_7.index.tolist()) > 0:
            w, h = info['width'].tolist()[0], info['height'].tolist()[0]
            img_dup = img.copy()
            for index, row in df_7.iterrows():
                cv2.rectangle(img_dup, (int(row['x']), int(row['y'])),
                              (int(row['x'] + row['width']),
                               int(row['y'] + row['height'])), [155, 155, 155],
                              2)

        cv2.imshow('thing', img_dup)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return df_7
Esempio n. 44
0
print(w, h)
# print(template.shape)

# the 6 comparison methods
methods = [
    'cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
    'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'
]
for meth in methods:
    img = img2.copy()
    method = eval(meth)

    # match the template against the target image
    res = cv2.matchTemplate(img, template, method)
    # get the max and min values of the grayscale result
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc

    bottom_right = (top_left[0] + w, top_left[1] + h)  # bottom-right corner
    cv2.rectangle(img, top_left, bottom_right, 255, 2)

    plt.subplot(131), plt.imshow(res, cmap='gray')
    plt.title('Match Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(132), plt.imshow(img, cmap='gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.subplot(133), plt.imshow(template, cmap='gray')
    plt.title('Template'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
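
The min-versus-max rule in the loop above is the one detail that changes between methods. A small hedged helper capturing it (best_match_location is a name introduced here, not part of the original script):

def best_match_location(res, method):
    # SQDIFF variants measure dissimilarity, so the best match is the minimum
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED):
        return min_loc, min_val
    return max_loc, max_val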
Esempio n. 45
0
def find_g_clefs(df, img):
    df_0 = df.loc[df['ratio'] > 2.5]
    df_0 = df_0.loc[df_0['ratio'] < 3.1]
    df_0 = df_0.loc[df_0['area'] < 25000]
    df_0 = df_0.loc[df_0['area'] > 3000]

    df_0 = df_0.sort_values(by=['area'], ascending=[False])

    df_0 = df_0.loc[df_0['pixel_mean_q0'] > df_0['pixel_mean_q1']]
    df_0 = df_0.loc[df_0['pixel_mean_q0'] > df_0['pixel_mean_q2']]
    df_0 = df_0.loc[df_0['pixel_mean_q0'] > df_0['pixel_mean_q3']]

    df_0 = df_0.loc[df_0['x0'] < df_0['x0'].min() * 2]
    if df_0['x0'].max() != df_0['x0'].min():
        df_0 = df_0.loc[df_0['x0'] < df_0['x0'].max()]

    count = len(df_0.index.tolist())
    if count > 6:
        count = 6

    for index_0 in range(count):
        info = df_0.iloc[index_0:, :]
        w, h = int(info['width'].tolist()[0] * 0.25), int(
            info['height'].tolist()[0] * 0.25)
        if w % 2 != 1:
            w = w - 1
        if h % 2 != 1:
            h = h - 1

        img_dup = img.copy()
        img_dup_blr = cv2.GaussianBlur(img_dup, (w, h), 0)
        template = img[info['y0'].tolist()[0]:info['y1'].tolist()[0],
                       info['x0'].tolist()[0]:info['x1'].tolist()[0]]
        template_blr = cv2.GaussianBlur(template, (w, h), 0)

        cv2.imshow('template', template)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        res = cv2.matchTemplate(img_dup_blr, template_blr,
                                cv2.TM_SQDIFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        resolution = 0.035
        difference_boo = False

        for index_1 in range(7):
            match_locations = np.where(res <= resolution)
            resolution = resolution - 0.005
            df_1 = pd.DataFrame(data={
                'x': match_locations[1],
                'y': match_locations[0]
            })
            if len(df_1.index.tolist()) > 0 and len(
                    df_1.index.tolist()) < 5000:
                df_2 = df_1.copy()
                df_3 = df_1.copy()
                df_2['delta_x'] = df_2['x'].diff().shift(1).fillna(
                    df_2['x'].diff().shift(-1))
                df_2['delta_y'] = df_2['y'].diff().shift(1).fillna(
                    df_2['y'].diff().shift(-1))
                df_2 = df_2.loc[df_2['delta_y'] > 1]
                df_2 = df_2.append(df_1.iloc[0:1, :])
                df_2['delta_y'] = df_2['y'].diff().shift(1).fillna(
                    df_2['y'].diff().shift(-1))
                df_2 = df_2.loc[df_2['delta_y'] > 100]

                df_2 = df_2.append(
                    pd.DataFrame(data={
                        'x': [0, 0],
                        'y': [0, img.shape[:2][0]]
                    }))
                df_2 = df_2.sort_values(by=['y'], ascending=[True])
                index_container = []
                difference_boo = True

                for index_2 in range(len(df_2.index.tolist()) - 1):
                    temp_0 = df_3.loc[df_3['y'] >= df_2.iloc[index_2:, :]
                                      ['y'].tolist()[0]].copy().index.tolist()
                    temp_1 = df_3.loc[df_3['y'] < df_2.iloc[(index_2 + 1):, :]
                                      ['y'].tolist()[0]].copy().index.tolist()
                    index_container.append(
                        [vari for vari in temp_0 if vari in temp_1])

                for instance in index_container:
                    if len(instance) > 0 and difference_boo:
                        df_4 = df_1.iloc[instance[0]:instance[-1], :].copy()
                        df_4['delta_x'] = df_4['x'].diff().shift(1).fillna(
                            df_4['x'].diff().shift(-1))
                        df_4['delta_y'] = df_4['y'].diff().shift(1).fillna(
                            df_4['y'].diff().shift(-1))
                        df_4 = df_4.loc[df_4['delta_y'] > 0]
                        if df_4['x'].max() - df_4['x'].min() > 35:  # and df_4['y'].max()-df_4['y'].min()>35:
                            print('difference_boo is false')
                            difference_boo = False
            else:
                break
            if len(df_1.index.tolist()) < 1 or difference_boo:
                break


        df_2 = df_1.copy()
        df_2['delta_x'] = df_2['x'].diff().shift(1).fillna(
            df_2['x'].diff().shift(-1))
        df_2['delta_y'] = df_2['y'].diff().shift(1).fillna(
            df_2['y'].diff().shift(-1))
        df_2 = df_2.loc[df_2['delta_y'] > 1]
        df_2 = df_2.append(df_1.iloc[0:1, :])
        df_2 = df_2.sort_values(by=['y'], ascending=[True])
        if len(df_2.index.tolist()) > 1 and difference_boo:
            break
    if len(df_2.index.tolist()) > 0:
        w, h = info['width'].tolist()[0], info['height'].tolist()[0]
        img_dup = img.copy()
        for index, row in df_2.iterrows():
            cv2.rectangle(img_dup, (int(row['x']), int(row['y'])),
                          (int(row['x'] + w), int(row['y'] + h)),
                          [155, 155, 155], 2)

        cv2.imshow('thing', img_dup)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    df_2['kind'] = ['g_clef'] * len(df_2.index.tolist())
    df_2['width'] = [w] * len(df_2.index.tolist())
    df_2['height'] = [h] * len(df_2.index.tolist())
    df_2['area'] = df_2['width'].multiply(df_2['height'])

    return df_2
Esempio n. 46
0
import time
import picamera
import picamera.array
import cv2
import car  # assumed local module providing the Car() motor controller

radius = 5

myCar = car.Car()

with picamera.PiCamera() as camera:
    camera.start_preview()
    time.sleep(2)
    while True:
        with picamera.array.PiRGBArray(camera) as stream:
            camera.capture(stream, format='bgr')
            # At this point the image is available as stream.array
            image = stream.array
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            #blur = cv2.GaussianBlur(gray, (radius, radius), 0)
            (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
            print(maxLoc[0])
            # steer towards the brightest pixel
            if maxLoc[0] < 200:
                myCar.right()
            elif maxLoc[0] > 520:
                myCar.left()
            time.sleep(0.1)
            myCar.forward()
            time.sleep(0.1)
            myCar.stop()
            myCar.center()
Esempio n. 47
0
    hull_area = cv2.contourArea(hull)
    aspect_ratio = float(w) / h  # aspect_ratio = width / height
    extent = float(area) / rect_area  # extent = area(contour) / area(rect)
    solidity = float(area) / hull_area  # solidity = area(contour) / area(hull)
    equi_diameter = np.sqrt(4 * area / np.pi)  # diameter of a circle with the same area

    # #(x,y), (MA, ma), angle = cv2.fitEllipse(contour)
    # cv2.drawContours(mask, [contour], 0, (255,0,0), -1)
    # pixelpoints = np.transpose(np.nonzero(mask))

    # Extreme points
    leftmost = tuple(contour[contour[:, :, 0].argmin()][0])
    rightmost = tuple(contour[contour[:, :, 0].argmax()][0])
    topmost = tuple(contour[contour[:, :, 1].argmin()][0])
    bottommost = tuple(contour[contour[:, :, 1].argmax()][0])
    cv2.circle(img, leftmost, 8, (255, 127, 0), -1)
    cv2.circle(img, rightmost, 8, (255, 127, 0), -1)
    cv2.circle(img, topmost, 8, (255, 127, 0), -1)
    cv2.circle(img, bottommost, 8, (255, 127, 0), -1)

#  Maximum Value, Minimum Value and their locations
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray, mask=mask)
cv2.circle(img, min_loc, 9, (0, 255, 255), -1)
cv2.circle(img, max_loc, 9, (0, 255, 255), -1)

# Mean Color (Mean Intensity)
mean_val = cv2.mean(img, mask=mask)

cv2.imshow('original', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
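
The excerpt above assumes contour, area, rect_area, hull, mask, img and imgray already exist. A hedged sketch of that preamble, with a hypothetical input file name and the OpenCV 4.x findContours signature:

import cv2
import numpy as np

img = cv2.imread('shape.png')  # hypothetical input image
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY)
found, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour = max(found, key=cv2.contourArea)  # largest contour
area = cv2.contourArea(contour)
x, y, w, h = cv2.boundingRect(contour)
rect_area = w * h
hull = cv2.convexHull(contour)
mask = np.zeros(imgray.shape, np.uint8)
cv2.drawContours(mask, [contour], 0, 255, -1)  # filled contour as the mask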
Esempio n. 48
0
import sys

import cv2

if len(sys.argv) != 3:
    print("Specify image path and template path")
    quit()

img_path = sys.argv[1]
templ_path = sys.argv[2]

img = cv2.imread(img_path, 0)
img2 = img.copy()
img3 = img.copy()
img4 = img.copy()
template = cv2.imread(templ_path, 0)
w, h = template.shape[::-1]

res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

my_res = ssd(img2, template)
my_min_val, my_max_val, my_min_loc, my_max_loc = cv2.minMaxLoc(my_res)

res_ncc = ncc(img3, template)
min_val_ncc, max_val_ncc, min_loc_ncc, max_loc_ncc = cv2.minMaxLoc(res_ncc)

res_sad = sad(img4, template)  # assumes a sad() helper alongside ssd() and ncc()
min_val_sad, max_val_sad, min_loc_sad, max_loc_sad = cv2.minMaxLoc(res_sad)

print(my_res.shape)
print(res.shape)
print(res)
print(my_res)
print(min_val, max_val, min_loc, max_loc)
Esempio n. 49
0
def convert_tiff_to_grayscale(tiff_img):
    min_intensity, max_intensity, _, _ = cv2.minMaxLoc(tiff_img)
    img_scaled = (tiff_img - min_intensity) * (255.0 /
                                               (max_intensity - min_intensity))
    return img_scaled.astype(np.uint8)
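
A short usage sketch with a hypothetical file name; reading with IMREAD_UNCHANGED preserves the 16-bit depth that makes the rescale worthwhile:

tiff = cv2.imread('scan.tif', cv2.IMREAD_UNCHANGED)  # hypothetical 16-bit input
gray8 = convert_tiff_to_grayscale(tiff)
cv2.imwrite('scan_8bit.png', gray8)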
Esempio n. 50
0
    def find_in_scaling_range(cls,
                              image,
                              similarity=DEFAULT_SIMILARITY,
                              lowerEnd=0.8,
                              upperEnd=1.2):
        """Finds the location of the image on the screen. First the image is searched at its default scale,
        and if it isn't found, it will be resized using values inside the range provided until a match that satisfy
        the similarity value is found. If the image isn't found even after it has been resized, the method returns None.

        Args:
            image (string): Name of the image.
            similarity (float, optional): Defaults to DEFAULT_SIMILARITY.
                Percentage in similarity that the image should at least match
            lowerEnd (float, optional): Defaults to 0.8.
                Lowest scaling factor used for resizing.
            upperEnd (float, optional): Defaults to 1.2.
                Highest scaling factor used for resizing.

        Returns:
            Region: Coordinates or where the image appears.
        """
        template = cv2.imread('assets/{}/{}.png'.format(cls.assets, image), 0)
        # first try with default size
        width, height = template.shape[::-1]
        match = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)
        _, value, _, location = cv2.minMaxLoc(match)
        if (value >= similarity):
            return Region(location[0], location[1], width, height)

        # resize and match using threads

        # change the scaling factors if the boss icon searched for is small
        # (some events have as boss fleet a shipgirl with a small boss icon at her bottom right)
        if cls.small_boss_icon and image == 'enemy/fleet_boss':
            lowerEnd = 0.4
            upperEnd = 0.6

        # preparing interpolation methods
        middle_range = (upperEnd + lowerEnd) / 2.0
        if lowerEnd < 1 and upperEnd > 1 and middle_range == 1:
            l_interpolation = cv2.INTER_AREA
            u_interpolation = cv2.INTER_CUBIC
        elif upperEnd < 1 and lowerEnd < upperEnd:
            l_interpolation = cv2.INTER_AREA
            u_interpolation = cv2.INTER_AREA
        elif lowerEnd > 1 and upperEnd > lowerEnd:
            l_interpolation = cv2.INTER_CUBIC
            u_interpolation = cv2.INTER_CUBIC
        else:
            l_interpolation = cv2.INTER_NEAREST
            u_interpolation = cv2.INTER_NEAREST

        results_list = []
        regions_detected = []
        count = 0
        loop_limiter = (middle_range - lowerEnd) * 100

        # creating and launching worker processes
        pool = ThreadPool(processes=4)

        while (upperEnd > lowerEnd) and (count < loop_limiter):
            l_result = pool.apply_async(
                cls.resize_and_match,
                (template, lowerEnd, similarity, l_interpolation))
            u_result = pool.apply_async(
                cls.resize_and_match,
                (template, upperEnd, similarity, u_interpolation))
            cls.script_sleep(0.01)
            lowerEnd += 0.02
            upperEnd -= 0.02
            count += 1
            results_list.append(l_result)
            results_list.append(u_result)

        # closing pool and waiting for results
        pool.close()
        pool.join()

        # extract regions from async_result
        for i in range(0, len(results_list)):
            if results_list[i].get() is not None:
                regions_detected.append(results_list[i].get())

        if (len(regions_detected) > 0):
            return regions_detected[0]
        else:
            return None
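
The workers call cls.resize_and_match, which this excerpt does not include. A minimal sketch, inferring the signature from the apply_async calls above and assuming the same module-level screen and Region used by find_in_scaling_range:

    @classmethod
    def resize_and_match(cls, template, scale, similarity, interpolation):
        """Resize the template by scale; return a Region on a match, else None."""
        height, width = template.shape[:2]
        resized = cv2.resize(template, (int(width * scale), int(height * scale)),
                             interpolation=interpolation)
        match = cv2.matchTemplate(screen, resized, cv2.TM_CCOEFF_NORMED)
        _, value, _, location = cv2.minMaxLoc(match)
        if value >= similarity:
            return Region(location[0], location[1],
                          resized.shape[1], resized.shape[0])
        return None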
Esempio n. 51
0
def found_rectlogo(image, file_name, alpha):
    #    path="./logo-origin/"
    #    path="./static/img/croped_img/"
    path = "./static/img/croped_img_200/"

    #cv2.imshow("Template", template)
    count = 0
    # loop over the images to find the template in
    found = None
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #    rec_number=0
    listdir = os.listdir(path)
    print(listdir)
    #    bar = Bar('Processing', max=len(listdir))
    for name in listdir:
        template_path = path + name
        im1 = Image.open(template_path)
        im2 = im1.rotate(90, expand=1)

        template = np.array(im1)
        template90 = np.array(im2)
        template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
        template = cv2.Canny(template_gray, 50, 200)

        (tH, tW) = template.shape[:2]

        (h, w) = (tH, tW)
        center = (w / 2, h / 2)

        angle90 = 270
        scale = 1.0
        template_gray90 = cv2.cvtColor(template90, cv2.COLOR_BGR2GRAY)

        (tH90, tW90) = template90.shape[:2]

        template90 = cv2.Canny(template_gray90, 50, 200)

        for scale in np.linspace(0.2, 1.0, 20)[::-1]:
            print('scale  ', scale)
            # resize the image according to the scale, and keep track
            # of the ratio of the resizing.
            resized = imutils.resize(gray, width=int(gray.shape[1] * scale))

            r = gray.shape[1] / float(resized.shape[1])
            #    		bar1.next()

            # detect edges once per scale; both template orientations are
            # matched against the same edge map
            edged = cv2.Canny(resized, 50, 200)
            rotate = 0

            # skip matching when the resized image is smaller than the template
            if resized.shape[0] >= tH and resized.shape[1] >= tW:
                result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF)
                (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)

                if found is None or maxVal > found[0]:
                    found = (maxVal, maxLoc, r, tH, tW, name, rotate)

            # if the resized image is smaller than the rotated template, then
            # skip this scale
            if resized.shape[0] < tH90 or resized.shape[1] < tW90:
                continue
            # match the 90-degree-rotated template against the same edge map
            result = cv2.matchTemplate(edged, template90, cv2.TM_CCOEFF)
            (_, maxVal90, _, maxLoc90) = cv2.minMaxLoc(result)

            if found is None or maxVal90 > found[0]:
                rotate = 1
                found = (maxVal90, maxLoc90, r, tH90, tW90, name, rotate)
    # unpack the bookkeeping variable and compute the (x, y) coordinates
    # of the bounding box based on the resized ratio
    (_, maxLoc, r, tH, tW, name, rotate) = found
    (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
    (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
    print('max==', found[0])
    # draw a bounding box around the detected result and display the image
    #    imgplot = plt.imshow(rect)
    #    cv2.imwrite('./rect0/{}'.format(name),rect)

    cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
    rect = image[startY:endY, startX:endX]
    rect_temp = cv2.imread('./static/img/check_convert_300/' + file_name)
    x0 = int(startX * alpha)
    x1 = int(endX * alpha)
    y0 = int(startY * alpha)
    y1 = int(endY * alpha)
    rect0 = rect_temp[y0:y1, x0:x1]
    if rotate == 1:
        im_pil0 = Image.fromarray(rect0)
        im_pil0 = im_pil0.rotate(90, expand=1)
        rect0 = np.asarray(im_pil0)



    count = count + 1
    #    cv2.waitKey(0)
    #    return  image,rect,rect0,name
    return rect0, name
Esempio n. 52
0
    def imageDepthCallback(self, data):
        try:
            # convert using the encoding reported by the message itself
            cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)

            # calculate the forward (centre) depth
            def center_depth():
                pix = (data.width / 2, data.height / 2)
                l = [
                    cv_image[x, y] for x in range(26, 32)
                    for y in range(37, 41)
                ]
                center_depth = sum(l) / len(l)
                #                print(cv_image)
                #                print(data);
                #		print('Depth at center %d' % center_depth)
                return center_depth

            self.centerDepth = center_depth()  # print center depth
            if self.state == 0 and self.centerDepth < 7500 and self.waitInterval > 40:
                self.msg.vel = -0.08
                self.msg.angle = -0.78
                self.waitInterval = 0
                self.turnOne = True
                print(self.state)
            elif self.turnOne and self.centerDepth > 7500:
                self.state = 1
                self.turnOne = False

            if self.state == 1 and self.centerDepth < 6500 and self.waitInterval > 40:
                self.msg.vel = -0.08
                self.msg.angle = -0.78
                self.waitInterval = 0
                self.turnTwo = True
                print(self.state)
            elif self.turnTwo and self.centerDepth > 7500:
                self.state = 2
                self.turnTwo = False

            if not self.turnOne and not self.turnTwo:
                self.waitInterval += 1
                minVal, maxVal, _, _ = cv2.minMaxLoc(cv_image)
                #print(maxVal)

                #maxVal = 6959.0
                cv_image = cv2.convertScaleAbs(cv_image,
                                               alpha=(255 / (maxVal - 100)))
                blurred = cv2.GaussianBlur(cv_image, (31, 31), 0)
                if self.state == 0:
                    ret, thresh = cv2.threshold(
                        blurred, self.thresh1, 255,
                        0)  # hallway: 1:100 , hallway 2:
                elif self.state == 1:
                    ret, thresh = cv2.threshold(blurred, self.thresh2, 255, 0)
                elif self.state == 2:
                    ret, thresh = cv2.threshold(blurred, self.thresh1, 255, 0)

                # find contours in the thresholded image
                cnts = cv2.findContours(thresh, cv2.RETR_LIST,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]

                height, width = cv_image.shape  # numpy shape is (rows, cols)
                centroid = (int(width / 2), int(height / 2))
                best_cX = 0
                for c in cnts:
                    #print('within contour for loop')
                    #area = cv2.contourArea(c)

                    #if not AREA/100 <area <AREA/20:
                    #    continue

                    #compute the center of the contour
                    M = cv2.moments(c)
                    cX = int(M['m10'] / M['m00'])
                    cY = int(M['m01'] / M['m00'])

                    if cX > best_cX:
                        best_cX = cX
                        centroid = (cX, cY)
                        # draw the contour and center of the shape on the image

                    mask = np.zeros(thresh.shape, 'uint8')

                    cv2.drawContours(mask, [c], -1, 255, 2)
                    x, y, w, h = cv2.boundingRect(c)
                    mask = cv2.rectangle(mask, (x, y), (x + w, y + h), 255, 2)
                    cv2.circle(mask, (cX, cY), 2, 255, -1)

                def calculate_heading(centroid):
                    # calculate heading from the centroid offset
                    height, width = cv_image.shape  # numpy shape is (rows, cols)

                    camera_mid_offset_percent = 0.00  # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right
                    mid = int((width / 2) * (1 + camera_mid_offset_percent))

                    # calculate centroid offset from mid
                    x_offset = centroid[0] - mid
                    y_offset = height - centroid[1]

                    angle_to_mid_radian = math.atan(x_offset / float(y_offset))

                    #print('angle heading: %s' % (np.rad2deg(angle_to_mid_radian)))

                    # cv2.line(cv_image, (mid , height), (centroid[0], centroid[1]), (0,255,0), 2)

                    return max(
                        -0.78,
                        min(0.78, angle_to_mid_radian -
                            (angle_to_mid_radian % 0.05)))

                def calculate_speed(speed):
                    return speed

                heading = calculate_heading(centroid)
                #                speed = calculate_speed(-0.2)
                '''
		if self.start_delay < 20:
			self.msg.angle = 0.0
			self.msg.vel = -0.22
		else:
			self.msg.angle = -heading
                	self.msg.vel = -0.25#speed
                '''
                self.msg.angle = -heading
                self.msg.vel = -0.1

                if self.centerDepth < 9000:
                    self.msg.vel = -0.1

        except CvBridgeError as e:
            print(e)

        except ZeroDivisionError as e:
            print(e)

        self.pub.publish(self.msg)
        cv2.imshow("original image", cv_image)
        cv2.waitKey(1)
Esempio n. 53
0
                                              cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                              cv2.THRESH_BINARY_INV, 11, 2)

            #imgThresh = cv2.morphologyEx(imgThresh, cv2.MORPH_OPEN, kernel)
            #testing
            roi = cv2.resize(imgThresh, (250, 100))
            cv2.imshow('roi', roi)

            scores = []
            groupOutput = []

            for (templateCount, templateROI) in template.items():
                result = cv2.matchTemplate(roi, templateROI,
                                           cv2.TM_CCOEFF_NORMED)
                #print(result)
                (_, score, _, _) = cv2.minMaxLoc(result)
                print(score)
                scores.append(score)

            arrayOfResults.append(str(np.argmax(scores)))
            groupOutput.append(str(np.argmax(scores)))

            # return the most frequent of 10 results
            if len(arrayOfResults) == 10:
                # bincount needs ints, and the results were stored as strings
                counts = np.bincount([int(r) for r in arrayOfResults])
                string = str(np.argmax(counts))
                #only show if it's above the acceptable threshold
                if max(scores) > acceptedThreshold:
                    cv2.putText(img, "Type " + "".join(string), (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            else:
                # score below the accepted threshold: skip drawing the label
                pass
Esempio n. 54
0
def playground(imagePath):

    global prevImage

    #segmentation in HSV space:
    logger.info("%s", imagePath)
    image = cv2.imread(imagePath)
    height, width, channels = image.shape

    #create a mask to remove some of the image which is irrelevant
    mask = np.zeros((height, width, channels), np.uint8)

    mask[0:int(0.66 * height), :] = (255, 255, 255)
    #mask[0:1*height,:] = (255,255,255)

    #cv2.imshow("mask", blank_image)

    img = cv2.cvtColor(cv2.bitwise_and(mask, image), cv2.COLOR_BGR2HSV)
    #_, thresh = cv2.threshold(img[:,:,1], 45, 255, cv2.THRESH_BINARY)
    #_, thresh2 = cv2.threshold(img[:,:,0], 65, 255, cv2.THRESH_BINARY)
    #        thresh2 = cv2.adaptiveThreshold(img[:,:,1],255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,51,-5)

    #first scale up the values
    minH, maxH, _, _ = cv2.minMaxLoc(img[:, :, 0])
    minS, maxS, _, _ = cv2.minMaxLoc(img[:, :, 1])

    Hchannel = (img[:, :, 0] * 255.0 / float(maxH)).astype(np.uint8)
    Schannel = (img[:, :, 1] * 255.0 / float(maxS)).astype(np.uint8)
    cv2.imshow("H", Hchannel)
    cv2.imshow("S", Schannel)
    cv2.imshow("V", image)

    #if prevImage is not None:
    #    img = cv2.absdiff(image, prevImage)

    #for the fuzzy type AND do MIN. Fuzzy type OR do MAX

    minImage = cv2.min(Hchannel, Schannel)
    minI, maxI, _, _ = cv2.minMaxLoc(minImage)
    _, minImg = cv2.threshold(
        cv2.blur((minImage[:, :] * 255.0 / float(maxI)).astype(np.uint8),
                 (10, 10)), 110, 255, cv2.THRESH_BINARY)

    #cv2.imshow("TEST", thresh2)

    segmentedImage = cv2.bitwise_and(img[:, :, 2], minImg)
    cv2.imshow("AND", segmentedImage)  #,cv2.bitwise_and(thresh, thresh2)))

    #do the canny
    edgeSegment = cv2.bitwise_and(cv2.Canny(minImg, 100, 50),
                                  mask[:, :, 0])  #minImg)
    cv2.imshow("EDGE", edgeSegment)

    #do the circle
    #circles = cv2.HoughCircles(edgeSegment, cv2.cv.CV_HOUGH_GRADIENT, 2, 10, None, 10, 35, 7, 35)
    circles = CV_.HoughCirclesWithDefaultGradient(edgeSegment, 2, 10, None, 10,
                                                  35, 7, 35)
    # circles = cv2.HoughCircles(edgeSegment, cv2.HOUGH_GRADIENT, 2, 10, None, 20, 35, 7, 35)

    if circles is not None and len(circles) > 0:
        circ = np.round(circles[0, :]).astype("int")
        for (x, y, r) in circ:
            cv2.circle(image, (x, y), r, (0, 0, 255), 1)

    centre = findPupilFromCircles(circles)

    cv2.circle(image, centre, 3, 255)

    cv2.imshow("circles", image)

    prevImage = image

    cv2.waitKey()

    return centre
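
findPupilFromCircles is not shown in this excerpt. A hedged sketch of such a helper, taking the centre of the first (strongest) Hough circle as the pupil:

def findPupilFromCircles(circles):
    # assumed helper: HoughCircles returns an array of shape (1, N, 3) or None
    if circles is None:
        return (0, 0)
    x, y, r = np.round(circles[0][0]).astype("int")
    return (int(x), int(y))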
Esempio n. 55
0
def _FindCoordsByTmpl(source, tmpl):
    result = cv2.matchTemplate(source, tmpl, cv2.TM_CCOEFF_NORMED)
    loc = cv2.minMaxLoc(result)  # (minVal, maxVal, minLoc, maxLoc)
    # maxLoc is (x, y) while shape is (rows, cols), so width is shape[1]
    x = loc[3][0] + tmpl.shape[1] // 2
    y = loc[3][1] + tmpl.shape[0] // 2
    return int(x), int(y)
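
A usage sketch with hypothetical file names; the returned coordinates point at the centre of the best match:

source = cv2.imread('screen.png', 0)  # hypothetical screenshot
tmpl = cv2.imread('button.png', 0)    # hypothetical template
cx, cy = _FindCoordsByTmpl(source, tmpl)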
Esempio n. 56
0
def analyze_ub_frame(frame, roi, time_min, time_10sec, time_sec, ub_data,
                     ub_data_value, characters_find):
    """analyze frame to find ub name

    analyze the ub name roi and find the best-matching character;
    once 5 characters have been found, search only those 5.

    Args
        frame (ndarray): edited frame from movie
        roi (list): search roi
        time_min (string): minute value
        time_10sec (string): 10 sec value
        time_sec (string): 1 sec value
        ub_data (list): ub name data
        ub_data_value (list): ub entries found so far
        characters_find (list): characters found so far

    Returns
        ub_result (string): ub FOUND or NOT_FOUND


    """
    analyze_frame = frame[roi[1]:roi[3], roi[0]:roi[2]]

    characters_num = len(CHARACTERS)
    ub_result = NOT_FOUND
    tmp_character = [False, 0]
    tmp_value = UB_THRESH

    if len(characters_find) < 5:
        # all characters search
        for j in range(characters_num):
            result_temp = cv2.matchTemplate(analyze_frame, CHARACTERS_DATA[j],
                                            cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result_temp)
            if max_val > tmp_value:
                # better match character found
                tmp_character = [CHARACTERS[j], j]
                tmp_value = max_val
                ub_result = FOUND

        if ub_result == FOUND:
            ub_data.append(time_min + ":" + time_10sec + time_sec + " " +
                           tmp_character[0])
            ub_data_value.extend([[
                int(int(time_min) * 60 + int(time_10sec) * 10 + int(time_sec)),
                tmp_character[1]
            ]])
            if tmp_character[1] not in characters_find:
                characters_find.append(tmp_character[1])
    else:
        for j in range(5):
            # 5 characters search
            result_temp = cv2.matchTemplate(
                analyze_frame, CHARACTERS_DATA[characters_find[j]],
                cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result_temp)
            if max_val > tmp_value:
                # better match character found, update
                tmp_character = [
                    CHARACTERS[characters_find[j]], characters_find[j]
                ]
                tmp_value = max_val
                ub_result = FOUND

        if ub_result == FOUND:
            ub_data.append(time_min + ":" + time_10sec + time_sec + " " +
                           tmp_character[0])
            ub_data_value.extend([[
                int(int(time_min) * 60 + int(time_10sec) * 10 + int(time_sec)),
                tmp_character[1]
            ]])

    return ub_result
Esempio n. 57
0
output = net.forward()
print("time taken by network : {:.3f}".format(time.time() - t))

H = output.shape[2]
W = output.shape[3]

# Empty list to store the detected keypoints
points = []

for i in range(nPoints):
    # confidence map of corresponding body's part.
    probMap = output[0, i, :, :]

    # Find global maxima of the probMap.
    minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

    # Scale the point to fit on the original image
    x = (frameWidth * point[0]) / W
    y = (frameHeight * point[1]) / H

    if prob > threshold:
        cv2.circle(frameCopy, (int(x), int(y)),
                   8, (0, 255, 255),
                   thickness=-1,
                   lineType=cv2.FILLED)
        cv2.putText(frameCopy,
                    "{}".format(i), (int(x), int(y)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 0, 255),
                    2,
                    lineType=cv2.LINE_AA)
        points.append((int(x), int(y)))
    else:
        points.append(None)
Esempio n. 58
0
    for x in range(256):
        pt1 = (x, 100)
        pt2 = (x, 100 - int(hist[x, 0] * 100 / histMax))
        cv2.line(imgHist, pt1, pt2, 0)

    return imgHist


src = cv2.imread('.\\ch03\\Hawkes.jpg', cv2.IMREAD_GRAYSCALE)

if src is None:
    print('Image load failed!')
    sys.exit()

# dst = cv2.normalize(src, None, 0, 255, cv2.NORM_MINMAX)
gmin, gmax, _, _ = cv2.minMaxLoc(src)
dst = ((src - gmin) * 255. / (gmax - gmin)).astype(np.uint8)

hist = cv2.calcHist([src], [0], None, [256], [0, 256])
histImg = getGrayHistImage(hist)

hist2 = cv2.calcHist([dst], [0], None, [256], [0, 256])
histImg2 = getGrayHistImage(hist2)

cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.imshow('histImg1', histImg)
cv2.imshow('histImg2', histImg2)
cv2.waitKey()

cv2.destroyAllWindows()
Esempio n. 59
0
# 0612.py
import cv2
import numpy as np

src = cv2.imread('../data/alphabet.bmp', cv2.IMREAD_GRAYSCALE)
tmp_A = cv2.imread('../data/A.bmp', cv2.IMREAD_GRAYSCALE)
tmp_S = cv2.imread('../data/S.bmp', cv2.IMREAD_GRAYSCALE)
tmp_b = cv2.imread('../data/b.bmp', cv2.IMREAD_GRAYSCALE)
dst = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)  # output display image

# 1
R1 = cv2.matchTemplate(src, tmp_A, cv2.TM_SQDIFF_NORMED)
minVal, _, minLoc, _ = cv2.minMaxLoc(R1)
print('TM_SQDIFF_NORMED:', minVal, minLoc)

h, w = tmp_A.shape[:2]  # shape is (rows, cols)
cv2.rectangle(dst, minLoc, (minLoc[0] + w, minLoc[1] + h), (255, 0, 0), 2)

# 2
R2 = cv2.matchTemplate(src, tmp_S, cv2.TM_CCORR_NORMED)
_, maxVal, _, maxLoc = cv2.minMaxLoc(R2)
print('TM_CCORR_NORMED:', maxVal, maxLoc)
h, w = tmp_S.shape[:2]
cv2.rectangle(dst, maxLoc, (maxLoc[0] + w, maxLoc[1] + h), (0, 255, 0), 2)

# 3
R3 = cv2.matchTemplate(src, tmp_b, cv2.TM_CCORR_NORMED)
_, maxVal, _, maxLoc = cv2.minMaxLoc(R3)
print('TM_CCORR_NORMED:', maxVal, maxLoc)
h, w = tmp_b.shape[:2]
cv2.rectangle(dst, maxLoc, (maxLoc[0] + w, maxLoc[1] + h), (0, 0, 255), 2)
Esempio n. 60
0
    def read_numbers(x, y, w, h, max_digits=5):
        """ Method to ocr numbers.
            Returns int.
        """
        text = []

        crop = screen[y: y + h, x: x + w]
        crop = cv2.resize(crop, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)

        # binarize with an Otsu threshold
        thresh = cv2.threshold(crop, 0, 255, cv2.THRESH_OTSU)[1]
        # cv2.imwrite('thresh1.png', thresh)

        # find contours in the thresholded image, sorted left to right
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cnts = grab_contours(cnts)
        cnts = contours.sort_contours(cnts, method="left-to-right")[0]
        if len(cnts) > max_digits:
            return 0

        # process each digit in turn
        for c in cnts:
            scores = []

            # compute the contour's bounding box
            (x, y, w, h) = cv2.boundingRect(c)

            # draw boxes, for debugging
            # cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 255, 255), 2)
            # cv2.imshow("crop", thresh)
            # cv2.waitKey()

            # extract the ROI
            roi = thresh[y: y + h, x: x + w]
            # cv2.imwrite(f"{v}.png", roi)
            # cv2.imshow("crop", roi)
            # cv2.waitKey()

            # compute the horizontal and vertical padding for each segment
            row, col = roi.shape[:2]

            width = round(abs((50 - col)) / 2) + 5
            height = round(abs((94 - row)) / 2) + 5

            # pad the borders
            resized = cv2.copyMakeBorder(
                roi, top=height, bottom=height, left=width, right=width, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]
            )

            # cv2.imshow("resized", resized)
            # cv2.waitKey()

            for x in range(0, 10):
                template = cv2.imread("assets/number/{}.png".format(x), 0)
                result = cv2.matchTemplate(resized, template, cv2.TM_CCOEFF_NORMED)
                (_, score, _, _) = cv2.minMaxLoc(result)
                scores.append(score)

            # index of the best-matching digit template
            text.append(str(numpy.argmax(scores)))

        text = "".join(text)
        return int(text)