def find_matches(imgpaths, patch, C=0.8):
    """ Runs template matching to find PATCH in each IMGPATHS.
    Input:
        list imgpaths: [imgpath_i, ...]
        IplImage patch: 
        float C:
    Output:
        list matches, [[imgpath_i, (x,y), score_i], ...]
    """
    matches = {}  # maps {str imgpath: [(x,y,score_i), ...]}
    for imgpath in imgpaths:
        img = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
        img_smooth = cv.CreateImage((img.width, img.height), img.depth,
                                    img.channels)
        cv.Smooth(img, img_smooth, cv.CV_GAUSSIAN, param1=17, param2=17)
        M = cv.CreateMat(img.height - patch.height + 1,
                         img.width - patch.width + 1, cv.CV_32F)
        cv.MatchTemplate(img_smooth, patch, M, cv.CV_TM_CCOEFF_NORMED)
        M_np = np.array(M)
        score = np.inf
        while score > C:
            M_idx = np.argmax(M_np)
            i = int(M_idx / M.cols)
            j = M_idx % M.cols
            score = M_np[i, j]
            if score < C:
                break
            matches.setdefault(imgpath, []).append((j, i, score))
            # Suppression: zero out scores around this match so later
            # iterations find distinct locations (clamped at the borders)
            M_np[max(0, i - (patch.height / 3)):i + (patch.height / 3),
                 max(0, j - (patch.width / 3)):j + (patch.width / 3)] = -1.0
    return matches
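
A minimal usage sketch; the file names below are hypothetical:

# Hypothetical usage: search two scanned pages for a small grayscale patch.
patch = cv.LoadImage('patch.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
matches = find_matches(['page1.png', 'page2.png'], patch, C=0.8)
for imgpath, hits in matches.items():
    for (x, y, score) in hits:
        print imgpath, (x, y), score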
Example 2
def detect(templatePath, targetPath, threshold=0.8):
    """
  targetPathで指定される画像からtemplatePathで指定される画像の座標を返す
  @param templatePath {String} 探索対象画像
  @param targetPath {String} 探索範囲画像
  @param threshold=0.8 {Number} マッチ度の閾値
  @return {Tuple} or False テンプレートの中心座標。threshold以上の座標がなければFalse
  """
    target = cv.LoadImage(targetPath)
    template = cv.LoadImage(templatePath)

    dstSize = (target.width - template.width + 1,
               target.height - template.height + 1)
    dstImg = cv.CreateImage(dstSize, cv.IPL_DEPTH_32F, 1)

    cv.MatchTemplate(target, template, dstImg, cv.CV_TM_CCOEFF_NORMED)
    minMaxLoc = cv.MinMaxLoc(dstImg)
    logger.debug("%s, %.2f%%" % (templatePath, minMaxLoc[1] * 100))
    if minMaxLoc[1] < threshold:
        return False

    maxLoc = minMaxLoc[3]

    x = maxLoc[0] + template.width / 2
    y = maxLoc[1] + template.height / 2

    return (x, y)
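
A brief usage sketch; both file names are hypothetical, and logger is assumed to be configured by the surrounding module:

# Hypothetical usage: find the center of a button inside a screenshot.
center = detect('button.png', 'screenshot.png', threshold=0.8)
if center:
    print 'match centered at', center
else:
    print 'no match above threshold'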
Example 3
def find(template, image):
    image_size = cv.GetSize(image)
    template_size = cv.GetSize(template)
    result_size = [s[0] - s[1] + 1 for s in zip(image_size, template_size)]
    result = cv.CreateImage(result_size, cv.IPL_DEPTH_32F, 1)
    cv.MatchTemplate(image, template, result, cv.CV_TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv.MinMaxLoc(result)
    x, y = max_loc
    return {'score': max_val, 'x': x, 'y': y}
Example 4
def distance3(img, imgpath2):
    """ NCC score between IMG and the image at IMGPATH2. """
    imgCv = cv.fromarray(np.copy(img.astype(np.float32)))
    img2 = shared.standardImread(imgpath2, flatten=True)
    bb2 = bb_map.get(imgpath2, None)
    if bb2 is not None:
        # bb2 assumed to be (y1, y2, x1, x2)
        img2 = img2[bb2[0]:bb2[1], bb2[2]:bb2[3]]
    img2Cv = cv.fromarray(np.copy(img2.astype(np.float32)))
    outCv = cv.CreateMat(imgCv.height - img2Cv.height + 1,
                         imgCv.width - img2Cv.width + 1, imgCv.type)
    cv.MatchTemplate(imgCv, img2Cv, outCv, cv.CV_TM_CCOEFF_NORMED)
    return np.asarray(outCv).max()  # go through numpy for the max response
Example 5
def NCC(I, patch):
    I = prepOpenCV(I)
    patch = prepOpenCV(patch)
    patchCv = cv.fromarray(np.copy(patch))
    ICv = cv.fromarray(np.copy(I))
    outCv = cv.CreateMat(I.shape[0] - patch.shape[0] + 1,
                         I.shape[1] - patch.shape[1] + 1, patchCv.type)
    cv.MatchTemplate(ICv, patchCv, outCv, cv.CV_TM_CCOEFF_NORMED)
    Iout = np.asarray(outCv)
    Iout[Iout == 1.0] = 0  # opencv bug
    outPad = np.ones(I.shape) * -1
    outPad[0:Iout.shape[0], 0:Iout.shape[1]] = Iout
    return outPad
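
NCC above (and matchAll in the next example) rely on an external prepOpenCV helper that is not shown. A minimal sketch under the assumption that it only needs to hand cv.fromarray a contiguous, NaN-free float32 array; the real helper may do more:

def prepOpenCV(I):
    # Assumption: coerce to contiguous float32 and strip NaNs, since
    # cv.MatchTemplate cannot handle NaN values
    I = np.ascontiguousarray(I, dtype=np.float32)
    return np.nan_to_num(I)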
Example 6
def matchAll(digit_hash, I):
    result_hash = {}
    for key in digit_hash.keys():
        patch = prepOpenCV(digit_hash[key])
        patchCv = cv.fromarray(np.copy(patch))
        ICv = cv.fromarray(np.copy(I))
        outCv = cv.CreateMat(I.shape[0] - patch.shape[0] + 1,
                             I.shape[1] - patch.shape[1] + 1, patchCv.type)
        cv.MatchTemplate(ICv, patchCv, outCv, cv.CV_TM_CCOEFF_NORMED)
        Iout = np.asarray(outCv)
        Iout[Iout == 1.0] = 0  # opencv bug
        result_hash[key] = Iout

    return result_hash
Example 7
def get_tempmatches(A,
                    B,
                    T=0.8,
                    do_smooth=True,
                    xwin=13,
                    ywin=13,
                    MAX_MATS=50,
                    atleastone=False):
    """ Runs template matching, trying to find image A within image
    B. Returns location (and responses) of all matches greater than
    some threshold T.
    Input:
        IplImage A:
        IplImage B:
        float T:
    Output:
        list matches, i.e. [(x1, y1, float resp), ...]
    """
    if do_smooth:
        B_smooth = cv.CreateImage(cv.GetSize(B), B.depth, B.channels)
        cv.Smooth(B, B_smooth, cv.CV_GAUSSIAN, param1=xwin, param2=ywin)
        B = B_smooth
    wA, hA = cv.GetSize(A)
    wB, hB = cv.GetSize(B)
    M = cv.CreateMat(hB - hA + 1, wB - wA + 1, cv.CV_32F)
    cv.MatchTemplate(B, A, M, cv.CV_TM_CCOEFF_NORMED)
    M_np = np.array(M)
    score = np.inf
    #print 'best score:', np.max(M_np)
    num_mats = 0
    matches = []
    while score > T and num_mats < MAX_MATS:
        M_idx = np.argmax(M_np)
        i = int(M_idx / M.cols)
        j = M_idx % M.cols
        score = M_np[i, j]
        if score < T:
            break
        matches.append((j, i, score))
        # Suppression: zero out scores around this match (clamped at borders)
        M_np[max(0, i - (hA / 3)):i + (hA / 3),
             max(0, j - (wA / 3)):j + (wA / 3)] = -1.0
        num_mats += 1
    if not matches and atleastone:
        M_idx = np.argmax(M_np)
        i = int(M_idx / M.cols)
        j = M_idx % M.cols
        score = M_np[i, j]
        matches.append((j, i, score))
    return matches
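
A usage sketch with hypothetical file names:

# Hypothetical usage: find all copies of a small mark within a page image.
A = cv.LoadImage('mark.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
B = cv.LoadImage('page.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
for (x, y, resp) in get_tempmatches(A, B, T=0.8):
    print 'match at', (x, y), 'response:', resp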
Example 8
def puntosParaTemplate(imagen, template):
  res_width  = imagen.width - template.width + 1
  res_height = imagen.height - template.height + 1
  resultado  = cv.CreateImage((res_width, res_height), cv.IPL_DEPTH_32F, 1)
  cv.MatchTemplate(imagen, template, resultado, cv.CV_TM_SQDIFF)
  pos = []
  # the scratch buffers must match the image being searched for corners
  eig_image  = cv.CreateMat(res_height, res_width, cv.CV_32FC1)
  temp_image = cv.CreateMat(res_height, res_width, cv.CV_32FC1)
  for (x, y) in cv.GoodFeaturesToTrack(resultado, eig_image, temp_image, 0,
                                       0.2, template.width, useHarris=True):
    pos.append((x, y))
  # no explicit release needed: the Python bindings manage these buffers
  return sorted(pos)
Example 9
def bestmatch(A, B):
    """ Tries to find the image A within the (larger) image B.
    For instance, A could be a voting target, and B could be a
    contest.
    Input:
        cvMat A: Patch to search for
        cvMat B: Image to search over
    Output:
        ((x,y), s_mat),  location on B of the best match for A.
    """
    w_A, h_A = A.cols, A.rows
    w_B, h_B = B.cols, B.rows
    s_mat = cv.CreateMat(h_B - h_A + 1, w_B - w_A + 1, cv.CV_32F)
    cv.MatchTemplate(B, A, s_mat, cv.CV_TM_CCOEFF_NORMED)
    minResp, maxResp, minLoc, maxLoc = cv.MinMaxLoc(s_mat)
    return maxLoc, s_mat
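
A usage sketch; cv.LoadImageM returns the cvMat inputs this function expects, and the file names are hypothetical:

A = cv.LoadImageM('target.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
B = cv.LoadImageM('contest.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
(x, y), s_mat = bestmatch(A, B)
print 'best match at', (x, y)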
Example 10
def template_match(img, refimg, confidence=0.6, xwin=19, ywin=19):
    """
    Return all matches of refimg inside img, using Template Matching.
    (Gratefully) borrowed from:
        http://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805
    Input:
        obj img: A numpy array representing an image
        obj refimg: A numpy array representing the reference image
        float confidence: threshold value (from [0,1]) for template
                          matching
    Output:
        A list of (x,y) coordinates, w.r.t the coordinate system of
        img.
    """
    # OpenCV requires either uint8 or float input, but with floats the
    # matching got buggy and failed badly. (It may mishandle the case where
    # 'img' has no decimals but 'refimg' has decimal expansions, which
    # suggests cv.MatchTemplate internally does exact integer comparisons.)
    img = img.astype('uint8')
    refimg = refimg.astype('uint8')

    I = cv.fromarray(img)
    ref = cv.fromarray(refimg)
    #I = cv.fromarray(np.copy(img))
    #ref = cv.fromarray(np.copy(refimg))
    I_s = cv.CreateMat(I.rows, I.cols, I.type)
    cv.Smooth(I, I_s, cv.CV_GAUSSIAN, param1=xwin, param2=ywin)
    ref_s = cv.CreateMat(ref.rows, ref.cols, ref.type)
    cv.Smooth(ref, ref_s, cv.CV_GAUSSIAN, param1=xwin, param2=ywin)
    #img = np.array(img, dtype='uint8')
    #refimg = np.array(refimg, dtype='uint8')
    result = cv.CreateMat(I_s.rows - ref_s.rows + 1, I_s.cols - ref_s.cols + 1,
                          cv.CV_32F)
    cv.MatchTemplate(I_s, ref_s, result, cv.CV_TM_CCOEFF_NORMED)
    #result = cv2.matchTemplate(img, refimg, cv2.TM_CCOEFF_NORMED)
    # result is a 'similarity' matrix, with values from -1.0 (?) to 1.0,
    # where 1.0 is most similar to the template image.
    result_np = np.asarray(result)
    match_flatidxs = np.arange(
        result_np.size)[(result_np > confidence).flatten()]
    return [
        flatidx_to_pixelidx(flatidx, result_np.shape)
        for flatidx in match_flatidxs
    ]
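
template_match calls a flatidx_to_pixelidx helper that is not shown here. A minimal sketch, assuming it merely converts a flat index over the result matrix into (x, y) pixel coordinates:

def flatidx_to_pixelidx(flatidx, shape):
    # Assumption: row-major flat index -> (x, y) pixel coordinates
    i, j = np.unravel_index(flatidx, shape)
    return (j, i)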
def find_col_x1(I, Icol, bb, K=3, AX=0.2, AY=0.2, T=0.9):
    """ Tries to find the column of marks on I, using ICOL as a ref.
    image in template matching.
    """
    roi_prev = cv.GetImageROI(I)
    shift_roi(I, bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1])

    w_A, h_A = cv.GetSize(Icol)
    w_I, h_I = cv.GetSize(I)
    M = cv.CreateMat(h_I - h_A + 1, w_I - w_A + 1, cv.CV_32F)
    cv.MatchTemplate(I, Icol, M, cv.CV_TM_CCOEFF_NORMED)
    if DEBUG_SAVEIMGS:
        M_np = np.array(M)
        import scipy.misc
        print_dbg("<><><><> Saving '_Mbase.png' <><><><>")
        cv.SaveImage("_Mbase.png", I)
        print_dbg("<><><><> Saving '_M.png' <><><><>")
        scipy.misc.imsave("_M.png", M)
        pdb.set_trace()
    cv.SetImageROI(I, roi_prev)
    i = 0
    xs = []
    _xamt, _yamt = int(round(AX * w_A)), int(round(AY * h_A))
    while i < K:
        minResp, maxResp, minLoc, maxLoc = cv.MinMaxLoc(M)
        if maxResp < T:
            break
        x, y = maxLoc
        # Find the /leftmost/ match: walk left while the response stays
        # high, so we don't report a match in the middle of a column.
        while x > 0 and M[y, x] >= T:
            x -= 1
        xs.append((x + bb[0]))
        _x1 = max(1, x - _xamt)
        _x2 = max(1, x + _xamt)
        _y1 = max(1, y - _yamt)
        _y2 = max(1, y + _yamt)
        M[_y1:_y2, _x1:_x2] = -1.0
        i += 1
    if not xs:
        return None
    elif len(xs) == 1:
        return xs[0]
    return np.median(xs)
Example 12
    def logging(self):
        '''Record one log reading.'''
        target = self._cvmat
        digits_sieve = DigitsSieve()
        for template in self._templates:
            if not template.result:
                # Prepare the buffer that holds the matching result
                template.result = cv.CreateImage(
                    (target.width - template.image.width + 1,
                     target.height - template.image.height + 1),
                    cv.IPL_DEPTH_32F,
                    1,
                )

            cv.MatchTemplate(target, template.image, template.result,
                             config.logging.match_method)

            # Read off the digits
            minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(template.result)
            while maxVal > config.logging.match_threshold:
                # Keep the detected digit information
                digits_sieve.push(
                    A(
                        number=template.number,
                        x=maxLoc[0],
                        y=maxLoc[1],
                        width=template.image.width,
                        height=template.image.height,
                        score=maxVal,
                    ))

                # Clear the scores around the current position, then
                # fetch the next highest-scoring position
                SetReal2DAround(template.result, maxLoc,
                                config.logging.match_exclusion_size, 0.0)
                minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(template.result)

        value = digits_sieve.getValue()
        if value is not None:
            self._log.append(value)
            self.setValue(value)
            self._textarea.insert(tk.END, '%d\n' % value)
            self._textarea.see(tk.END)
Example 13
def xcorr(templateImage, exptImage):

    # Cloning works around a memory-alignment issue between numpy and OpenCV.
    if type(templateImage).__name__ == 'ndarray':
        # cast array to 8-bit, otherwise cross correlation fails.
        tmp = templateImage - float(np.min(templateImage))
        tmp = tmp / float(np.max(tmp))
        tmp = np.array(tmp * 255, dtype=np.uint8)
        tmp = array2cv(tmp)
    if type(exptImage).__name__ == 'ndarray':
        expt = exptImage - float(np.min(exptImage))
        expt = expt / float(np.max(expt))
        expt = np.array(expt * 255, dtype=np.uint8)
        expt = array2cv(expt)
    tmp = cv.CloneImage(tmp)
    padImage = cv.CloneImage(expt)
    resultWidth = padImage.width - tmp.width + 1
    resultHeight = padImage.height - tmp.height + 1
    result = cv.CreateImage((resultWidth, resultHeight), cv.IPL_DEPTH_32F, 1)
    cv.MatchTemplate(padImage, tmp, result, cv.CV_TM_CCOEFF_NORMED)
    result = np.squeeze(cv2array(result))
    return result
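
xcorr depends on array2cv and cv2array converters defined elsewhere. A plausible minimal sketch using the legacy bindings' cv.fromarray / cv.GetMat round-trip; the historical helpers also handled multi-channel layouts:

def array2cv(a):
    # numpy array -> IplImage header over the same data
    return cv.GetImage(cv.fromarray(a))

def cv2array(im):
    # IplImage -> numpy array
    return np.asarray(cv.GetMat(im))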
Example 14
	def matching(self,template,time):
		capture=cv.CaptureFromCAM(0)
		image=cv.QueryFrame(capture)
		# writer=cv.CreateVideoWriter("output.avi", 0, 15, cv.GetSize(image), 1)
		count=0
		w,h = cv.GetSize(template)
		W,H = cv.GetSize(image)
		width = W-w+1
		height = H-h+1
		while count<time:
			image=cv.QueryFrame(capture)
			result = cv.CreateImage((width, height), cv.IPL_DEPTH_32F, 1)
			# image first, then template: result is (W-w+1) x (H-h+1)
			cv.MatchTemplate(image, template, result, cv.CV_TM_SQDIFF)
			print result
			(minVal, maxVal, minLoc, maxLoc) = cv.MinMaxLoc(result)
			(x, y) = minLoc  # SQDIFF: the minimum is the best match
			cv.Rectangle(image, (int(x), int(y)), (int(x)+w, int(y)+h), (255, 255, 255), 1, 0)
			print minLoc
			# cv.WriteFrame(writer, image)
			cv.WaitKey(1)
			cv.ShowImage('Image_Window',image)
			count+=1
		cv.DestroyWindow('Image_Window')
Example 15
def findBestTemplateMatch(tplList, img):
    """
    Compares img against a list of templates.
    tplList is a list of string filenames of template images
    Returns a tuple (num, suit) if a template is suitably matched
    or None if not
    """

    minTpl = 200  # arbitrarily large number
    tString = None

    for t in tplList:
        tpl = cv.LoadImage(t)

        w = img.width - tpl.width + 1
        h = img.height - tpl.height + 1
        result = cv.CreateImage((w, h), 32, 1)
        cv.MatchTemplate(img, tpl, result, cv.CV_TM_SQDIFF_NORMED)

        (minVal, maxVal, minLoc, maxLoc) = cv.MinMaxLoc(result)

        #print t
        #print (minVal, maxVal, minLoc, maxLoc)

        # 0.2 found by experiment (the non-card images end up being around
        # 0.25 - 0.28, and all the card images were around 0.08 or less)
        if minVal < minTpl and minVal < 0.2:
            minTpl = minVal
            tString = t

    #print minTpl, tString
    #cv.ShowImage("win", img)
    #cv.ShowImage("win2", result)
    #cv.WaitKey(0)

    return tString
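
A usage sketch with hypothetical template paths:

img = cv.LoadImage('card_crop.png')
best = findBestTemplateMatch(['tpl/2_hearts.png', 'tpl/king_spades.png'], img)
if best is not None:
    print 'best template:', best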
Example 16
def templateMatchFace(image, template, result):
    cv.MatchTemplate(image, template, result, cv.CV_TM_SQDIFF)
    (minVal, maxVal, minLoc, maxLoc) = cv.MinMaxLoc(result)
    return minLoc  # SQDIFF: the minimum is the best match
Example 17
        print e

    reslist = []
    for template in templates:
        temptype = template[0]
        tempimg  = template[1]
        tempthre = template[2]
        tempname = template[3]
        if template[4] == 'CCORR':
            tempmethod = cv.CV_TM_CCORR_NORMED
        elif template[4] == 'CCOEFF':
            tempmethod = cv.CV_TM_CCOEFF_NORMED
        else:
            tempmethod = cv.CV_TM_SQDIFF_NORMED
        ressize = list(cv.GetSize(cv_image))
        ressize[0] -= cv.GetSize(tempimg)[0] - 1
        ressize[1] -= cv.GetSize(tempimg)[1] - 1
        results = cv.CreateImage(ressize, cv.IPL_DEPTH_32F, 1 )
        cv.MatchTemplate(cv_image, tempimg, results, tempmethod)

        status = cv.MinMaxLoc(results)
        if tempmethod == cv.CV_TM_SQDIFF_NORMED:
            found = (status[0] < tempthre)
            reslist += [(tempname, (status[0], status[2], tempthre, found))]
            if found:
                result.data += tempname + ' '
        else:
            found = (tempthre < status[1])
            reslist += [(tempname, (status[1], status[3], tempthre, found))]
            if found:
                result.data += tempname + ' '
        print reslist

    result_pub.publish(result)
Example 18
    def DropOutCorrection(self):
        """Re-run the algorithm, but replace the displacements in all the regions not grown from good seeds"""
        self.processed[:] = 0
        #determine bad regions
        goodSeeds = np.zeros(self.numRegions)
        goodSeeds[self.regionArray > self.threshold] = 1

        region = 0
        #rerun good seed points
        for y in self.seedsY:
            for x in self.seedsX:

                if goodSeeds[region]:
                    self.processed[y, x] = 1

                    intDpY = int(round(self.dpY[y, x]))
                    intDpX = int(round(self.dpX[y, x]))

                    #PUT ITEM IN SEED LIST
                    self.AddToSeedList(self.quality[y, x], y, x, intDpY,
                                       intDpX, region)

                region += 1

        # Re-allocate the array that holds the cross-correlation results
        resultNp = np.float32(
            np.zeros((2 * self.smallRangeY + 1, 2 * self.smallRangeX + 1)))
        resultCv = cv.fromarray(resultNp)

        #rerun algorithm, if point was grown from good seed maintain it

        while self.seedList.qsize() > 0:

            (tempQuality, pointInd, iniDpY, iniDpX,
             region) = self.seedList.get()
            (y, x) = np.unravel_index(pointInd, self.dpY.shape)
            self.processed[y, x] = 1

            #Re-process if not originally from a good seed
            if not goodSeeds[self.regionImage[y, x]]:
                self.regionImage[y, x] = region

                #GRAB SLICE OF DATA FOR CC CONVERT TO CV FRIENDLY FORMAT
                startBlockY = self.windowCenterY[y] - self.halfY
                stopBlockY = self.windowCenterY[y] + self.halfY + 1
                startBlockX = self.windowCenterX[x] - self.halfX
                stopBlockX = self.windowCenterX[x] + self.halfX + 1
                template = cv.fromarray(
                    np.float32(self.pre[startBlockY:stopBlockY,
                                        startBlockX:stopBlockX]))

                startBlockY = self.windowCenterY[
                    y] - self.halfY - self.smallRangeY + iniDpY
                stopBlockY = self.windowCenterY[
                    y] + self.halfY + self.smallRangeY + 1 + iniDpY
                startBlockX = self.windowCenterX[
                    x] - self.halfX - self.smallRangeX + iniDpX
                stopBlockX = self.windowCenterX[
                    x] + self.halfX + self.smallRangeX + 1 + iniDpX
                image = cv.fromarray(
                    np.float32(self.post[startBlockY:stopBlockY,
                                         startBlockX:stopBlockX]))

                cv.MatchTemplate(template, image, resultCv,
                                 cv.CV_TM_CCORR_NORMED)
                resultNp = np.asarray(resultCv)

                #FIND MAXIMUM, PERFORM SUB-SAMPLE FITTING
                maxInd = resultNp.argmax()
                maxCC = resultNp.max()
                maxY, maxX = np.unravel_index(maxInd, resultNp.shape)
                self.quality[y, x] = maxCC

                #perform sub-sample fit
                #fit to f(x) = ax^2 + bx + c in both directions
                if maxY > 0 and maxY < 2 * self.smallRangeY - 1:
                    deltaY = self.SubSampleFit(resultNp[maxY - 1:maxY + 2,
                                                        maxX])
                else:
                    deltaY = 0.0

                if maxX > 0 and maxX < 2 * self.smallRangeX - 1:
                    deltaX = self.SubSampleFit(resultNp[maxY,
                                                        maxX - 1:maxX + 2])
                else:
                    deltaX = 0.0

                self.dpY[y, x] = maxY - self.smallRangeY + deltaY + iniDpY
                if self.dpY[y, x] > self.rangeY:
                    self.dpY[y, x] = self.rangeY
                if self.dpY[y, x] < -self.rangeY:
                    self.dpY[y, x] = -self.rangeY

                self.dpX[y, x] = maxX - self.smallRangeX + deltaX + iniDpX
                if self.dpX[y, x] > self.rangeX:
                    self.dpX[y, x] = self.rangeX
                if self.dpX[y, x] < -self.rangeX:
                    self.dpX[y, x] = -self.rangeX

                intDpY = int(round(self.dpY[y, x]))
                intDpX = int(round(self.dpX[y, x]))

                #PUT ITEM IN SEED LIST
                self.AddToSeedList(maxCC, y, x, intDpY, intDpX, region)

            else:

                intDpY = int(round(self.dpY[y, x]))
                intDpX = int(round(self.dpX[y, x]))

                #PUT ITEM IN SEED LIST
                self.AddToSeedList(self.quality[y, x], y, x, intDpY, intDpX,
                                   region)
Example 19
# create the wanted images
import cv
image=cv.LoadImage('picture.png', cv.CV_LOAD_IMAGE_COLOR)
grey=cv.CreateImage((100,100),8,1)
eig = cv.CreateImage (cv.GetSize (grey), 32, 1)
temp = cv.CreateImage (cv.GetSize (grey), 32, 1)
# the default parameters
quality = 0.01
min_distance = 10
# search the good points
features = cv.GoodFeaturesToTrack(
    grey, eig, temp,
    1000,
    quality, min_distance, None, 3, 0, 0.04)
for (x, y) in features:
    print str(x) + ',' + str(y)
    cv.Circle(image, (int(x), int(y)), 3, (0, 255, 0), -1, 8, 0)


cv.ResetImageROI(image)
W,H=cv.GetSize(image)
w,h=cv.GetSize(template)
width=W-w+1
height=H-h+1
result=cv.CreateImage((width,height),32,1)
cv.MatchTemplate(frame,template, result,cv.CV_TM_SQDIFF)
(minVal, maxVal, minLoc, maxLoc) = cv.MinMaxLoc(result)
(x, y) = minLoc  # SQDIFF: the minimum is the best match
cv.Rectangle(image2, (int(x), int(y)), (int(x)+w, int(y)+h), (255, 255, 255), 1, 0)
def evalPatchSimilarity(I, patch, debug=False):
    # perform template matching and return the best match in expanded region
    I_in = np.copy(I)
    patch_in = np.copy(patch)

    if debug:
        print "...stepping into evalPatchSimilarity."
        pdb.set_trace()

    I = sh.prepOpenCV(I)
    patch = sh.prepOpenCV(patch)
    # See pixel_reg/eric_np2cv/demo.py for why I scale by 255.0 when
    # converting NP -> OpenCV.
    patchCv = cv.fromarray(np.copy(patch) * 255.0)
    ICv = cv.fromarray(np.copy(I) * 255.0)
    #patchCv=cv.fromarray(np.copy(patch))
    #ICv=cv.fromarray(np.copy(I))

    # call template match
    outCv = cv.CreateMat(I.shape[0] - patch.shape[0] + 1,
                         I.shape[1] - patch.shape[1] + 1, patchCv.type)
    cv.MatchTemplate(ICv, patchCv, outCv, cv.CV_TM_CCOEFF_NORMED)
    Iout = np.asarray(outCv) / 255.0
    #Iout=np.asarray(outCv)
    Iout[Iout == 1.0] = 0
    YX = np.unravel_index(Iout.argmax(), Iout.shape)

    # local alignment: expand a little, then local align
    i1 = YX[0]
    i2 = YX[0] + patch.shape[0]
    j1 = YX[1]
    j2 = YX[1] + patch.shape[1]
    I1c = I[i1:i2, j1:j2]
    IO = imagesAlign(I1c, patch, trfm_type='rigid', minArea=np.power(2, 20))

    Ireg = IO[1]
    #Ireg = np.nan_to_num(Ireg)
    # TODO: Ireg is frequently just a competely-black image (due to
    # presence of Nan's?). By inserting the line:
    #     Ireg = np.nan_to_num(Ireg)
    # This stopped an apparent bug in Marin, where various attribute
    # patches would erroneously be matched to the wrong side of the
    # ballot.

    # C := num pixels to discard around border. This used to be C=5,
    #      but this caused issues if one of the 'patch' dimensions was
    #      <= 10, causing an ill-formed image patch.
    AMT = 0.2
    C_i = int(round(Ireg.shape[0] * AMT))
    C_j = int(round(Ireg.shape[1] * AMT))
    D_i = int(round(patch.shape[0] * AMT))
    D_j = int(round(patch.shape[1] * AMT))
    bb_1 = [
        C_i,
        min(Ireg.shape[0] - 1, Ireg.shape[0] - C_i), C_j,
        min(Ireg.shape[1] - 1, Ireg.shape[1] - C_j)
    ]
    bb_2 = [
        D_i,
        min(patch.shape[0] - 1, patch.shape[0] - D_i), D_j,
        min(patch.shape[1] - 1, patch.shape[1] - D_j)
    ]
    if bb_1[0] >= bb_1[1]:
        bb_1[0] = 0
    if bb_1[1] <= 2:
        bb_1[1] = Ireg.shape[0] - 1
    if bb_1[2] >= bb_1[3]:
        bb_1[2] = 0
    if bb_1[3] <= 2:
        bb_1[3] = Ireg.shape[1] - 1

    if bb_2[0] >= bb_2[1]:
        bb_2[0] = 0
    if bb_2[1] <= 2:
        bb_2[1] = patch.shape[0] - 1
    if bb_2[2] >= bb_2[3]:
        bb_2[2] = 0
    if bb_2[3] <= 2:
        bb_2[3] = patch.shape[1] - 1
    #Ireg1=Ireg[C:Ireg.shape[0]-C,C:Ireg.shape[1]-C]
    #patch1=patch[C:patch.shape[0]-C,C:patch.shape[1]-C]
    Ireg1 = Ireg[bb_1[0]:bb_1[1], bb_1[2]:bb_1[3]]
    patch1 = patch[bb_2[0]:bb_2[1], bb_2[2]:bb_2[3]]

    if 0 in Ireg1.shape or 0 in patch1.shape:
        print "==== Uhoh, a crash is about to happen."
        print "Ireg.shape: {0}  patch.shape: {1}".format(
            Ireg.shape, patch.shape)
        print "Ireg1.shape: {0}  patch1.shape: {1}".format(
            Ireg1.shape, patch1.shape)
        print "bb_1:", bb_1
        print "bb_2:", bb_2
        misc.imsave("_evalpatchsim_ireg.png", Ireg)
        misc.imsave("_evalpatchsim_patch.png", patch)
        misc.imsave("_evalpatchsim_I1c.png", I1c)
        misc.imsave("_evalpatchsim_I.png", I)

    err = sh.variableDiffThr(Ireg1, patch1)
    diff = np.abs(Ireg1 - patch1)
    # estimate threshold for comparison

    return (-err, YX, diff)
Example 21
import pdb
import cv
import numpy as np
from matplotlib.pyplot import show, imshow, figure

I=np.round(np.load('debug_I.npy')*255.)/255.
patch=np.round(np.load('debug_patch.npy')*255.)/255.

#Ifoo=np.float32(misc.imread('debug_I.png')/255.0)
#patchfoo=np.float32(misc.imread('debug_patch.png')/255.0)

patchCv = cv.fromarray(np.copy(patch))
ICv = cv.fromarray(np.copy(I))
# call template match
outCv = cv.CreateMat(I.shape[0] - patch.shape[0] + 1,
                     I.shape[1] - patch.shape[1] + 1, patchCv.type)
cv.MatchTemplate(ICv, patchCv, outCv, cv.CV_TM_CCOEFF_NORMED)
Iout = np.asarray(outCv)
YX = np.unravel_index(Iout.argmax(), Iout.shape)

#misc.imsave('debug_I.png',I)
#misc.imsave('debug_patch.png',patch)

pdb.set_trace()
figure(1); imshow(I);
figure(2); imshow(patch);
figure(3); imshow(Iout);
show()

def templateMatching(template):
    # Declare as globals since we are assigning to them now
    global capture
    global camera_index
    """
    # Create multiple smaller sizes of the template so we find a plate at different distances
    templateList = [0, 0]
    tmp1 = template
    for n in range(2):
        templateList[n] = tmp1
        (w, h) = cv.GetSize(tmp1)
        tmp2 = cv.CreateImage((int(w * 0.5), int(h * 0.5)), cv.IPL_DEPTH_8U, 3)
        cv.Resize(tmp1, tmp2, cv.CV_INTER_LINEAR)
        tmp1 = tmp2
    """

    # Capture current frame
    frame = cv.QueryFrame(capture)
    """
    # Create the temporary images that will hold the comparison results
    frameWidth, frameHeight = cv.GetSize(frame)
    resultList = [0, 0]
    for n in range(2):
        templateWidth, templateHeight = cv.GetSize(templateList[n])
        width = frameWidth - templateWidth + 1
        height = frameHeight - templateHeight + 1
        resultList[n] = cv.CreateImage((width, height), 32, 1)
    """

    frameWidth, frameHeight = cv.GetSize(frame)
    templateWidth, templateHeight = cv.GetSize(template)
    width = frameWidth - templateWidth + 1
    height = frameHeight - templateHeight + 1
    result = cv.CreateImage((width, height), 32, 1)

    # cv.ShowImage("CS201 - Tyler Boraski - Final Project", templateList[0])
    # time.sleep(10)

    # Query for templates
    while True:
        # Capture frame
        frame = cv.QueryFrame(capture)

        # Check for template matches
        cv.MatchTemplate(frame, template, result, cv.CV_TM_SQDIFF)
        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(result)
        #print minVal
        if minVal < 10000000.0:
            cv.Rectangle(
                frame, (minLoc[0], minLoc[1]),
                (minLoc[0] + template.width, minLoc[1] + template.height),
                cv.CV_RGB(0, 255, 255))
        """
        for n in range(2):
            cv.MatchTemplate(frame, templateList[n], resultList[n], cv.CV_TM_SQDIFF)
            minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(resultList[n])
            if n == 0 and minVal < 10000000.0:
                cv.Rectangle(frame, (minLoc[0], minLoc[1]), (minLoc[0] + templateList[n].width, minLoc[1] + templateList[n].height), cv.CV_RGB(255, 255, 255))
            if n == 1 and minVal < 100000000.0:
                cv.Rectangle(frame, (minLoc[0], minLoc[1]), (minLoc[0] + templateList[n].width, minLoc[1] + templateList[n].height), cv.CV_RGB(0, 255, 0))
                print minVal
        """

        # Display image
        cv.ShowImage("CS201 - Tyler Boraski - Final Project", frame)

        # If "esc" is pressed the program will end
        esc = cv.WaitKey(7) % 0x100
        if esc == 27:
            quit()
def evalPatchSimilarity2(I, patch, debug=False):
    # perform template matching and return the best match in expanded region
    I_in = np.copy(I)
    patch_in = np.copy(patch)

    I = sh.prepOpenCV(I)
    patch = sh.prepOpenCV(patch)
    # See pixel_reg/eric_np2cv/demo.py for why I scale by 255.0 when
    # converting NP -> OpenCV.
    patchCv = cv.fromarray(np.copy(patch) * 255.0)
    ICv = cv.fromarray(np.copy(I) * 255.0)
    #cv.SaveImage("_patchCv.png", patchCv)
    #cv.SaveImage("_ICv.png", ICv)
    #patchCv = tempmatch.smooth_mat(patchCv, 5, 5, bordertype='const', val=255)
    #ICv = tempmatch.smooth_mat(ICv, 5, 5, bordertype='const', val=255)
    #cv.SaveImage("_patchCv_smooth.png", patchCv)
    #cv.SaveImage("_ICv_smooth.png", ICv)
    #pdb.set_trace()
    # call template match
    outCv = cv.CreateMat(I.shape[0] - patch.shape[0] + 1,
                         I.shape[1] - patch.shape[1] + 1, patchCv.type)
    cv.MatchTemplate(ICv, patchCv, outCv, cv.CV_TM_CCOEFF_NORMED)
    #Iout=np.asarray(outCv) / 255.0
    Iout = np.asarray(outCv)
    YX = np.unravel_index(Iout.argmax(), Iout.shape)

    # take result and expand in each dimension by pixPad
    i1 = YX[0]
    i2 = YX[0] + patch.shape[0]
    j1 = YX[1]
    j2 = YX[1] + patch.shape[1]

    # [new] patch pad 25%
    # if the result has any nans then we'll call that bad,
    # undo the transformation and compute error wrt original
    # inputs.

    pixPad = int(round(.25 * min(patch.shape)))  # used as a slice index
    patchPad = np.empty(
        (patch.shape[0] + 2 * pixPad, patch.shape[1] + 2 * pixPad),
        dtype='float32')
    patchPad[:] = np.nan
    patchPad[pixPad:patch.shape[0] + pixPad,
             pixPad:patch.shape[1] + pixPad] = patch

    # check how much we need to pad in each dimension
    # if any values are positive, that means we need to pad
    # the difference
    i1pad = i1 - pixPad
    i1exp = max(0, -i1pad)
    i1pad = (i1 - pixPad) + i1exp

    i2pad = i2 + pixPad
    i2exp = max(0, i2pad - I.shape[0])
    i2pad = (i2 + pixPad) - i2exp

    j1pad = j1 - pixPad
    j1exp = max(0, -j1pad)
    j1pad = (j1 - pixPad) + j1exp

    j2pad = j2 + pixPad
    j2exp = max(0, j2pad - I.shape[1])
    j2pad = (j2 + pixPad) - j2exp

    # crop out padded patch
    I1c = I[i1pad:i2pad, j1pad:j2pad]

    # check for padding
    if (i1exp + i2exp + j1exp + j2exp) > 0:
        I1c = sh.padWithBorderHandling(I1c, i1exp, i2exp, j1exp, j2exp)

    # expand if necessary
    #hCells = max(int(round(I1c.shape[1] / 200)), 1)
    #vCells = max(int(round(I1c.shape[0] / 200)), 1)
    hCells, vCells = 1, 1

    IO = imagesAlign(I1c,
                     patchPad,
                     trfm_type='rigid',
                     hCells=hCells,
                     vCells=vCells,
                     minArea=np.power(2, 15))
    if debug:
        pdb.set_trace()
    Ireg = IO[1]
    Ireg = Ireg[pixPad:patch.shape[0] + pixPad, pixPad:patch.shape[1] + pixPad]

    if np.sum(np.isnan(Ireg)) > 0:
        # if there are nan values [brought in from the border]
        # then that means the alignment result was pretty extreme
        err = np.inf
        diff = patch
    else:
        err = sh.variableDiffThr(Ireg, patch)
        diff = np.abs(Ireg - patch)

    return (-err, YX, diff)
Example 24
def bestmatch(A,
              imgpaths,
              bb=None,
              img2flip=None,
              do_smooth=0,
              xwinA=3,
              ywinA=3,
              xwinI=3,
              ywinI=3,
              prevmatches=None,
              jobid=None,
              queue_mygauge=None,
              patch_outpaths=None):
    """ Runs template matching on IMGPATHS, searching for best match
    for A. 
    Input:
        A: Either a string (path), or an IplImage.
        list IMGPATHS: List of imgpaths to search over
        tuple BB: (x1,y1,x2,y2)
            Search window to do template matching over.
        dict IMG2FLIP: maps {str imgpath: bool isflipped}
        int DO_SMOOTH:
        dict PREVMATCHES: {imgpath: [(x_i, y_i), ...]}. Matches to ignore.
        obj QUEUE_MYGAUGE:
            Used to signal to a running MyGauge instance that one job has
            completed. (Typically, this gauge lives in a separate process)
        dict PATCH_OUTPATHS: {str imgpath: str patch_outpath}
            If given, then save each patch to disk given by the patchpath in
            PATCH_OUTPATHS.
    Output:
        dict {str IMGPATH: (x1, y1, float score)}.
    """
    if type(A) in (str, unicode):
        A_im = cv.LoadImage(A, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        A_im = A
    if do_smooth == SMOOTH_BOTH_BRD or do_smooth == SMOOTH_A_BRD:
        A_im = smooth(A_im, xwinA, ywinA, bordertype='const', val=255)
    elif do_smooth in (SMOOTH_BOTH, SMOOTH_A):
        A_im = smooth(A_im, xwinA, ywinA)
    w_A, h_A = cv.GetSize(A_im)
    results = {}
    for i, imgpath in enumerate(imgpaths):
        if type(imgpath) in (str, unicode):
            I = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
            Iorig = I
        else:
            I = imgpath
            Iorig = I
            imgpath = i
        if do_smooth in (SMOOTH_BOTH_BRD, SMOOTH_IMG_BRD):
            I = smooth(I, xwinI, ywinI, bordertype='const', val=255)
        elif do_smooth in (SMOOTH_BOTH, SMOOTH_IMG):
            I = smooth(I, xwinI, ywinI)
        if img2flip and img2flip[imgpath]:
            cv.Flip(I, I, flipMode=-1)
            Iorig = I
        if bb != None:
            new_roi = tuple(
                map(int, (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1])))
            cv.SetImageROI(I, new_roi)
        w_I, h_I = cv.GetSize(I)
        matchmat = cv.CreateMat(h_I - h_A + 1, w_I - w_A + 1, cv.CV_32F)
        cv.MatchTemplate(I, A_im, matchmat, cv.CV_TM_CCOEFF_NORMED)
        # 0.) Suppress previously-found matches, if any
        prevmats = prevmatches.get(imgpath, []) if prevmatches else []
        for (x, y) in prevmats:
            print 'suppressing: {0} at {1}'.format(imgpath, (x, y))
            _x1 = max(0, int(x - (w_A / 3)))
            _y1 = max(0, int(y - (h_A / 3)))
            _x2 = min(matchmat.cols, int(x + (w_A / 3)))
            _y2 = min(matchmat.rows, int(y + (h_A / 3)))
            matchmat[_y1:_y2, _x1:_x2] = -1.0
        minResp, maxResp, minLoc, maxLoc = cv.MinMaxLoc(matchmat)
        x, y = maxLoc[0], maxLoc[1]
        if bb != None:
            x += bb[0]
            y += bb[1]
        results[imgpath] = (x, y, maxResp)
        # Save the patch to disk if necessary
        if patch_outpaths:
            outpath = patch_outpaths.get(imgpath, None)
            if outpath:
                try:
                    os.makedirs(os.path.split(outpath)[0])
                except:
                    pass
                cv.SetImageROI(Iorig, (int(x), int(y), int(w_A), int(h_A)))
                cv.SaveImage(outpath, Iorig)
        if jobid and wx.App.IsMainLoopRunning():
            wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.tick",
                         (jobid, ))
        if queue_mygauge != None:
            queue_mygauge.put(True)

    return results
Example 25
def get_tempmatches(A,
                    imgpaths,
                    img2flip=None,
                    T=0.8,
                    bb=None,
                    do_smooth=0,
                    xwinA=13,
                    ywinA=13,
                    xwinI=13,
                    ywinI=13,
                    MAX_MATS=50,
                    prevmatches=None,
                    DELT=0.5,
                    atleastone=False,
                    jobid=None,
                    queue_mygauge=None):
    """ Runs template matching, trying to find image A within each image
    in IMGPATHS. Returns location (and responses) of all matches greater than
    some threshold T.
    Input:
        IplImage A:
        list IMGPATHS:
        dict IMG2FLIP: maps {str imgpath: bool isflipped}
        float T: Template-matching sensitivity
        tuple BB: (x1,y1,x2,y2)
            Search in each img in IMGPATHS inside this BB only.
        float DELT: How much we should perform non-maximal suppression,
            on each axis.
        dict PREVMATCHES: maps {str imgpath: [(x1,y1,x2,y2), ...]}
    Output:
        dict MATCHES, of the form {str imgpath: [(x1, y1, x2, y2, float score), ...]}
    """
    if do_smooth == SMOOTH_BOTH_BRD or do_smooth == SMOOTH_A_BRD:
        A_im = smooth(A, xwinA, ywinA, bordertype='const', val=255)
    elif do_smooth in (SMOOTH_BOTH, SMOOTH_A):
        A_im = smooth(A, xwinA, ywinA)
    else:
        A_im = A
    wA, hA = cv.GetSize(A_im)
    results = {}  # {str imgpath: [(x1,y1,x2,y2,score),...]}
    for i, imgpath in enumerate(imgpaths):
        if isinstance(imgpath, str) or isinstance(imgpath, unicode):
            I = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
        else:
            I = imgpath
            imgpath = i
        if do_smooth in (SMOOTH_BOTH_BRD, SMOOTH_IMG_BRD):
            I = smooth(I, xwinI, ywinI, bordertype='const', val=255)
        elif do_smooth in (SMOOTH_BOTH, SMOOTH_IMG):
            I = smooth(I, xwinI, ywinI)
        if img2flip and img2flip[imgpath]:
            cv.Flip(I, I, flipMode=-1)
        if bb != None:
            new_roi = tuple(
                map(int, (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1])))
            cv.SetImageROI(I, new_roi)
        wI, hI = cv.GetSize(I)
        M = cv.CreateMat(hI - hA + 1, wI - wA + 1, cv.CV_32F)
        cv.MatchTemplate(I, A_im, M, cv.CV_TM_CCOEFF_NORMED)
        M_np = np.array(M)
        # 0.) Suppress previously-found matches, if any
        prevmats = prevmatches.get(imgpath, []) if prevmatches else []
        for (x1, y1, x2, y2) in prevmats:
            #print 'suppressing: {0} at {1}'.format(imgpath, (x1, y1))
            _x1 = max(0, int(x1 - max(1, (wA * DELT))))
            _y1 = max(0, int(y1 - max(1, (hA * DELT))))
            _x2 = min(M_np.shape[1], int(x1 + max(1, (wA * DELT))))
            _y2 = min(M_np.shape[0], int(y1 + max(1, (hA * DELT))))
            M_np[_y1:_y2, _x1:_x2] = -1.0
        score = np.inf
        #print 'best score:', np.max(M_np)
        num_mats = 0
        matches = []
        while score > T and num_mats < MAX_MATS:
            M_idx = np.argmax(M_np)
            i = int(M_idx / M.cols)
            j = M_idx % M.cols
            score = M_np[i, j]
            if score < T:
                break
            # Offset into original-image coordinates for the output only;
            # (i, j) must stay in M_np coordinates for the suppression below
            i_out, j_out = i, j
            if bb != None:
                i_out += bb[1]
                j_out += bb[0]
            matches.append((j_out, i_out, j_out + wA, i_out + hA, score))
            # Suppression
            _x1 = max(0, int(j - max(1, (wA * DELT))))
            _y1 = max(0, int(i - max(1, (hA * DELT))))
            _x2 = min(M_np.shape[1], int(j + max(1, (wA * DELT))))
            _y2 = min(M_np.shape[0], int(i + max(1, (hA * DELT))))
            M_np[_y1:_y2, _x1:_x2] = -1.0
            num_mats += 1
        if not matches and atleastone:
            # Fall back to the single best response, even if below T
            M_idx = np.argmax(M_np)
            i = int(M_idx / M.cols)
            j = M_idx % M.cols
            score = M_np[i, j]
            if bb != None:
                i += bb[1]
                j += bb[0]
            matches.append((j, i, j + wA, i + hA, score))
        results[imgpath] = matches
        if jobid and wx.App.IsMainLoopRunning():
            # Note: I don't think this actually does anything, since this
            # is living in a separate process, which can't communicate
            # to the wx App instance living in the original host process
            wx.CallAfter(Publisher().sendMessage, "signals.MyGauge.tick",
                         (jobid, ))
        if jobid and queue_mygauge != None and wx.App.IsMainLoopRunning():
            queue_mygauge.put(True)
    return results
                        bl.startX:bl.stopX].copy()
        bl.ReadFrame()
        postRf = bl.data.copy()

        locationY, locationX = numpy.mgrid[bl.startY:bl.stopY - bl.stepY,
                                           bl.startX:bl.stopX]
        motionCompRf = interp2(numpy.arange(postRf.shape[1]),
                               numpy.arange(postRf.shape[0]), postRf,
                               locationX + dpXUp, locationY + dpYUp)

        #Now compute the cross correlation between the pre frame and the motion compensated post frame
        import cv
        template = cv.fromarray(numpy.float32(motionCompRf))
        image = cv.fromarray(numpy.float32(preRf))
        resultCv = cv.fromarray(numpy.float32(numpy.zeros((1, 1))))
        cv.MatchTemplate(template, image, resultCv, cv.CV_TM_CCORR_NORMED)

        resultNp = numpy.asarray(resultCv)
        rhoRfJ[skip, 0] = float(resultNp)

        ###################
        ####Other set######
        ###################
        reader.SetFileName(RESULT_DIR + '/blockMatch/frame_' + str(frameNo) +
                           '_' + str(frameNo + skip + 1) + 'dispY.mhd')
        dpYItk = reader.Execute()
        dpY = sitk.GetArrayFromImage(dpYItk)

        reader.SetFileName(RESULT_DIR + '/blockMatch/frame_' + str(frameNo) +
                           '_' + str(frameNo + skip + 1) + 'dispX.mhd')
        dpXItk = reader.Execute()
Example 27
def temp_match(I, bb, imList, bbSearch=None, bbSearches=None, rszFac=0.75,
               padSearch=0.75, padPatch=0.0):
    bb = list(bb)
    if bbSearch is not None:
        bbSearch = list(bbSearch)
    if bbSearches is not None:
        bbSearches = list(bbSearches)
    matchList = []  # entries: (filename, score1, score2, Ireg, i1, i2, j1, j2, rszFac)

    I = np.round(shared.fastResize(I, rszFac) * 255.) / 255

    bb[0] = bb[0] * rszFac
    bb[1] = bb[1] * rszFac
    bb[2] = bb[2] * rszFac
    bb[3] = bb[3] * rszFac
    [bbOut, bbOff] = shared.expand(bb[0], bb[1], bb[2], bb[3],
                                   I.shape[0], I.shape[1], padPatch)
    patchFoo = I[bbOut[0]:bbOut[1], bbOut[2]:bbOut[3]]

    patch = patchFoo[bbOff[0]:bbOff[1], bbOff[2]:bbOff[3]]

    if bbSearch is not None:
        bbSearch[0] = bbSearch[0] * rszFac
        bbSearch[1] = bbSearch[1] * rszFac
        bbSearch[2] = bbSearch[2] * rszFac
        bbSearch[3] = bbSearch[3] * rszFac

    for cur_i, imP in enumerate(imList):
        if bbSearches is not None:
            bbSearch = map(lambda c: c * rszFac, bbSearches[cur_i])
        I1 = shared.standardImread(imP, flatten=True)
        I1 = np.round(shared.fastResize(I1, rszFac) * 255.) / 255.
        # crop to region if specified
        if bbSearch is not None:
            [bbOut1, bbOff1] = shared.expand(bbSearch[0], bbSearch[1],
                                             bbSearch[2], bbSearch[3],
                                             I1.shape[0], I1.shape[1], padSearch)
            I1 = I1[bbOut1[0]:bbOut1[1], bbOut1[2]:bbOut1[3]]
        if I1.shape[0] < patch.shape[0] or I1.shape[1] < patch.shape[1]:
            h_big = max(I1.shape[0], patch.shape[0])
            w_big = max(I1.shape[1], patch.shape[1])
            I1_big = np.zeros((h_big, w_big)).astype('float32')
            I1_big[0:I1.shape[0], 0:I1.shape[1]] = I1
            I1 = I1_big

        patchCv = cv.fromarray(np.copy(patch))
        ICv = cv.fromarray(np.copy(I1))
        outCv = cv.CreateMat(abs(I1.shape[0] - patch.shape[0]) + 1,
                             abs(I1.shape[1] - patch.shape[1]) + 1, cv.CV_32F)

        cv.MatchTemplate(ICv, patchCv, outCv, cv.CV_TM_CCOEFF_NORMED)
        Iout = np.asarray(outCv)

        Iout[Iout == 1.0] = 0.995  # opencv bug

        score1 = Iout.max()  # NCC score
        YX = np.unravel_index(Iout.argmax(), Iout.shape)
        i1 = YX[0]
        i2 = YX[0] + patch.shape[0]
        j1 = YX[1]
        j2 = YX[1] + patch.shape[1]
        (err, diff, Ireg) = shared.lkSmallLarge(
            patch, I1, i1, i2, j1, j2, minArea=np.power(2, 17))
        score2 = err / diff.size  # pixel reg score
        if bbSearch is not None:
            matchList.append((imP, score1, score2, Ireg,
                              i1 + bbOut1[0], i2 + bbOut1[0],
                              j1 + bbOut1[2], j2 + bbOut1[2], rszFac))
        else:
            matchList.append((imP, score1, score2, Ireg,
                              i1, i2, j1, j2, rszFac))

    return matchList
Example 28
def decode_patch(original_image, original_mark, expected_bits):
    """
    Given a ES&S-style ballot, returns the LHS barcode as a bitstring 
    if one is found, along with bounding box of each digit in the barcode.
    The algorithm works by finding the column of timing marks on
    the left side of the ballot and looking at the intensity of pixels
    just to the right of each of them to detect "on" or "off" bits.
    Input:
        original_image : cv image of ballot
        original_mark  : cv image of mark
        expected_bits  : number of bits expected in barcode
    Output:
        bitstring : string representation of barcode (ex: "100110...")
        locations : {bit_value: [(x1,y1,x2,y2), ...]}
    """

    # pixels for resized mark, image will be scaled down by same ratio
    resized_mark_height = 15  # will not match if lower than ~10

    # left portion of page to template match (ex: 5 means 1/5 of page)
    portion = 5  # error if more than ~6

    mark_w, mark_h = cv.GetSize(original_mark)
    scaling = float(resized_mark_height) / mark_h
    w = int(round(mark_w * scaling))
    h = int(round(mark_h * scaling))
    resized_mark = cv.CreateImage((w, h), 8, 1)
    cv.Resize(original_mark, resized_mark)

    image_W, image_H = cv.GetSize(original_image)
    cv.SetImageROI(original_image,
                   (0, 0, int(round(image_W / portion)), image_H))
    W = int(round(image_W / portion * scaling))
    H = int(round(image_H * scaling))
    resized_image = cv.CreateImage((W, H), 8, 1)
    cv.Resize(original_image, resized_image)
    width = W - w + 1
    height = H - h + 1
    match_mat = cv.CreateImage((width, height), 32, 1)
    cv.MatchTemplate(resized_image, resized_mark, match_mat,
                     cv.CV_TM_CCOEFF_NORMED)
    cv.ResetImageROI(original_image)

    # find all possible match locations
    best_column = 0
    most_matches = 0
    possible = []
    for x in range(width):
        column_matches = 0
        for y in range(height):
            if match_mat[y, x] > 0.6:
                possible.append((y, x))
                column_matches += 1
        if column_matches > most_matches:
            most_matches = column_matches
            best_column = x
    f1 = filter(lambda p: p[1] < best_column + w, possible)
    f1 = sorted(f1, key=lambda z: z[0])
    if not f1:
        return (None, None)

    # filter match locations
    last_location = f1[0]
    last_max = match_mat[f1[0][0], f1[0][1]]
    locations = []
    for p in f1[1:]:
        y, x = p
        r = match_mat[y, x]
        if y > last_location[0] + h:
            locations.append(last_location)
            last_max = r
            last_location = p
            continue
        if r > last_max:
            last_max = r
            last_location = p
    locations.append(last_location)
    locations = locations[1:-1]
    if len(locations) != expected_bits:
        return (None, None)

    # detect mark to the right of the timing marks
    y0, x0 = locations[0]
    thresh = 0
    black = 0
    white = 0
    for x in range(x0, x0 + w):
        black += resized_image[y0 + h / 2, x]
        white += resized_image[y0 - h / 2, x]
    thresh = 0.7 * white + 0.3 * black
    bitstring = ''
    bit_locations = {}
    for (y, x) in locations:
        intensity = 0
        x_start = x + (2 * w)
        x_end = x_start + w
        digit = ''
        for x_check in range(x_start, x_end):
            intensity += resized_image[y + (h / 2), x_check]
        digit = '1' if (intensity < thresh) else '0'
        bitstring += digit
        resized_locations = [x_start, y, x_end, y + h]
        total_intensity = 0
        if digit == '0':
            # check for stray marks
            for x1 in range(x_start, x_end):
                for y1 in range(y, y + h):
                    total_intensity += resized_image[y1, x1]
            if total_intensity < 255 * 0.99 * w * h:
                return (None, None)
        mark_location = tuple(
            [int(round(z / scaling)) for z in resized_locations])
        bit_locations.setdefault(digit, []).append(mark_location)
    return bitstring, bit_locations
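
A usage sketch; the file names and bit count are hypothetical:

ballot = cv.LoadImage('ballot.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
mark = cv.LoadImage('timing_mark.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
bitstring, bit_locations = decode_patch(ballot, mark, expected_bits=36)
if bitstring is None:
    print 'barcode could not be decoded'
else:
    print 'decoded:', bitstring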
Example 29
    def locate_object(self, frame):
        '''
        Finds the object in the given frame based on information from previous
        frames.

        The object location as a tuple (x,y) within the given image is
        returned.  If the object is not found, False is returned.
        Tracker.object_center will always contain the last known object
        location.
        '''

        if not self._template:
            raise RuntimeError("The Tracker class can not be used after it is "
                               "unpickled.")

        search_rect = clip_rectangle(
            (
                self.object_center[0] - self.search_size[0] / 2,  # x
                self.object_center[1] - self.search_size[1] / 2,  # y
                self.search_size[0],  # width
                self.search_size[1],  # height
            ),
            frame.width,
            frame.height)
        search_image = self._preprocess(crop(frame, search_rect))
        result = cv.CreateImage(
            (search_image.width - self._template.width + 1,
             search_image.height - self._template.height + 1),
            cv.IPL_DEPTH_32F, 1)
        cv.MatchTemplate(search_image, self._template, result,
                         self.match_method)
        min_or_max = MATCH_METHOD_MIN_OR_MAX[self.match_method]
        minmaxloc = cv.MinMaxLoc(result)
        if abs(minmaxloc[1] - minmaxloc[0]) < 0.001:
            return False
        match_in_result = minmaxloc[min_or_max]

        # Change from result image coordinates to search region coordinates to
        # image coordinates
        match_in_search_region = (
            match_in_result[0] + self._template.width / 2,
            match_in_result[1] + self._template.height / 2,
        )
        object_center = (
            match_in_search_region[0] + search_rect[0],
            match_in_search_region[1] + search_rect[1],
        )
        object_center = (
            int(in_range(0, object_center[0], frame.width - 1)),
            int(in_range(0, object_center[1], frame.height - 1)),
        )

        # Determine if the max/min is significant.
        hist = cv.CreateHist([256], cv.CV_HIST_ARRAY, [[0, 255]], 1)
        cv.CalcHist([scale_32f_image(result)], hist)
        # XXX stddevs from mean should be calculated from either 0 or 255
        #    depending on min or max
        distance = abs(libvision.hist.num_stddev_from_mean(hist, 255))
        if distance < self.min_z_score:
            object_found = False
        else:
            object_found = True
            self._update_template(search_image, match_in_search_region)
            self.object_center = object_center

        if self.debug:

            result_8bit = scale_32f_image(result)
            if object_found:
                cv.Circle(result_8bit, match_in_result, 5, (0, 255, 0))
                cv.Circle(search_image, match_in_search_region, 5, (0, 255, 0))
            hist_image = libvision.hist.histogram_image(hist)

            cv.ShowImage("match", result_8bit)
            cv.ShowImage("template", scale_32f_image(self._template))
            cv.ShowImage("search region", scale_32f_image(search_image))
            cv.ShowImage("Histogram", hist_image)

        # Update Template
        if object_found:
            return self.object_center
        else:
            return False
Example 30
def score(card, known, method):
	# card and known must share the same size, so the result matrix is 1x1
	r = cv.CreateMat(1, 1, cv.CV_32FC1)
	cv.MatchTemplate(card, known, r, method)
	return r[0, 0]
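
A usage sketch; the inputs must share the same dimensions, and the file names are hypothetical:

card = cv.LoadImageM('card.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
known = cv.LoadImageM('ace_spades.png', cv.CV_LOAD_IMAGE_GRAYSCALE)
print score(card, known, cv.CV_TM_CCOEFF_NORMED)  # higher is better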