Example #1
def evaluate_(resS, gt, precision, recall, tpr, fpr):
    gtFM = cv2.compare(gt, 128, cv2.CMP_GT)
    gtBM = cv2.bitwise_not(gtFM)
    gtF = np.sum(gtFM)
    gtB = resS.shape[0] * resS.shape[1] * 255 - gtF
    mae = 0.
    for i in range(NUMBER_THRESHOLD):
        resM = cv2.compare(resS, i, cv2.CMP_GT)
        tpM = cv2.bitwise_and(resM, gtFM)
        fpM = cv2.bitwise_and(resM, gtBM)
        tp = np.sum(tpM)
        fp = np.sum(fpM)
        recall[i] += tp / (gtF + EPS)
        total = EPS + tp + fp
        precision[i] += (tp + EPS) / total
        tpr[i] += (tp + EPS) / (gtF + EPS)
        fpr[i] += (fp + EPS) / (gtB + EPS)
    gtFM = np.divide(gtFM, 255.0)  # np.divide is not in-place; keep the result
    resS = np.divide(resS, 255.0)
    resS = cv2.absdiff(gtFM, resS)
    mae += np.sum(resS) / (gtFM.shape[0] * gtFM.shape[1])
    print(mae)
    return mae
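A minimal driver sketch for the routine above, assuming NUMBER_THRESHOLD and EPS are module-level constants and that resS and gt are single-channel uint8 saliency and ground-truth maps (the file names are hypothetical):

import cv2
import numpy as np

NUMBER_THRESHOLD = 256  # one bin per possible uint8 threshold (assumed)
EPS = 1e-8              # guard against division by zero (assumed)

precision = np.zeros(NUMBER_THRESHOLD)
recall = np.zeros(NUMBER_THRESHOLD)
tpr = np.zeros(NUMBER_THRESHOLD)
fpr = np.zeros(NUMBER_THRESHOLD)

resS = cv2.imread("saliency.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
gt = cv2.imread("gt.png", cv2.IMREAD_GRAYSCALE)          # hypothetical input
mae = evaluate_(resS, gt, precision, recall, tpr, fpr)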
Example #2
def cv2_peak_local_max(img, threshold_relative, threshold_abs):

    #max_val = img.max()
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(img)
    th = max(max_val * threshold_relative, threshold_abs)

    _, mm = cv2.threshold(img, th, max_val, cv2.THRESH_TOZERO)

    #max filter
    kernel = np.ones((3, 3))
    mm_d = cv2.dilate(mm, kernel)
    loc_maxima = cv2.compare(mm, mm_d, cv2.CMP_GE)

    mm_e = cv2.erode(mm, kernel)
    non_plateau = cv2.compare(mm, mm_e, cv2.CMP_GT)
    loc_maxima = cv2.bitwise_and(loc_maxima, non_plateau)

    _, coords, _ = cv2.findContours(loc_maxima, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_NONE)  # OpenCV 3.x three-value signature
    coords = np.array([x.squeeze()[::-1] for cc in coords for x in cc])
    # alternative: coords = np.array(np.where(loc_maxima > 0)).T
    # (the findContours approach above is faster than that np.where version)

    return coords
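A usage sketch for the function above, assuming a single-channel float32 response map (the file name is hypothetical):

import cv2
import numpy as np

response = cv2.imread("response.png", cv2.IMREAD_GRAYSCALE).astype(np.float32)
peaks = cv2_peak_local_max(response, threshold_relative=0.5, threshold_abs=10.0)
for row, col in peaks:  # each peak is returned as (row, col)
    print(row, col)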
Example #3
def hist_compare(image_1, image_2):
    hist1 = create_rgb_hist(image_1)
    hist2 = create_rgb_hist(image_2)
    match1 = cv2.compareHist(hist1, hist2, cv2.HISTCMP_BHATTACHARYYA)  # smaller means more similar
    match2 = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)  # larger means more similar
    match3 = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CHISQR)  # smaller means more similar
    print("Bhattacharyya distance: %s, correlation: %s, chi-square: %s" % (match1, match2, match3))
Example #4
def peer_cost(img):
    b,g,r = cv2.split(img)

    ret, m1 = cv2.threshold(r, 95, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 40, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 20, 255, cv2.THRESH_BINARY)

    mmax = cv2.max(r, cv2.max(g, b))
    # note: only the first row of each channel (b[0], g[0], r[0]) is scanned
    # for the smallest non-zero value here
    mmin = 255
    for tmp in b[0]:
        if (tmp != 0 and tmp < mmin):
            mmin = tmp
    for tmp in g[0]:
        if (tmp != 0 and tmp < mmin):
            mmin = tmp
    for tmp in r[0]:
        if (tmp != 0 and tmp < mmin):
            mmin = tmp
    print(mmin)
    ret, m4 = cv2.threshold(mmax - mmin, 15, 255, cv2.THRESH_BINARY)
    ret, m5 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m6 = cv2.compare(r, g, cv2.CMP_GT)
    m7 = cv2.compare(r, b, cv2.CMP_GE)

    mask = m1 & m2 & m3 & m4 & m5 & m6 & m7

    return mask
Example #5
    def labfilter(self, image):
        '''
        This is a filter based on a method proposed in "Fast and Efficient Method for Fire Detection
        Using Image Processing" by Turgay Celik.

        This method uses the CIE L*a*b* color space and performs 4 bitwise filters.
        The method returns true at any pixel that satisfies:
            L* > Lm* (mean of L* values)
            a* > am* (mean of a* values)
            b* > bm* (mean of b* values)
            b* > a*
        '''
        cieLab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        L, a, b = cv2.split(cieLab)
        Lm = cv2.mean(L)
        am = cv2.mean(a)
        bm = cv2.mean(b)
        R1 = cv2.compare(L, Lm, cv2.CMP_GT)
        R2 = cv2.compare(a, am, cv2.CMP_GT)
        R3 = cv2.compare(b, bm, cv2.CMP_GT)
        R4 = cv2.compare(b, a, cv2.CMP_GT)
        R12 = cv2.bitwise_and(R1, R2)
        R34 = cv2.bitwise_and(R3, R4)

        return cv2.bitwise_and(R12, R34)
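Note that cv2.mean returns a 4-element scalar, which cv2.compare accepts directly as its second operand; that is what makes the per-pixel "greater than channel mean" tests above work. A standalone sketch of the same rule outside the class (the file name is hypothetical):

import cv2

image = cv2.imread("frame.png")  # hypothetical input
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
L, a, b = cv2.split(lab)
fire = cv2.bitwise_and(
    cv2.bitwise_and(cv2.compare(L, cv2.mean(L), cv2.CMP_GT),
                    cv2.compare(a, cv2.mean(a), cv2.CMP_GT)),
    cv2.bitwise_and(cv2.compare(b, cv2.mean(b), cv2.CMP_GT),
                    cv2.compare(b, a, cv2.CMP_GT)))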
Example #6
def peer(img):
    b, g, r = cv2.split(img)
    ret, m1 = cv2.threshold(r, 95, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 30, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 20, 255, cv2.THRESH_BINARY)
    mmax = cv2.max(r, cv2.max(g, b))
    mmin = cv2.min(r, cv2.min(g, b))

    ret, m4 = cv2.threshold(mmax - mmin, 15, 255, cv2.THRESH_BINARY)
    ret, m5 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m6 = cv2.compare(r, g, cv2.CMP_GE)
    m7 = cv2.compare(r, b, cv2.CMP_GE)
    mask = m1 & m2 & m3 & m6 & m4 & m5 & m7
    cv2.imshow("b", b)
    cv2.imshow("g", g)
    cv2.imshow("r", r)
    cv2.imshow('r_thre', m1)
    cv2.imshow('g_thre',m2)
    cv2.imshow('b_thre',m3)
    cv2.imshow('max-min',m4)
    cv2.imshow('absdiff',m5)
    cv2.imshow('r_g',m6)
    cv2.imshow('r_b',m7)
    cv2.imshow('res',mask)
    return mask
Example #7
 def rgbfilter_white(self, image,image_bg):
     rd = 2
     rd2 = 100
     diff = cv2.subtract(cv2.cvtColor(image,cv2.COLOR_BGR2GRAY),image_bg)
     res = cv2.compare(diff,rd,cv2.CMP_GT)
     res1 = cv2.compare(diff,rd2,cv2.CMP_LT)
     return cv2.bitwise_and(res,res1)
Example #8
def getLocalMaxima(image, minDistance, seType=None):
    """
	Returns local maxima in image.
	Based on http://stackoverflow.com/questions/5550290/find-local-maxima-in-grayscale-image-using-opencv/21023493#21023493

	The image will be dilated with structure element.

	:param image:
	:type image: np.ndarray
	:param minDistance: size of the structure element
	:type minDistance: int
	:param seType: structure element type i.e. cv2.MORPH_RECT
	:type seType: int

	:return: image with local maximas
	:rtype: np.ndarray
	"""
    size = 2 * minDistance + 1
    if seType is None:
        seType = cv2.MORPH_RECT
    kernel = cv2.getStructuringElement(seType, (size, size))
    dil = cv2.dilate(image, kernel=kernel)
    # pixels unchanged by dilation are local maxima (or lie on a plateau)
    mask = cv2.compare(image, dil, cmpop=cv2.CMP_EQ)
    localMaxiImage = cv2.compare(image, 0, cmpop=cv2.CMP_GT)
    localMaxiImage[mask == 0] = 0

    # Remove the image borders
    localMaxiImage[:minDistance] = 0
    localMaxiImage[-minDistance:] = 0
    localMaxiImage[:, :minDistance] = 0
    localMaxiImage[:, -minDistance:] = 0
    return localMaxiImage
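A usage sketch, assuming a grayscale uint8 input (the file name is hypothetical):

import cv2
import numpy as np

img = cv2.imread("blobs.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
maxima = getLocalMaxima(img, minDistance=5)
rows, cols = np.nonzero(maxima)  # coordinates of the surviving maxima
print(list(zip(cols.tolist(), rows.tolist())))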
Example #9
def depthBackgroundSubtract(depth, staticMap=None):
	if staticMap is None:  # staticMap may be a numpy array, so test against None
		staticMap = cv2.imread("staticmapCV2.png", cv2.IMREAD_UNCHANGED)
	nodepthmask = cv2.compare(depth, np.array(10), cv2.CMP_LE)
	depth[nodepthmask != 0] = 10000

	return cv2.compare(depth, staticMap, cv2.CMP_LT)
Example #10
def depthBackgroundSubtract(depth, depth_diff_mask, staticMap=None):
    if staticMap is None:  # staticMap may be a numpy array, so test against None
        staticMap = cv2.imread("staticmapCV2.png", cv2.IMREAD_UNCHANGED)
    nodepthmask = cv2.compare(depth, np.array(10), cv2.CMP_LE)
    depth[nodepthmask != 0] = 10000

    depth_diff_mask[:] = cv2.compare(depth, staticMap, cv2.CMP_LT)  # fill the caller's buffer in place
Example #11
    def feed(self, image):
        if self.backImg is None:
            self.backImg = image.copy()
            self.prevImg = image.copy()
            self.height, self.width = image.shape
            self.bestRun = np.ones((self.height,self.width), np.uint8)
            self.currentRun = np.ones((self.height,self.width), np.uint8)
            self.minFixedPixel = int(image.size*self.PERCENTAGE)
            #print self.minFixedPixel
            if self.showBackImage:
                self.createTrackbars()
                cv2.imshow("backImage", self.backImg)
            return self.backImg

        self.checkSettings()
        
        diffImage = cv2.absdiff(self.prevImg,image)
        ret, threshold1 = cv2.threshold(diffImage, self.THRESHOLD, 1, cv2.THRESH_BINARY_INV)
        ret, threshold255 = cv2.threshold(diffImage, self.THRESHOLD, 255, cv2.THRESH_BINARY_INV)
        
        nonZero = cv2.countNonZero(threshold1)
        nonZeroRatio = nonZero / float(image.size)
        perfection = self.PERFECTION
        if nonZeroRatio < self.PERCENTAGE:
            perfection = 5
            print(perfection)
        
        nonChanged = cv2.bitwise_and(self.currentRun, threshold255)
        self.currentRun = cv2.add(threshold1,nonChanged)

        newBestsMask = cv2.compare(self.currentRun, self.bestRun, cv2.CMP_GE)
        oldBestsMask = cv2.compare(self.currentRun, self.bestRun, cv2.CMP_LT)

        newBestRuns = cv2.bitwise_and(self.currentRun, self.currentRun, mask = newBestsMask)
        oldBestRuns = cv2.bitwise_and(self.bestRun, self.bestRun, mask = oldBestsMask)
        self.bestRun = cv2.add(newBestRuns, oldBestRuns)

        newBackImgPoints = cv2.bitwise_and(image, image,mask = newBestsMask)
        oldBackImgPoints = cv2.bitwise_and(self.backImg, self.backImg, mask = oldBestsMask)
        self.backImg = cv2.add(newBackImgPoints, oldBackImgPoints)

        stablePoints = cv2.compare(self.bestRun, perfection, cv2.CMP_GT)
        unstablePoints = cv2.bitwise_not(stablePoints)
        stablePoints = cv2.bitwise_and(stablePoints, perfection)
        unstablePoints = cv2.bitwise_and(unstablePoints, self.bestRun)
        self.bestRun = cv2.add(stablePoints, unstablePoints)
        
        self.nonZeroPoints = cv2.countNonZero(stablePoints)
        self.prevImg = image.copy()

        if self.showBackImage:
            cv2.imshow("backImage", self.backImg)
            
        return self.backImg
Example #12
def midThreshold(img, minValue, maxValue, value):
    # anything above minValue and below maxValue becomes value
    # anything outside of min/maxValue maintains its value
    gt = cv2.compare(img, minValue, cv2.CMP_GE)
    lt = cv2.compare(img, maxValue, cv2.CMP_LE)
    between = cv2.bitwise_and(gt, lt)
    newImg = np.zeros(img.shape, dtype=np.uint8)
    newImg += img
    newImg -= img * (between // 255)  # integer division keeps the uint8 0/1 mask
    newImg += value * (between // 255)
    return newImg
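A quick check of the behavior on a uint8 ramp (this assumes the integer-division form of the mask above):

import numpy as np

ramp = np.arange(256, dtype=np.uint8).reshape(16, 16)
out = midThreshold(ramp, 100, 200, 128)
assert out[0, 0] == 0                    # below minValue: kept as-is
assert out[150 // 16, 150 % 16] == 128   # inside [100, 200]: replaced by value
assert out[15, 15] == 255                # above maxValue: kept as-is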
Example #13
def midThreshold(img, minValue, maxValue, value):
	# anything above minValue and below maxValue becomes value
	# anything outside of min/maxValue maintains its value
	gt = cv2.compare(img, minValue, cv2.CMP_GE)
	lt = cv2.compare(img, maxValue, cv2.CMP_LE)
	between = cv2.bitwise_and(gt, lt)
	newImg = np.zeros(img.shape, dtype=np.uint8)
	newImg += img
	newImg -= img*(between//255)
	newImg += value*(between//255)
	return newImg
Example #14
def peer2(img):
    b, g, r = cv2.split(img)

    ret, m1 = cv2.threshold(r, 220, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 210, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 170, 255, cv2.THRESH_BINARY)
    ret, m4 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m5 = cv2.compare(r, g, cv2.CMP_GT)
    m6 = cv2.compare(r, b, cv2.CMP_GE)

    mask = m1 & m2 & m3 & m4 & m5 & m6

    return mask
Example #15
def findIou(refMask, detMask):

        diff = cv2.compare(refMask,detMask,cv2.CMP_NE)
        detCopy = detMask.copy()
        detCopy[np.where(detCopy == [0])] = [128]

        intersection = cv2.compare(refMask,detCopy,cv2.CMP_EQ)

        numInterSection = cv2.countNonZero(intersection) 
        numDiff = cv2.countNonZero(diff)

        iou = float(numInterSection)/float(numInterSection+numDiff)
        return iou
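A toy check of findIou on 0/255 uint8 masks: with a top-half reference and a left-half detection, the intersection covers 4 pixels and the union 12, so the IoU is 1/3:

import numpy as np

ref = np.zeros((4, 4), np.uint8)
det = np.zeros((4, 4), np.uint8)
ref[:2] = 255     # top half
det[:, :2] = 255  # left half
print(findIou(ref, det))  # 0.333...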
Example #16
    def normalizeImage(self, new_size):
        # create a resized version of the image
        # with a predefined size

        # first, generate a squared version of original
        #...check longest to keep aspect ratio....
        longest = max(self.img.shape[0], self.img.shape[1])

        #...offset....
        offset_y = int((longest - self.img.shape[0]) / 2.0)
        offset_x = int((longest - self.img.shape[1]) / 2.0)

        #...check if padding is required...
        if longest < ConnectedComponent.MinScalingSize:
            padding = int(
                math.ceil((ConnectedComponent.MinScalingSize - longest) / 2.0))
        else:
            padding = 0

        start_y = offset_y + padding
        start_x = offset_x + padding

        #...the final matrix...
        squared = np.zeros((longest + padding * 2, longest + padding * 2))

        # generate the squared version ...
        squared[start_y:(start_y + self.img.shape[0]),
                start_x:(start_x + self.img.shape[1])] = self.img

        # now generate the scaled version
        #scaled = np.zeros((new_size, new_size))
        scaled = cv2.resize(squared, (new_size, new_size))

        self.normalized = cv2.compare(scaled, 128, cv2.CMP_GT)
Example #17
 def find_max_blob(self, blobs):
     """
     :param blobs:
     :return: 面積が最大のblobを求めるかんせうう
     """
     i = 1
     b = cv2.compare(blobs, i, cv2.CMP_EQ)
     x, y, w, h = cv2.boundingRect(b)
     max_area = w * h
     for j in range(2, len(blobs)):
         b = cv2.compare(blobs, j, cv2.CMP_EQ)
         x, y, w, h = cv2.boundingRect(b)
         if max_area < w * h:
             i = j
             max_area = w * h
     return cv2.compare(blobs, i, cv2.CMP_EQ)
Example #18
def peer(img):
    b, g, r = cv2.split(img)

    ret, m1 = cv2.threshold(r, 95, 255, cv2.THRESH_BINARY)
    ret, m2 = cv2.threshold(g, 40, 255, cv2.THRESH_BINARY)
    ret, m3 = cv2.threshold(b, 20, 255, cv2.THRESH_BINARY)
    mmax = cv2.max(r, cv2.max(g, b))
    mmin = cv2.min(r, cv2.min(g, b))
    ret, m4 = cv2.threshold(mmax - mmin, 15, 255, cv2.THRESH_BINARY)
    ret, m5 = cv2.threshold(cv2.absdiff(r, g), 15, 255, cv2.THRESH_BINARY)
    m6 = cv2.compare(r, g, cv2.CMP_GT)
    m7 = cv2.compare(r, b, cv2.CMP_GE)

    mask = m1 & m2 & m3 & m4 & m5 & m6 & m7

    return mask
Example #19
def segmentAndMask(rgb, depth, mask, rgb_final, depth_final, thresh, staticMap = None):
    if staticMap is None:  # staticMap may be a numpy array, so test against None
        staticMap = cv2.imread("staticmapCV2.png", cv2.IMREAD_UNCHANGED)

    rgb_final = rgb.copy()
    depth_final = depth.copy()

    # only keep foreground
    foregroundMask = np.zeros(depth.shape, dtype="uint8")
    depthBackgroundSubtract(depth, foregroundMask)

    backgroundMask = np.zeros(depth.shape, dtype="uint8")

    cv2.bitwise_not(foregroundMask, backgroundMask)
    rgb_final[backgroundMask != 0] = 0
    depth_final[backgroundMask != 0] = 0
    
    # mask out the pixels with no depth
    noDepthMask = cv2.compare(depth, np.array(0), cv2.CMP_LE)
    rgb_final[noDepthMask != 0] = 0
    depth_final[noDepthMask != 0] = 0

    # mask out pixels specified in mask
    rgb_final[mask != 0] = 0
    depth_final[mask != 0] = 0
Example #20
    def run(self, img):

        # Estimate the background
        neg_msk, diff = self._run(img)

        # Filtering.
        # To turn this into "several gradients" it is enough
        # to call self._compute_blured with several kernel
        # sizes and combine the results, e.g. with a
        # bitwise OR.
        neg, d = self._compute_blured(img, self.ng_ksz)

        # Since the noise is estimated "not quite from above",
        # compute the density of the "noisy" motion regions.
        pos = cv2.compare(neg, 0, cv2.CMP_EQ)
        pos = cv2.bitwise_and(pos, 1)
        # Ones wherever a "noise" component may be present
        ns = cv2.bitwise_and(neg_msk, pos)
        nsden = np.sum(ns).astype('float') / np.sum(pos)

        # A flash is a sharp rise in density
        flash = self.flash.run(nsden) or self.ifrm.run(nsden)

        # Update bookkeeping state
        print("Density change:", nsden / self.nsden_z)
        self.nsden_z = nsden

        return neg, flash, d
Example #21
def morphology(img):
    # inverts the image to execute easier operations (sum and subtraction)
    a, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY_INV)
    # generates 3 by 3 cross kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # iteration counter
    iteration = 0
    print("Starting fast thin...")
    while 1:
        iteration += 1
        print("Running iteration", iteration,
              "of morphology")  # "Running iteration x"
        #erosion
        last_img = img.copy()
        ero = cv2.erode(img, kernel, iterations=1)
        #dilation
        dil = cv2.dilate(ero, kernel, iterations=1)
        # result = original - dilated + eroded
        img -= dil
        img += ero
        # ends loop if result is the same from last iteration
        if cv2.compare(img, last_img, cv2.CMP_EQ).all():
            break
    a, img = cv2.threshold(img, 100, 255,
                           cv2.THRESH_BINARY_INV)  # inverts back the image
    return img
Example #22
def plot_intercontour_hist(image, outer_contour_id, contours, graph, normalized=True):
    outer_contour = contours[outer_contour_id]
    (x, y, width, height) = cv2.boundingRect(outer_contour)
    subimage = get_subimage(image, (x, y), (x + width, y + height))
    monochrome = cv2.cvtColor(subimage, cv2.COLOR_BGR2GRAY)
    inverted_mask = cv2.compare(monochrome, monochrome, cv2.CMP_EQ)
    inner_contours = [contours[int(contour_id)] for contour_id in graph.successors(outer_contour_id)]
    for i in range(width):
        for j in range(height):
            point = (x + i, y + j)
            outer_contour_test = cv2.pointPolygonTest(outer_contour, point, 0)
            inner_contours_test = -1
            for inner_contour in inner_contours:
                inner_contour_test = cv2.pointPolygonTest(inner_contour, point, 0)
                if inner_contour_test > 0:
                    inner_contours_test = 1
                    break
            if outer_contour_test >= 0 and inner_contours_test < 0:
                inverted_mask[j][i] = 0
    mask = cv2.bitwise_not(inverted_mask)
    cv.Set(cv.fromarray(subimage), WHITE, cv.fromarray(inverted_mask))
    inner_contour_id = len(str(inner_contours))
    print('inner contour id: ', inner_contour_id)
    image_name = '%d-%s'%(outer_contour_id, inner_contour_id)
    #cv2.imshow(image_name, subimage)
    #subhists = plot_hist(subimage, mask, image_name)
    (subhists, winnames) = plot_hist_hls(subimage, mask, image_name, normalized)
    return subhists, subimage, mask, x, y, winnames
Example #23
def morphology(img):
  
  # Set the threshold value
  a, img = cv2.threshold(img,100,255,cv2.THRESH_BINARY_INV) 
  
  # Build the kernel shape and size
  kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))

  # iteration 
  iteration = 0
  print ("Starting fast thin...")
  while 1:
    iteration += 1
    print ("Running iteration",iteration,"of morphology")
    
    #erosion
    last_img = img.copy()
    ero = cv2.erode(img,kernel,iterations = 1)
    
    #dilation
    dil = cv2.dilate(ero,kernel,iterations = 1)
    
    # result
    img -= dil
    img += ero
    
    # End the loop if the result is the same as in the last iteration
    if cv2.compare(img, last_img, cv2.CMP_EQ).all():
      break

  a, img = cv2.threshold(img,100,255,cv2.THRESH_BINARY_INV) 
  return img
Example #24
 def laplaceEdge(self, dimg, threshold):
   edge = None
   if dimg is not None:
     blur = cv2.GaussianBlur(dimg, (5,5), sigmaX=0)
     laplacian = np.abs(cv2.Laplacian(blur, cv2.CV_32F, ksize=3))
     edge = cv2.compare(laplacian, threshold, cv2.CMP_GT)
   return edge
Example #25
def get_foreground(img):
    """Separate the foreground and background of the given image"""
    h, w = img.shape[:2]
    rectangle = (1, 1, w, h)
    result = np.zeros(shape=(h, w), dtype=np.uint8)
    bgModel = np.zeros((1, 13 * 5))
    fgModel = np.zeros((1, 13 * 5))
    cv.grabCut(img, result, rectangle, bgModel, fgModel, 5, cv.GC_INIT_WITH_RECT)
    #cv.imshow("test", result)
    #cv.waitKey(100)
    temp1 = np.array(cv.GC_PR_FGD, dtype=np.uint8)
    result2 = cv.compare(result, temp1, cv.CMP_EQ)
    cv.imshow("test", result2)
    cv.waitKey(2000)
    #print result2
    x, y = img.shape[:2]
    background = np.ones(shape=(x, y, 3), dtype=np.uint8)
    #background *= 255
    foreground = np.ones(shape=(x, y, 3), dtype=np.uint8)
    #foreground *= 255
    for i in range(0, x):
        for j in range(0, y):
            if (result2[i, j].all() == 0):
                background[i, j] = img[i, j]
            else:
                foreground[i, j] = img[i, j]
    #print background
    cv.imshow("test", foreground)
    cv.waitKey(2000)
    cv.imshow("test1", background)
    cv.waitKey(2000)
    cv.imwrite("prueba.png", foreground)
    return foreground
Example #26
def segmentation2contours(seg_mask, kernel_size = 5, chain_approx = False):
    chain_approx = cv2.CHAIN_APPROX_SIMPLE  if chain_approx else cv2.CHAIN_APPROX_NONE
    
    n_labs, seg_mask, stats, centroids = cv2.connectedComponentsWithStats(seg_mask.astype(np.uint8), 4)
        
    kernel = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    
    mm = kernel_size // 2 #offset
    
    cell_contours = []
    for ii, (top, left, height, width, _) in enumerate(stats):
        if ii == 0:
            continue
        
        yl, yr = max(0, left - mm), min(seg_mask.shape[0], left + width + mm) 
        xl, xr = max(0, top - mm), min(seg_mask.shape[1], top + height + mm) 
        
        crop = cv2.compare(seg_mask[yl:yr, xl:xr], ii, cv2.CMP_EQ)
        crop = cv2.dilate(crop, kernel)
        cnt = cv2.findContours(crop, cv2.RETR_EXTERNAL, chain_approx)[-2]
        assert len(cnt) == 1
        
        cnt = cnt[0].squeeze(1) + np.array((xl, yl))[None]
        
        cell_contours.append(cnt)
    
    return cell_contours
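A usage sketch on a toy segmentation mask, assuming a 2-D binary (or labeled) array as input; the [-2] indexing inside the function keeps the findContours call working across OpenCV versions:

import numpy as np

seg = np.zeros((64, 64), np.uint8)
seg[10:20, 10:20] = 1   # first cell
seg[40:55, 30:50] = 1   # second cell
contours = segmentation2contours(seg, kernel_size=5)
print([c.shape for c in contours])  # one (N, 2) array of (x, y) points per cell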
Example #27
def getROI( img, Nblur, Nsobel, Bsepare ):
    if len(img.shape)>2:
        img_gray = rgb2gray(img)
    else:
        img_gray = img
    IM_WIDTH  = img_gray.shape[1]
    IM_HEIGHT = img_gray.shape[0]
    ########################################################################
    numel = IM_WIDTH * IM_HEIGHT
    NordB = Nblur
    NordS = Nsobel
    if NordB>0:
        img_gray = cv2.blur(img_gray,(NordB,NordB))
    sobelx = cv2.Sobel(img_gray,cv2.CV_64F,1,0,ksize=NordS)
    sobely = cv2.Sobel(img_gray,cv2.CV_64F,0,1,ksize=NordS)
    sobel2 = sobelx * sobelx + sobely * sobely
    sobel = cv2.compare( sobel2, int((8*np.sum(sobel2)/(numel))), cv2.CMP_GT )
    sobel[0:IM_HEIGHT, 0:NordS] = 0
    sobel[0:IM_HEIGHT, IM_WIDTH-NordS:IM_WIDTH] = 0
    sobel[0:NordS,0:IM_WIDTH] = 0
    sobel[IM_HEIGHT-NordS:IM_HEIGHT,0:IM_WIDTH] = 0
    sobelf = cv2.bitwise_and( imfill( sobel, 8 ), cv2.bitwise_not(sobel) )
    #
    sobelf = imclose( sobelf, 'square', NordS )
    #
    if Bsepare:
        mxv = int(round( np.max(cv2.distanceTransform( sobelf, cv2.DIST_L2, 3 ))/2 ))
        sobelf = imopen( sobelf, 'square', int(mxv) )
    else:
        sobelf = imopen( sobelf, 'square', int(15) )
    #
    # cv2.imshow( "Color", (sobel) );cv2.waitKey()
    #
    # sobelf = imfill( sobelf, 8 );
    return sobelf
Example #28
def corner_detection():
	image_filepath = '../../../data/machine_vision/build.png'

	img = cv.imread(image_filepath, cv.IMREAD_GRAYSCALE)
	if img is None:
		print('Failed to load an image, {}.'.format(image_filepath))
		return

	dst_32f = cv.cornerHarris(img, blockSize=3, ksize=3, k=0.01)

	dilated = cv.dilate(dst_32f, kernel=None, anchor=None, iterations=1)
	localMax = cv.compare(dst_32f, dilated, cv.CMP_EQ)  # Binary image.

	minv, maxv, minl, maxl = cv.minMaxLoc(dst_32f)
	_, dst_32f = cv.threshold(dst_32f, 0.01 * maxv, 255, cv.THRESH_BINARY)

	cornerMap = (dst_32f * 255).astype(np.int32)
	cornerMap[localMax == 0] = 0  # Deletes all modified pixels.

	centers = np.where(cornerMap > 0)
	#centers = np.vstack(centers.tolist()).T
	centers = list(zip(centers[1].tolist(), centers[0].tolist()))

	for center in centers:
		cv.circle(img, center, 3, (255, 255, 255), 2, cv.LINE_AA)

	cv.imshow('Image', img)
	cv.imshow('CornerHarris Result', dst_32f)
	cv.imshow('Unique Points after Dilatation/CMP/And', cornerMap)
	cv.waitKey(0)

	cv.destroyAllWindows()
Example #29
def estimate_perspective_ii(level_map, img2, img3, operator_position):
    """用可部署地块选出从地图坐标到通常视角的透视矩阵。

    同名参数含义与estimate_perspective函数一致,返回值也一致,但是透视矩阵是三维的。
    """
    height, width = img2.shape[:2]
    mask = cv2.compare(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY),
                       cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY), cv2.CMP_NE)
    perspectives = [
        generate_perspective((width, height), level_map, view, True)
        for view in range(4)
    ]
    # Find the view that matches best.
    view = np.argmax([
        # PSNR is an objective measure of image quality...
        cv2.PSNR(
            mask,
            cv2.inRange(
                generate_bullet_time_buildable_mask(
                    (width, height), level_map, perspective,
                    operator_position), 16, 255))
        for perspective in perspectives
    ])
    return generate_perspective((width, height), level_map, view,
                                False), perspectives[view]
Example #30
    def ycrcbfilter(self, image):
        ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
        Y, Cr, Cb = cv2.split(ycrcb)
        ycb = cv2.compare(Y, Cb, cv2.CMP_GT)
        crcb = cv2.compare(Cr, Cb, cv2.CMP_GT)
        A1 = cv2.bitwise_and(ycb, crcb)
        ym = cv2.mean(Y)
        yym = cv2.compare(Y, ym, cv2.CMP_GT)
        A2 = cv2.bitwise_and(A1, yym)
        crm = cv2.mean(Cr)
        ccrm = cv2.compare(Cr, crm, cv2.CMP_GT)
        A3 = cv2.bitwise_and(A2, ccrm)
        cbm = cv2.mean(Cb)
        cbcbm = cv2.compare(Cb, cbm, cv2.CMP_LT)

        return cv2.bitwise_and(A3, cbcbm)
Example #31
def start():
    freeze = False
    counter = 0
    while True:
        try:
            t1 = time.time()
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            aligned_depth_frame = depth_filter(aligned_depth_frame)
            color_frame = aligned_frames.get_color_frame()
            color_image = np.asanyarray(color_frame.get_data())

            # Apply flood fill
            points = pc.calculate(aligned_depth_frame)
            vtx = np.ndarray(buffer=points.get_vertices(),
                             dtype=np.float32,
                             shape=(d1 * d2, 3))
            o3d_pointcloud.points = o3d.utility.Vector3dVector(vtx)
            if counter == 0:
                vis.create_window(window_name="Open3d", width=400, height=400)
                vis.add_geometry(o3d_pointcloud)
            else:
                vis.update_geometry(o3d_pointcloud)
                vis.poll_events()
                vis.update_renderer()

            x = vtx[f1, :] - vtx[f2, :]
            y = vtx[f3, :] - vtx[f4, :]
            xyz_norm = normalize_v3(np.cross(x, y))
            norm_flat = xyz_norm @ norm
            norm_fill = norm_flat  # norm_fill[rand_idx] = norm_flat
            norm_matrix = np.abs(norm_fill.reshape((d1, d2)))

            norm_umatrix = cv2.resize(cv2.UMat(norm_matrix), (d2 * 4, d1 * 4))
            bool_matrix = cv2.compare(norm_umatrix, 0.95, cmpop=cv2.CMP_GT)
            comps, out, stats, cents = getConnects(bool_matrix, connectivity=4)
            sizes = stats.get()[:, 2]  # get the area sizes
            max_label = np.argmax(sizes[2:comps]) + 2
            color_image[out.get() == max_label] = 255
            t2 = time.time()

            text = f'FPS: {1 / (t2 - t1)}'
            font = cv2.FONT_HERSHEY_SIMPLEX
            color = (255, 0, 0)
            place = (50, 50)
            thicc = 2

            color_image = cv2.putText(color_image, text, place, font, 1, color,
                                      thicc, cv2.LINE_AA)
            cv2.imshow('Color', color_image)

            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
            counter += 1
        except:
            pass
    cv2.destroyAllWindows()
    pipeline.stop()
Example #32
def simularity(image, target):
    img = cv2.imread(image, 0)
    tar = cv2.imread(target, 0)
    img = get_two(img)
    tar = get_two(tar)
    ret = cv2.compare(img, tar, cv2.CMP_EQ)
    ret = np.sum(ret)
    return ret
Example #33
def p_hash(img):
    """OpenCV pHash的Python再实现。"""
    dct = cv2.dct(
        cv2.cvtColor(
            cv2.resize(img, (32, 32), interpolation=cv2.INTER_LINEAR_EXACT),
            cv2.COLOR_BGR2GRAY).astype(np.float32))[:8, :8]
    dct[0, 0] = 0
    return cv2.compare(dct, float(np.mean(dct)), cv2.CMP_GT) // 255
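A usage sketch: since the hash is an 8x8 matrix of 0/1 values, two images can be compared by Hamming distance (the file names are hypothetical):

import cv2
import numpy as np

h1 = p_hash(cv2.imread("a.png"))  # hypothetical inputs
h2 = p_hash(cv2.imread("b.png"))
hamming = int(np.count_nonzero(h1 != h2))  # 0..64; smaller means more similar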
Example #34
def stencil_cull(segmentations, stencil, cls=StenCode.car, w=1920, h=1080):
    mask = np.full((h, w), 0x7, dtype=np.uint8)
    stencil_unmask = np.bitwise_and(stencil, mask)
    thresh = cv2.compare(stencil_unmask, cls.value, cv2.CMP_EQ)
    thresh = (thresh // 255).astype(np.uint32)
    # person_thresh = cv2.compare(stencil_unmask, StenCode.person.value, cv2.CMP_EQ)
    # person_thresh = (person_thresh // 255).astype(np.uint32)
    return segmentations * thresh
Example #35
def isMovement(img, threshold = 100):
    black = np.zeros((img.shape), np.uint8)
    out = cv2.compare(img, black, cv2.CMP_NE)
    diff = cv2.countNonZero(out)  # count pixels that differ from black

    if diff < threshold:
        return False
    else:
        return True
Example #36
    def edgeBasedRegionBinarization(image):
        #works with gray scale image
        if len(image.shape) == 3:
            gray_scale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        else:
            gray_scale = image

        rows = gray_scale.shape[0]
        cols = gray_scale.shape[1]

        #get Contrast-Limited Adaptive Histogram Equalization
        tiles_y = int(rows / 20)  #20
        tiles_x = int(cols / 20)  #20

        equalized = AdaptiveEqualizer.adapthisteq(gray_scale, 0.04, tiles_x,
                                                  tiles_y)

        #now apply Canny edge detection....
        edges = cv2.Canny(gray_scale, 10, 50, apertureSize=3)

        #dilate the edges...
        #....create structuring element...
        strel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        #....now dilate edges...
        dilate = cv2.dilate(edges, strel)

        #now invert...
        board = cv2.bitwise_not(dilate)

        #now label connected components...
        labels, count_labels = scipy.ndimage.measurements.label(board)

        #get the sizes of the connected components...
        sizes = scipy.ndimage.measurements.sum(board, labels,
                                               range(count_labels + 1))

        #filter all connected components below certain threshold...
        percent = 0.05
        remove_size = (sizes / 255.0) < (rows * cols * percent)
        remove_pix = remove_size[labels]
        labels[remove_pix] = 0

        #it has to be above certain percentage to be background
        only_board = cv2.compare(labels, 0, cv2.CMP_GT)

        #...dilate...
        only_board = cv2.dilate(only_board, strel)

        #print("Min " + str(equalized.min()))
        #print("Min " + str(equalized.max()))
        #cv2.imshow("test", equalized / 255.0)
        #cv2.waitKey(0)

        #final_content = np.zeros( equalized.shape )
        final_content = Binarizer.threshold_content(equalized, only_board, 128)

        return final_content
Example #37
    def rgbfilter_gray(self, image, rgbthreshold):
        b,g,r = cv2.split(image)
        rd = rgbthreshold
        min1 = cv2.min(b,g)
        min1 = cv2.min(min1,r)
        max1 = cv2.max(b,g)
        max1 = cv2.max(max1,r)

        diff = cv2.absdiff(max1,min1)
        res = cv2.compare(diff,rd,cv2.CMP_LT)
        return res
Example #38
def segmentAndMask(depth):

    depth_final = depth.copy()

    # only keep foreground
    foregroundMask = depthBackgroundSubtract(depth)

    backgroundMask = np.zeros(depth.shape, dtype="uint8")
    backgroundMask = cv2.bitwise_not(foregroundMask)

    depth_final[backgroundMask != 0] = 0

    # mask out the pixels with no depth
    noDepthMask = cv2.compare(depth, np.array(0), cv2.CMP_LE)
    depth_final[noDepthMask != 0] = 0

    return depth_final
Example #39
def plot_inner_hist(image, outer_contour_id, contours):
    outer_contour = contours[outer_contour_id]
    (x, y, width, height) = cv2.boundingRect(outer_contour)
    subimage = get_subimage(image, (x, y), (x + width, y + height))
    monochrome = cv2.cvtColor(subimage, cv2.COLOR_BGR2GRAY)
    inverted_mask = cv2.compare(monochrome, monochrome, cv2.CMP_EQ)
    for i in range(width):
        for j in range(height):
            point = (x + i, y + j)
            outer_contour_test = cv2.pointPolygonTest(outer_contour, point, 0)
            if outer_contour_test > 0:
                inverted_mask[j][i] = 0
    mask = cv2.bitwise_not(inverted_mask)
    cv.Set(cv.fromarray(subimage), (0, 0, 0), cv.fromarray(inverted_mask))
    image_name = '%d'%(outer_contour_id)
    #cv2.imshow(image_name, subimage)
    #subhists = plot_hist(subimage, mask, image_name)
    (subhists, winnames) = plot_hist_hls(subimage, mask, image_name)
    return subhists, subimage, mask, inverted_mask, winnames
Example #40
	def replaceText(self, img, text, rotation):
		# first, eliminates text that is in img contained by the box
		color = self.avgColor(img)
		M = cv2.getRotationMatrix2D((self.x, self.y), self.angle, 1)
		(rows, cols, ch) = img.shape
		rotatedImg = np.zeros(img.shape, dtype=np.uint8)
		(left, top, right, bottom) = self.rotatedBounds()
		cv2.rectangle(rotatedImg, (left, top), (right-5, bottom-5), color, -1)
		# adds the text we want to display
		rotatedImg = addText(rotatedImg, self.rotatedBounds(), text, (10, 10, 10), rotation)
		M2 = cv2.getRotationMatrix2D((self.x, self.y), -self.angle, 1)
		unrotatedImg = cv2.warpAffine(rotatedImg, M2, (cols, rows))
		newImg = np.zeros(img.shape, dtype=np.uint8)
		# replaces the box on the image with the updated box
		isntBlack = cv2.compare(unrotatedImg, newImg, cv2.CMP_NE) // 255  # integer division keeps a uint8 0/1 mask
		#isBlack = cv2.compare(unrotatedImg, newImg, cv2.CMP_EQ) / 255
		newImg += img
		newImg -= img*isntBlack
		newImg += unrotatedImg*isntBlack
		return newImg
Example #41
    def test_cudaarithm_arithmetic(self):
        npMat1 = np.random.random((128, 128, 3)) - 0.5
        npMat2 = np.random.random((128, 128, 3)) - 0.5

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)

        self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
                                         cv.add(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
                                         cv.subtract(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
                                         cv.multiply(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
                                         cv.divide(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
                                         cv.absdiff(npMat1, npMat2)))

        self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
                                         cv.compare(npMat1, npMat2, cv.CMP_GE)))

        self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
                                         np.abs(npMat1)))

        self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
                                    cv.cuda.abs(cuMat1).download()))


        self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
                                                            npMat1))

        self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
                                         cv.pow(npMat1, 2)))
Example #42
    def get_percentage_of_active_pixels_in_frame(self, frame):
        # process the frame
        self.__foreground_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # calculate the difference between current and background frame
        difference = cv2.absdiff(self.__background_image,
                                 self.__foreground_image)

        # process the difference
        # use median blur
        blurred = cv2.medianBlur(difference, 3)

        # do the thresholding
        thresholded = cv2.compare(blurred, 6, cv2.CMP_GT)

        # erode and dilate
        eroded = cv2.erode(thresholded, self.__structing_element)
        dilated = cv2.dilate(eroded, self.__structing_element)

        # store the difference image for further usage
        self.__difference_thresholded_image = dilated

        # count non zero elements
        nonzero_pixels = cv2.countNonZero(dilated)

        # calculate the number of non-zero pixels
        height, width = self.__foreground_image.shape
        percentage_of_nonzero_pixels = (nonzero_pixels * 100 / (height * width))

        # prepare data for background update
        mask_gt = np.greater(self.__background_image, self.__foreground_image)
        mask_lt = np.less(self.__background_image, self.__foreground_image)

        # update the background
        self.__background_image += mask_lt
        self.__background_image -= mask_gt

        return percentage_of_nonzero_pixels
Example #43
    doThis = 1
    
    if doThis == 1:
        # Run for videos
        gCAVid = getContactAngle(varsCA8, headersVid)
        gCAVid.analyzeVideos()
    elif doThis == 2:
        # Run for tilt images
        gCATilt = getContactAngle(varsImgTilt, headersImg, isVideo=False)
        gCATilt.analyzeTiltImages()
    elif doThis ==3:
        # Run for static images, they pretend they are videos
        gCAStat = getContactAngle(varsImgStatic, headersImg)
        gCAStat.analyzeStaticImages()

"""
NOTES:

roi has a minimum size: opencv video.write seems to have a minimum image size

FOURCC CODECs
int fourCC_code = CV_FOURCC('M','J','P','G');	// M-JPEG codec (may not 
be reliable). 
int fourCC_code = CV_FOURCC('P','I','M','1');	// MPEG 1 codec. 
int fourCC_code = CV_FOURCC('D','I','V','3');	// MPEG 4.3 codec. 
int fourCC_code = CV_FOURCC('I','2','6','3');	// H263I codec. 
int fourCC_code = CV_FOURCC('F','L','V','1');	// FLV1 codec. 

MASK LOGIC (CMPOP)
cv2.compare(src1, src2, cmpop[, dst])
CMP_EQ src1 is equal to src2.
"""
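A small demonstration of the cmpop semantics noted above: cv2.compare returns a uint8 mask with 255 where the predicate holds and 0 elsewhere.

import cv2
import numpy as np

a = np.array([[1, 2, 3]], np.uint8)
b = np.array([[3, 2, 1]], np.uint8)
print(cv2.compare(a, b, cv2.CMP_EQ))  # [[  0 255   0]]
print(cv2.compare(a, b, cv2.CMP_GT))  # [[  0   0 255]]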
Example #44
drone.reset()
drone.speed = 0.1
W, H = 640, 360
rt = 240

stream = cv2.VideoCapture(path)
while running:
	try:
		buff = stream.grab()
		imageyuv = stream.retrieve(buff)
		imagergb = cv2.cvtColor(imageyuv[1],cv2.COLOR_BGR2RGB)
		imagehsv = cv2.cvtColor(imagergb,cv2.COLOR_RGB2HSV)
		#imagegray = cv2.cvtColor(imagergb,cv2.COLOR_RGB2GRAY)
		h,s,v = cv2.split(imagehsv)
		r,g,b = cv2.split(imagergb)
		gb = cv2.compare(g,b,cv2.CMP_GT)
		rg = cv2.compare(r,g,cv2.CMP_GT)
		rrt = cv2.compare(r,rt,cv2.CMP_GT)
		rgb = cv2.compare(rg,gb,cv2.CMP_EQ)
		rgbrt = cv2.compare(rgb,rrt,cv2.CMP_EQ)
		sat = cv2.compare(s,50,cv2.CMP_LT)
		rgbrts = cv2.compare(sat,rgbrt,cv2.CMP_LT)
		#cv2.imshow("RGB",imagergb)
		cv2.imshow("Threshold",rgbrt)
		cv2.imshow("SAT",sat)
		if go == True:
			change = rgbrt - lastimg
			#cv2.imshow("Change",change)
		nc = 0
		lastimg = rgbrt
		go = True
Example #45
def run(input_handler: InputHandler, camera_name: str, add_to_db: Optional[str]):
    Database.initialize()
    im0 = input_handler.get_frame(camera_name).image
    im_gray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
    im_draw = numpy.copy(im0)
    tl, br = util.get_rect(im_draw)

    cmt = CMT(im_gray0, tl, br, estimate_rotation=False)
    identifier = SubjectIdentifier(add_to_db)

    structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    while True:
        try:
            # Read image
            im = input_handler.get_frame(camera_name).image

            im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im_draw = numpy.copy(im)

            cmt.process_frame(im_gray)

            # Display results
            # Draw updated estimate
            if cmt.has_result:
                cropped_image = im[cmt.tl[1] : cmt.bl[1], cmt.tl[0] : cmt.tr[0]]

                difference = cv2.absdiff(im_gray0, im_gray)

                blurred = cv2.medianBlur(difference, 3)
                display = cv2.compare(blurred, 6, cv2.CMP_GT)

                eroded = cv2.erode(display, structuring_element)
                dilated = cv2.dilate(eroded, structuring_element)

                cropped_mask = dilated[cmt.tl[1] : cmt.bl[1], cmt.tl[0] : cmt.tr[0]]
                cropped_mask[cropped_mask == 255] = 1

                horizontal_center = cropped_mask.shape[1] // 2
                vertical_center = cropped_mask.shape[0] // 2

                cv2.ellipse(
                    cropped_mask,
                    (horizontal_center, vertical_center),
                    (horizontal_center, vertical_center),
                    0,
                    0,
                    360,
                    3,
                    -1,
                )

                cv2.rectangle(im_draw, cmt.tl, cmt.br, (255, 0, 0), 4)
                subject = identifier.identify(cropped_image, cropped_mask)
                cv2.putText(im_draw, subject.name, cmt.tl, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

            util.draw_keypoints(cmt.tracked_keypoints, im_draw, (255, 255, 255))
            util.draw_keypoints(cmt.votes[:, :2], im_draw)
            util.draw_keypoints(cmt.outliers[:, :2], im_draw, (0, 0, 255))
            cv2.imshow("main", im_draw)
            cv2.waitKey(1)

            im_gray0 = im_gray
        except IndexError:
            Database.save_db()
            exit(0)
Example #46
def matching_pixels(img1, img2):
    return cv2.countNonZero(cv2.compare(img1, img2, cv2.CMP_EQ))
Example #47
def compute_contours(data, threshold_value, transform=None, exact=False, smooth_value=0,
                     contour_method=cv2.CHAIN_APPROX_TC89_L1, zoom=None):
    if exact:
        thresh_img = cv2.compare(data, threshold_value, cv2.CMP_EQ)
    else:
        ret, thresh_img = cv2.threshold(data, threshold_value, 255, 0)
    if thresh_img.dtype != np.uint8:
        thresh_img = thresh_img.astype(np.uint8)

    if zoom:
        thresh_img = cv2.resize(thresh_img, (thresh_img.shape[1]*zoom, thresh_img.shape[0]*zoom),
                                interpolation=cv2.INTER_NEAREST)  # dsize is (width, height)
        thresh_img = cv2.GaussianBlur(thresh_img, (3, 3), 0)
        ret, thresh_img = cv2.threshold(thresh_img, 99, 255, 0)
    else:
        cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)
        thresh_img = cv2.dilate(thresh_img, cross)
    if thresh_img.dtype != np.uint8:
        thresh_img = thresh_img.astype(np.uint8)
    if contour_method.lower() in ['simple', 'approx']:
        if contour_method.lower() == 'simple':
            method = cv2.CHAIN_APPROX_SIMPLE
        else:
            method = cv2.CHAIN_APPROX_TC89_L1
        thresh_img, contours, order = cv2.findContours(thresh_img, cv2.RETR_LIST, method)
    elif contour_method.lower() == 'accurate':
        try:
            contours = measure.find_contours(thresh_img, threshold_value, fully_connected='high')
        except AttributeError as e:
            logger.error("Need package 'skimage' for accurate contour method")
            raise e
    else:
        logger.error("Unknown contour method: \"{}\"".format(contour_method))
        contours = []
    need_column_swap = False
    del thresh_img
    featurelist = []
    contour_len = [len(contour) for contour in contours]
    for idx, c_len in enumerate(contour_len):
        if c_len > 1:
            if smooth_value > 0:
                contour = cv2.approxPolyDP(contours[idx].astype(np.float32), smooth_value, True)
            else:
                contour = contours[idx]
            l = len(contour)
            if l < 2:
                continue
            contour = np.reshape(contour, [l, 2])
            if transform is not None:
                # turn into homogeneous coordinates
                con_h = np.ones([l, 3])
                # Doing the below is much quicker than the normal:
                #    contour = np.append(contour, np.ones([l, 1]), axis=1)
                if need_column_swap:
                    con_h[:, :-1] = contour[:, [1, 0]]
                else:
                    con_h[:, :-1] = contour
                # apply transform from pixel coords into srs
                contour = np.dot(con_h, transform).tolist()
            else:
                if need_column_swap:
                    contour[:, [0, 1]] = contour[:, [1, 0]]
                contour = contour.tolist()
            if contour[0] != contour[-1]:
                contour.append(contour[0])
            poly = contour
            featurelist.append([poly])
    return featurelist
Example #48
def dark_or_light_objects_only(self, color='dark'):
    if self.params['circular_mask_x'] != 'none':
        if self.image_mask is None:
            self.image_mask = np.zeros_like(self.imgScaled)
            cv2.circle(self.image_mask,(self.params['circular_mask_x'], self.params['circular_mask_y']),int(self.params['circular_mask_r']),[1,1,1],-1)
        self.imgScaled = self.image_mask*self.imgScaled
        
    # If there is no background image, grab one, and move on to the next frame
    if self.backgroundImage is None:
        reset_background(self)
        return
    if self.reset_background_flag:
        reset_background(self)
        self.reset_background_flag = False
        return
    if self.add_image_to_background_flag:
        add_image_to_background(self, color)
        self.add_image_to_background_flag = False
        return 
    
    
    if self.params['backgroundupdate'] != 0:
        cv2.accumulateWeighted(np.float32(self.imgScaled), self.backgroundImage, self.params['backgroundupdate']) # this needs to be here, otherwise there's an accumulation of something in the background
    if self.params['medianbgupdateinterval'] != 0:
        t = rospy.Time.now().secs
        if 'medianbgimages' not in self.__dict__:
            self.medianbgimages = [self.imgScaled]
            self.medianbgimages_times = [t]
        if t-self.medianbgimages_times[-1] > self.params['medianbgupdateinterval']:
            self.medianbgimages.append(self.imgScaled)
            self.medianbgimages_times.append(t)
        if len(self.medianbgimages) > 3:
            self.backgroundImage = copy.copy(np.float32(np.median(self.medianbgimages, axis=0)))
            self.medianbgimages.pop(0)
            self.medianbgimages_times.pop(0)
            print('reset background with median image')

    try:
        kernel = self.kernel
    except:
        kern_d = self.params['morph_open_kernel_size']
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kern_d,kern_d))
        self.kernel = kernel
    
    if color == 'dark':
        self.threshed = cv2.compare(np.float32(self.imgScaled), self.backgroundImage-self.params['threshold'], cv2.CMP_LT) # CMP_LT is less than
    elif color == 'light':
        self.threshed = cv2.compare(np.float32(self.imgScaled), self.backgroundImage+self.params['threshold'], cv2.CMP_GT) # CMP_GT is greater than
    elif color == 'darkorlight':
        #absdiff = cv2.absdiff(np.float32(self.imgScaled), self.backgroundImage)
        #retval, self.threshed = cv2.threshold(absdiff, self.params['threshold'], 255, 0)
        #self.threshed = np.uint8(self.threshed)
        dark = cv2.compare(np.float32(self.imgScaled), self.backgroundImage-self.params['threshold'], cv2.CMP_LT) # CMP_LT is less than
        light = cv2.compare(np.float32(self.imgScaled), self.backgroundImage+self.params['threshold'], cv2.CMP_GT) # CMP_GT is greater than
        self.threshed = dark+light
    
    convert_to_gray_if_necessary(self)
    
    # noise removal
    self.threshed = cv2.morphologyEx(self.threshed,cv2.MORPH_OPEN, kernel, iterations = 1)
    
    # sure background area
    #sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    #dist_transform = cv2.distanceTransform(opening,cv.CV_DIST_L2,3)
    #dist_transform = dist_transform / (np.max(dist_transform)) * 255
    #ret, sure_fg = cv2.threshold(dist_transform,0.2*dist_transform.max(),255,0)

    # Finding unknown region
    #sure_fg = np.uint8(sure_fg)
    
    #self.threshed = sure_fg
    erode_and_dialate(self)

    # publish the processed image
    c = cv2.cvtColor(np.uint8(self.threshed), cv2.COLOR_GRAY2BGR)
    # commented for now, because publishing unthresholded difference
    
    if OPENCV_VERSION == 2: # cv bridge not compatible with open cv 3, at least at this time
        img = self.cvbridge.cv2_to_imgmsg(c, 'bgr8') # might need to change to bgr for color cameras
        self.pubProcessedImage.publish(img)
    
    extract_and_publish_contours(self)
Example #49
	#Create a mask where green and one other colour only are almost 1:1
	r1 = r * 1.0
	g1 = g * 1.0
	b1 = b * 1.0
	#green:blue
	gbratio = cv2.divide(b1, g1)
	maskgbratio = cv2.inRange(gbratio, 0.7, 1.3)
	#green:red
	grratio = cv2.divide(r1, g1)
	maskgrratio = cv2.inRange(grratio, 0.7, 1.3)
	#only g/r or g/b should be near 1; if both are, it is whitish, which we don't want
#	maskgreenish = cv2.bitwise_xor(maskgrratio, maskgbratio)

	#Create a mask for pixels that are green dominant
	#ADJUST THIS SO THAT IT IS MORE THAN JUST SLIGHTLY GREEN DOMINANT, BUT IS DEFINITELY A GOOD BIT HIGHER THAN THE OTHERS
	mask1 = cv2.compare(g, b, cv2.CMP_GT)
	mask2 = cv2.compare(g, r, cv2.CMP_GT)
	maskgreendominant = cv2.bitwise_and(mask1, mask2)
	verygreen = cv2.bitwise_and(maskgreendominant, cv2.bitwise_not(cv2.bitwise_and(maskgrratio, maskgbratio)))

	#Combine green dominant and greenish masks
#	colourmask = cv2.bitwise_or(verygreen, maskgreenish)

	#Create a mask that sets an intensity threshold
	total = cv2.add(cv2.add(r, g), b)
	intensitymask = cv2.inRange(total, 200, 765)

	#Combine colour and intensity masks
	finalmask = cv2.bitwise_and(verygreen, intensitymask)

	output = cv2.bitwise_and(image, image, mask = finalmask)
Example #50
def compare_rms(img_to_classify):
	path = "../coin_images/"
	rms_classID = 0
	comp_classID = 0
	absdif_classID = 0

	absdif_score = 99999999
	comp_score =   0
	rms_score =    99999999
	degree = 0
	cannyx = 200
	crop_size = 55

	cropped_coin_only = preprocess_img(img_to_classify)
	coin_center = find_center_of_coin(cropped_coin_only)
	print "Coin only Center of Coin", coin_center
	img_to_classify_cropped = center_crop(cropped_coin_only , coin_center, crop_size)
	cv.Canny(img_to_classify_cropped,img_to_classify_cropped ,cv.Round((cannyx/2)),cannyx, 3)
	cv2.imwrite("cropped_coin.png", cv2array(img_to_classify_cropped))
	#sys.exit(-1)

	for name in glob.glob(path+'jheads/*.jpg'):
		print(name)
		img = cv2.imread(name)
		img_cropped = preprocess_img(img)
	
		cv.Canny(img_cropped,img_cropped ,cv.Round((cannyx/2)),cannyx, 3)
		cv2.imwrite("cropped_coin2.png", cv2array(img_cropped))

		for x in range(360):
			rotated_img = rotate_image(img_cropped  ,x)
			cropped = cv2array(center_crop(rotated_img, coin_center, crop_size))
			cv2.imwrite("rotated.png", cropped)
			'''
			temp_absdif_score = np.sum(cv2.absdiff(cropped,cv2array(img_to_classify_cropped)))
			if temp_absdif_score < absdif_score: 
				absdif_score = temp_absdif_score
				absdif_classID = "1"
				absdif_degree = x
				print "absdiff:", absdif_score,"  New classID:", absdif_classID, " Degree:", absdif_degree
			#temp_comp_score = np.sum(cv2.compare(cropped, cv2array(img_to_classify_cropped), cv2.CMP_EQ))
			'''
			img_to_classify_cropped_array = cv2array(img_to_classify_cropped)
			img_to_classify_cropped_array_reshaped = np.reshape(img_to_classify_cropped_array, (img_to_classify_cropped_array.shape[0],img_to_classify_cropped_array.shape[1]) )
			cropped_reshaped = np.reshape(cropped, (cropped.shape[0],cropped.shape[1]) )
	
			temp_comp_score = compute_ssim(cropped_reshaped, img_to_classify_cropped_array_reshaped)
			if temp_comp_score > comp_score: 
				comp_score = temp_comp_score
				comp_classID = "1"
				comp_degree = x
				print "comp_score:", comp_score,"  New classID:", comp_classID, " Degree:", comp_degree
				rotated_img2 = cv2array(rotate_image(array2cv(img_to_classify) , (360-comp_degree)))
				cv2.imwrite("comp_degree.png", rotated_img2)
			'''
			temp_rms_score = compare_images(cropped , cv2array(img_to_classify_cropped))
			if temp_rms_score[0] < rms_score:
				rms_score = temp_rms_score[0]
				rms_classID = "1"
				rms_degree = x
				print "RMS Score:", rms_score,"  New classID:", rms_classID, " Degree:", rms_degree
			'''

	for name in glob.glob(path+'jtails/*.jpg'):
		print(name)
		img = cv2.imread(name)
		img_cropped = preprocess_img(img)	

		cv.Canny(img_cropped,img_cropped ,cv.Round((cannyx/2)),cannyx, 3)
		cv2.imwrite("cropped_coin2.png", cv2array(img_cropped))

		for x in range(360):
			rotated_img = rotate_image(img_cropped  ,x)
			cropped = cv2array(center_crop(rotated_img, coin_center, crop_size))
			cv2.imwrite("rotated.png", cropped)

			img_to_classify_cropped_array = cv2array(img_to_classify_cropped)
			img_to_classify_cropped_array_reshaped = np.reshape(img_to_classify_cropped_array, (img_to_classify_cropped_array.shape[0],img_to_classify_cropped_array.shape[1]) )
			cropped_reshaped = np.reshape(cropped, (cropped.shape[0],cropped.shape[1]) )
	
			temp_comp_score = compute_ssim(cropped_reshaped, img_to_classify_cropped_array_reshaped)
			if temp_comp_score > comp_score: 
				comp_score = temp_comp_score
				comp_classID = "2"
				comp_degree = x
				print "comp_score:", comp_score,"  New classID:", comp_classID, " Degree:", comp_degree
				rotated_img2 = cv2array(rotate_image(array2cv(img_to_classify) , (360-comp_degree)))
				cv2.imwrite("comp_degree.png", rotated_img2)

	for name in glob.glob(path+'oheads/*.jpg'):
		print(name)
		img = cv2.imread(name)
		img_cropped = preprocess_img(img)

		cv.Canny(img_cropped,img_cropped ,cv.Round((cannyx/2)),cannyx, 3)
		cv2.imwrite("cropped_coin2.png", cv2array(img_cropped))

		for x in range(360):
			rotated_img = rotate_image(img_cropped  ,x)
			cropped = cv2array(center_crop(rotated_img, coin_center, crop_size))
			cv2.imwrite("rotated.png", cropped)

			temp_comp_score = np.sum(cv2.compare(cropped, cv2array(img_to_classify_cropped), cv2.CMP_EQ))
			if temp_comp_score > comp_score: 
				comp_score = temp_comp_score
				comp_classID = "3"
				comp_degree = x
				print "comp_score:", comp_score,"  New classID:", comp_classID, " Degree:", comp_degree
				rotated_img2 = cv2array(rotate_image(array2cv(img_to_classify) , (360-comp_degree)))
				cv2.imwrite("comp_degree.png", rotated_img2)


	for name in glob.glob(path+'otails/*.jpg'):
		print(name)
		img = cv2.imread(name)
		img_cropped = preprocess_img(img)

		cv.Canny(img_cropped,img_cropped ,cv.Round((cannyx/2)),cannyx, 3)
		cv2.imwrite("cropped_coin2.png", cv2array(img_cropped))

		for x in range(360):
			rotated_img = rotate_image(img_cropped  ,x)
			cropped = cv2array(center_crop(rotated_img, coin_center, crop_size))
			cv2.imwrite("rotated.png", cropped)

			temp_comp_score = np.sum(cv2.compare(cropped, cv2array(img_to_classify_cropped), cv2.CMP_EQ))
			if temp_comp_score > comp_score: 
				comp_score = temp_comp_score
				comp_classID = "4"
				comp_degree = x
				print "comp_score:", comp_score,"  New classID:", comp_classID, " Degree:", comp_degree
				rotated_img2 = cv2array(rotate_image(array2cv(img_to_classify) , (360-comp_degree)))
				cv2.imwrite("comp_degree.png", rotated_img2)

	#print "Final classID:", classID, " Degree:", degree
	print "ClassID:"
	print "1 = Heads"
	print "2 = Tails"
	print "3 = Other Heads"
	print "4 = Other Tails"
	print "Final comp_score:", comp_score,"  Comp classID:", comp_classID, " Degree:", comp_degree
Example #51
 def __ne__(self, other):
     return cv2.compare(self._ocvimg, imgunwrap(other), cv2.CMP_NE)