def kuwahara(self, img, r=30, resize=False, rate=0.5):
    """Apply a Kuwahara filter to ``img`` using integral images.

    Args (translated from the original Japanese comment):
        img: H x W x C source image.
        r: side length of each square sub-region.
        resize: whether to resize the image first.
        rate: resize ratio used when ``resize`` is True.

    Returns:
        Filtered image with the same dtype as ``img``.
    """
    h, w, _ = img.shape
    if resize:
        img = cv2.resize(img, (int(w * rate), int(h * rate)))
        h, w, _ = img.shape
    # Pad by r so every pixel has four full (r+1)x(r+1) quadrants.
    img = np.pad(img, ((r, r), (r, r), (0, 0)), "edge")
    ave, var = cv2.integral2(img)
    # Mean of every (r+1)x(r+1) window, computed for all pixels at once
    # via the 4-corner summed-area-table identity.
    ave = (ave[:-r - 1, :-r - 1] + ave[r + 1:, r + 1:] - ave[r + 1:, :-r - 1]
           - ave[:-r - 1, r + 1:]) / (r + 1)**2
    # Variance of every window (summed over channels), all pixels at once.
    var = ((var[:-r - 1, :-r - 1] + var[r + 1:, r + 1:] - var[r + 1:, :-r - 1]
            - var[:-r - 1, r + 1:]) / (r + 1)**2 - ave**2).sum(axis=2)

    def filt(i, j):
        # For each pixel, pick the mean of the quadrant with minimal variance.
        return np.array([
            ave[i, j], ave[i + r, j], ave[i, j + r], ave[i + r, j + r]
        ])[(np.array(
            [var[i, j], var[i + r, j], var[i, j + r],
             var[i + r, j + r]]).argmin(axis=0).flatten(), j.flatten(),
            i.flatten())].reshape(w, h, _).transpose(1, 0, 2)

    # Evaluate the selection for the whole grid, then restore dtype.
    filtered_pic = filt(*np.meshgrid(np.arange(h), np.arange(w))).astype(
        img.dtype)
    return filtered_pic
def local_normalization(w_height, w_width, im):
    """Locally normalize ``im``: for each pixel subtract the mean and divide
    by the standard deviation of its ``w_height`` x ``w_width`` window.

    Window sums come from integral images through the project helper
    ``get_sum_for_window``. Pixels whose window std-dev is 0 stay 0.

    Returns:
        float64 array with the same shape as ``im``.
    """
    # Integral image and integral of squared values.
    int_im, int_im_sq = cv.integral2(im)
    height = im.shape[0]
    width = im.shape[1]
    w_area = float(w_height * w_width)
    norm_im = np.zeros(im.shape, dtype=float)
    # (Removed the unused mid_w/mid_h locals of the original.)
    for y in range(0, height):
        for x in range(0, width):
            sumW = float(get_sum_for_window(x, y, w_height, w_width, int_im))
            sumWSQ = float(
                get_sum_for_window(x, y, w_height, w_width, int_im_sq))
            mean = sumW / w_area
            # std = sqrt(E[x^2] - mean^2)
            dvst = np.sqrt((sumWSQ / w_area) - (mean * mean))
            if dvst > 0:
                norm_im[y, x] = (float(im[y, x]) - mean) / dvst
    # np.float was removed in NumPy 1.20+; np.float64 is the same dtype.
    return np.array(norm_im).astype(np.float64)
def calcCV(self):
    """Compute the integral image (ii) and squared-integral image (sii) of
    self.img, dropping the leading zero row and column OpenCV prepends."""
    full_ii, full_sii = cv2.integral2(self.img)
    # Slice off row 0 and column 0 in one step each (equivalent to the
    # original pair of np.delete calls per array).
    self.ii = full_ii[1:, 1:]
    self.sii = full_sii[1:, 1:]
def kuwahara(pic, r=5, resize=False, rate=0.5):
    """Apply a Kuwahara filter to ``pic`` using integral images.

    Args:
        pic: H x W x C source image.
        r: side length of each square sub-region.
        resize: whether to resize the image first.
        rate: resize ratio used when ``resize`` is True.

    Returns:
        Filtered image with the same dtype as ``pic``.
    """
    h, w, _ = pic.shape
    if resize:
        pic = cv2.resize(pic, (int(w * rate), int(h * rate)))
        h, w, _ = pic.shape
    # Pad by r so every pixel has four full (r+1)x(r+1) quadrants.
    pic = np.pad(pic, ((r, r), (r, r), (0, 0)), "edge")
    ave, var = cv2.integral2(pic)
    # Window means via the 4-corner summed-area-table identity.
    ave = (ave[:-r - 1, :-r - 1] + ave[r + 1:, r + 1:] - ave[r + 1:, :-r - 1]
           - ave[:-r - 1, r + 1:]) / (r + 1)**2
    # Per-window variance, summed over channels.
    var = ((var[:-r - 1, :-r - 1] + var[r + 1:, r + 1:] - var[r + 1:, :-r - 1]
            - var[:-r - 1, r + 1:]) / (r + 1)**2 - ave**2).sum(axis=2)

    def filt(i, j):
        # For each pixel, pick the mean of the quadrant with minimal variance.
        return np.array([
            ave[i, j], ave[i + r, j], ave[i, j + r], ave[i + r, j + r]
        ])[(np.array(
            [var[i, j], var[i + r, j], var[i, j + r],
             var[i + r, j + r]]).argmin(axis=0).flatten(), j.flatten(),
            i.flatten())].reshape(w, h, _).transpose(1, 0, 2)

    filtered_pic = filt(*np.meshgrid(np.arange(h), np.arange(w))).astype(
        pic.dtype)
    return filtered_pic
def sauvola(img, window, thresh, k):
    """Sauvola binarization based in, J. Sauvola, T. Seppanen, S. Haapakoski,
    M. Pietikainen, Adaptive Document Binarization, in IEEE Computer
    Society Washington, 1997.

    Args:
        img: 2-D grayscale image.
        window: (height, width) of the local window.
        thresh: dynamic range divisor for the std term.
        k: Sauvola sensitivity parameter.

    Returns:
        uint8 binary image (0 / 255).
    """
    rows, cols = img.shape
    pad = int(np.floor(window[0] / 2))
    # Integral and squared-integral images of the zero-padded input.
    sum2, sqsum = cv2.integral2(
        cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_CONSTANT))
    # Per-pixel window sums via the 4-corner summed-area-table identity.
    isum = sum2[window[0]:rows + window[0], window[1]:cols + window[1]] + \
        sum2[0:rows, 0:cols] - \
        sum2[window[0]:rows + window[0], 0:cols] - \
        sum2[0:rows, window[1]:cols + window[1]]
    isqsum = sqsum[window[0]:rows + window[0], window[1]:cols + window[1]] + \
        sqsum[0:rows, 0:cols] - \
        sqsum[window[0]:rows + window[0], 0:cols] - \
        sqsum[0:rows, window[1]:cols + window[1]]
    ksize = window[0] * window[1]
    mean = isum / ksize
    # NOTE(review): the textbook Sauvola std is sqrt(isqsum/ksize - mean**2);
    # the extra /ksize divisors here deviate from that formula — confirm
    # whether this was tuned deliberately with `thresh` before changing.
    std = (((isqsum / ksize) - (mean**2) / ksize) / ksize)**0.5
    # (mean >= 100): windows with mean < 100 get threshold 0, i.e. those
    # pixels are forced to 255 — a heuristic, not part of the paper.
    threshold = (mean * (1 + k * (std / thresh - 1))) * (mean >= 100)
    return np.asarray(255 * (img >= threshold), 'uint8')
def kuwahara_filter(image, kernel_size=5):
    """Return the image after applying a Kuwahara filter.

    Args:
        image: H x W x C image array.
        kernel_size: odd kernel size (5 or more); the quadrant radius is
            r = (kernel_size - 1) // 2, clamped to at least 2.

    Returns:
        Filtered image with the same dtype as the input.
    """
    height, width, channel = image.shape[0], image.shape[1], image.shape[2]
    r = int((kernel_size - 1) / 2)
    r = r if r >= 2 else 2
    image = np.pad(image, ((r, r), (r, r), (0, 0)), "edge")
    average, variance = cv.integral2(image)
    # Window means/variances via the standard 4-corner summed-area-table
    # identity. The previous version mixed in malformed slices (e.g.
    # average[r:1, r:1] and average[-r-1:r+1, ...] are empty/incorrect
    # regions) and indexed the 3-D arrays with three subscripts
    # (average[i, +1, j + r]), which breaks the computation; this is the
    # corrected 4-quadrant form.
    average = (average[:-r - 1, :-r - 1] + average[r + 1:, r + 1:] -
               average[r + 1:, :-r - 1] -
               average[:-r - 1, r + 1:]) / (r + 1)**2
    variance = ((variance[:-r - 1, :-r - 1] + variance[r + 1:, r + 1:] -
                 variance[r + 1:, :-r - 1] - variance[:-r - 1, r + 1:]) /
                (r + 1)**2 - average**2).sum(axis=2)

    def filter(i, j):
        # Select, per pixel, the quadrant mean with the smallest variance.
        return np.array([
            average[i, j], average[i + r, j], average[i, j + r],
            average[i + r, j + r]
        ])[(np.array([
            variance[i, j], variance[i + r, j], variance[i, j + r],
            variance[i + r, j + r]
        ]).argmin(axis=0).flatten(), j.flatten(),
            i.flatten())].reshape(width, height, channel).transpose(1, 0, 2)

    filtered_image = filter(*np.meshgrid(np.arange(height), np.arange(width)))
    filtered_image = filtered_image.astype(image.dtype)
    filtered_image = filtered_image.copy()
    return filtered_image
def compute_integral(img: "np.ndarray") -> "np.ndarray":
    """Return the summed-area table (integral image) of ``img``.

    cv2.integral2 prepends a zero row and column, so the result has shape
    (H+1, W+1) and I[y, x] equals the sum of img over the 1-indexed range
    (1,1)..(y,x) — which matches the intended contract.
    """
    integral_matrix, _ = cv2.integral2(img)
    return integral_matrix
def main():
    """Demo: read 'timg1.jpg', run the fast edge-preserving filter pipeline,
    write the result to 'fb.jpg' and show both side by side."""
    src = cv2.imread('timg1.jpg')
    mask = generateMask(src)
    # Integral and squared-integral images with explicit output depths.
    sum, sqsum = cv2.integral2(src, sdepth=cv2.CV_32S, sqdepth=cv2.CV_32F)
    dst = FastEPFilter(src, sum, sqsum)
    dst = blendImage(src, mask, dst)
    dst = enhanceEdge(src, dst, mask)
    cv2.imwrite("fb.jpg", dst)
    # Original vs filtered, side by side.
    f, axarr = plt.subplots(1, 2)
    axarr[0].imshow(cv2.imread('timg1.jpg'))
    axarr[1].imshow(cv2.imread('fb.jpg'))
    plt.show()
def calcLocalStats(im, map_m, map_s, winx, winy):
    """Fill ``map_m``/``map_s`` with the local mean and standard deviation of
    ``im`` over a sliding ``winx`` x ``winy`` window (binarization helper).

    Args:
        im: 2-D grayscale image.
        map_m, map_s: preallocated float arrays written in place.
        winx, winy: window width and height.

    Returns:
        (max_s, map_m, map_s, im) where max_s is the largest std seen.
    """
    rows, cols = im.shape
    im_sum, im_sum_sq = cv2.integral2(im, sqdepth=cv2.CV_64F)
    # Half-window sizes must be integers: plain '/' yields floats under
    # Python 3 and breaks range() and ndarray.item() indexing below.
    wxh = winx // 2
    wyh = winy // 2
    x_firstth = wxh
    y_lastth = rows - wyh - 1
    y_firstth = wyh
    winarea = float(winx * winy)
    max_s = 0
    j = y_firstth
    for j in range(y_firstth, y_lastth + 1):
        # Sum and squared sum of the leftmost window on this row.
        sum = im_sum.item(j - wyh + winy, winx) - im_sum.item(
            j - wyh, winx) - im_sum.item(j - wyh + winy, 0) + im_sum.item(
                j - wyh, 0)
        sum_sq = im_sum_sq.item(j - wyh + winy, winx) - im_sum_sq.item(
            j - wyh, winx) - im_sum_sq.item(j - wyh + winy,
                                            0) + im_sum_sq.item(j - wyh, 0)
        m = sum / winarea
        s = math.sqrt((sum_sq - m * sum) / winarea)
        if s > max_s:
            max_s = s
        map_m.itemset((j, x_firstth), m)
        map_s.itemset((j, x_firstth), s)
        maxrange = cols - winx + 1
        for i in range(1, maxrange):
            # Slide the window right: drop the left column, add the right one.
            sum -= im_sum.item(j - wyh + winy, i) - im_sum.item(j - wyh, i) \
                - im_sum.item(j - wyh + winy, i - 1) \
                + im_sum.item(j - wyh, i - 1)
            sum += im_sum.item(j - wyh + winy, i + winx) - im_sum.item(
                j - wyh, i + winx) - im_sum.item(
                    j - wyh + winy, i + winx - 1) + im_sum.item(
                        j - wyh, i + winx - 1)
            sum_sq -= im_sum_sq.item(j - wyh + winy, i) - im_sum_sq.item(
                j - wyh, i) - im_sum_sq.item(
                    j - wyh + winy, i - 1) + im_sum_sq.item(j - wyh, i - 1)
            sum_sq += im_sum_sq.item(
                j - wyh + winy, i + winx) - im_sum_sq.item(
                    j - wyh, i + winx) - im_sum_sq.item(
                        j - wyh + winy, i + winx - 1) + im_sum_sq.item(
                            j - wyh, i + winx - 1)
            m = sum / winarea
            s = math.sqrt(abs(sum_sq - m * sum) / winarea)
            if s > max_s:
                max_s = s
            map_m.itemset((j, i + wxh), m)
            map_s.itemset((j, i + wxh), s)
    return max_s, map_m, map_s, im
def mean_std(im, W):
    """Per-pixel mean and standard deviation of ``im`` over a W x W window,
    using reflect padding and integral images.

    Returns:
        (means, stds) arrays with the same spatial shape as ``im``.
    """
    half = W // 2
    area = W * W
    padded = np.pad(im, (half, half), 'reflect')
    isum, isqsum = cv2.integral2(padded, sdepth=cv2.CV_32F)

    def window_total(table):
        # 4-corner summed-area-table identity for every W x W window.
        return (table[W:, W:] - table[W:, :-W]
                - table[:-W, W:] + table[:-W, :-W])

    means = window_total(isum) / area
    variances = window_total(isqsum) / area - means * means
    # Clip tiny negative values caused by float round-off before sqrt.
    return means, np.sqrt(variances.clip(0, None))
def standard_derivation(self):
    """Demo: compute the std-dev of self.fused two ways and print both.

    Uses the full image; any rectangular region would work the same way.
    """
    # Method-1: using cv2.meanStdDev()
    mean, std_1 = cv2.meanStdDev(self.fused, mask=None)
    # Method-2: using the formula sqrt((S2 - S1**2/n) / n) built from
    # integral images.
    sum_1, sqsum_2 = cv2.integral2(self.fused)
    n = self.fused.size
    # The sum over a region is bottom-right + top-left - top-right -
    # bottom-left; for the whole image only the bottom-right entry is needed.
    s1 = sum_1[-1, -1]
    s2 = sqsum_2[-1, -1]
    std_2 = np.sqrt((s2 - (s1 ** 2) / n) / n)
    print(std_1, std_2)  # e.g. [[0.45825757]] 0.4582575694
def fast_localEnergy(args):
    """Compute the local-energy map of an image (Cython-accelerated).

    Args:
        args: tuple (img, wsize) — 2-D image and odd window size.

    Returns:
        float32 image of the same size as ``img`` (borders replicated).
    """
    # Unpack the image and take its dimensions.
    img, wsize = args
    r, c = img.shape
    # Size of the border to regenerate afterwards.
    bor = int((wsize - 1) / 2)
    # Integral and squared-integral images make the window sums much faster.
    sum_1, sqsum_2 = cv2.integral2(img)
    # Output buffer (valid region only).
    img_eng = np.zeros((r - (wsize - 1), c - (wsize - 1)), np.float64)
    # Call the Cython implementation and convert back to a NumPy array.
    # np.float was removed in NumPy 1.20+; np.float64 is the same dtype.
    img_eng = np.array(
        localEnergy(sum_1.astype(np.float64), sqsum_2, img_eng, r, c, wsize),
        np.float32)
    # Replicate the borders so the result matches the original size.
    img_eng = cv2.copyMakeBorder(img_eng, bor, bor, bor, bor,
                                 cv2.BORDER_REPLICATE)
    return img_eng
def integralMean(im, rows, cols, window):
    """Windowed mean and mean-of-squares for every pixel via integral images.

    Args:
        im: input image.
        rows, cols: image dimensions.
        window: (m, n) window size.

    Returns:
        (mean, sqmean) arrays of shape (rows, cols).
    """
    m, n = window
    area = m * n
    # Integral images of im and im**2.
    sums, sqsums = cv2.integral2(im)

    def box(table):
        # Window total via the 4-corner summed-area-table identity.
        return (table[m:rows + m, n:cols + n] + table[0:rows, 0:cols]
                - table[m:rows + m, 0:cols] - table[0:rows, n:cols + n])

    return box(sums) / area, box(sqsums) / area
def kuwahara_filter(image, kernel_size=5):
    """Return the image after applying a Kuwahara filter.

    Args:
        image: OpenCV image (H x W x C).
        kernel_size: Kernel size; an odd number of 5 or more.

    Returns:
        Image after applying the filter.
    """
    height, width, channel = image.shape[0], image.shape[1], image.shape[2]
    r = int((kernel_size - 1) / 2)
    r = r if r >= 2 else 2  # quadrant radius, clamped to at least 2
    image = np.pad(image, ((r, r), (r, r), (0, 0)), "edge")
    average, variance = cv.integral2(image)
    # Window means via the 4-corner summed-area-table identity.
    average = (average[:-r - 1, :-r - 1] + average[r + 1:, r + 1:] -
               average[r + 1:, :-r - 1] -
               average[:-r - 1, r + 1:]) / (r + 1)**2
    # Per-window variance, summed over channels.
    variance = ((variance[:-r - 1, :-r - 1] + variance[r + 1:, r + 1:] -
                 variance[r + 1:, :-r - 1] - variance[:-r - 1, r + 1:]) /
                (r + 1)**2 - average**2).sum(axis=2)

    def filter(i, j):
        # Select, per pixel, the quadrant mean with the smallest variance.
        return np.array([
            average[i, j], average[i + r, j], average[i, j + r],
            average[i + r, j + r]
        ])[(np.array([
            variance[i, j], variance[i + r, j], variance[i, j + r],
            variance[i + r, j + r]
        ]).argmin(axis=0).flatten(), j.flatten(),
            i.flatten())].reshape(width, height, channel).transpose(1, 0, 2)

    filtered_image = filter(*np.meshgrid(np.arange(height), np.arange(width)))
    filtered_image = filtered_image.astype(image.dtype)
    filtered_image = filtered_image.copy()
    return filtered_image
def sauvola(img, window, thresh, k):
    """Sauvola binarization.

    Args:
        img: 2-D grayscale image.
        window: (height, width) of the local window.
        thresh: dynamic range divisor for the std term.
        k: Sauvola sensitivity parameter.

    Returns:
        uint8 binary image (0 / 255).
    """
    rows, cols = img.shape
    pad = int(np.floor(window[0] / 2))
    # Integral and squared-integral images of the zero-padded input.
    sum2, sqsum = cv2.integral2(
        cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_CONSTANT))
    # Per-pixel window sums via the 4-corner summed-area-table identity.
    isum = sum2[window[0]:rows + window[0], window[1]:cols + window[1]] + \
        sum2[0:rows, 0:cols] - \
        sum2[window[0]:rows + window[0], 0:cols] - \
        sum2[0:rows, window[1]:cols + window[1]]
    isqsum = sqsum[window[0]:rows + window[0], window[1]:cols + window[1]] + \
        sqsum[0:rows, 0:cols] - \
        sqsum[window[0]:rows + window[0], 0:cols] - \
        sqsum[0:rows, window[1]:cols + window[1]]
    ksize = window[0] * window[1]
    mean = isum / ksize
    # NOTE(review): the textbook Sauvola std is sqrt(isqsum/ksize - mean**2);
    # the extra /ksize divisors here deviate from that formula — confirm
    # whether this was tuned deliberately with `thresh` before changing.
    std = (((isqsum / ksize) - (mean**2) / ksize) / ksize)**0.5
    # (mean >= 100): windows with mean < 100 get threshold 0, i.e. those
    # pixels are forced to 255 — a heuristic, not part of the paper.
    threshold = (mean * (1 + k * (std / thresh - 1))) * (mean >= 100)
    return np.asarray(255 * (img >= threshold), 'uint8')
def sauvola(img, imgGary, k, kernel_width, imageName, imgGaryPath):
    """Sauvola-style adaptive binarization with per-pixel window statistics
    computed from integral images; writes the binarized grayscale image and
    the whitened color image under ./imageEnhance.

    Args:
        img: color image (H x W x 3), modified in place for bright pixels.
        imgGary: grayscale image (sic — 'Gray' misspelled throughout).
        k: Sauvola sensitivity parameter.
        kernel_width: half-size of the local window.
        imageName, imgGaryPath: output file names.
    """
    tempImage = imgGary.copy()
    # Integral image (cv2.integral) plus integral-of-squares (integral2).
    # NOTE(review): `intergralImage` duplicates `sum` from integral2.
    intergralImage = cv2.integral(imgGary)
    sum, squareImage = cv2.integral2(imgGary)
    for i in range(imgGary.shape[0]):  # shape[0]: height
        for j in range(imgGary.shape[1]):  # shape[1]: width
            # Clamp the window to the image bounds.
            xmin = int(max(0, j - kernel_width))
            ymin = int(max(0, i - kernel_width))
            xmax = int(min(imgGary.shape[1] - 1, j + kernel_width))
            ymax = int(min(imgGary.shape[0] - 1, i + kernel_width))
            area = (xmax - xmin + 1) * (ymax - ymin + 1)
            if (area < 0):
                print("Error in the area: %d", area)
                return
            # Window sum / squared sum via the summed-area-table identity,
            # with the border cases handled explicitly.
            if (xmin == 0 and ymin == 0):
                # the first pixel
                intergralPxielValue = intergralImage[ymax, xmax]
                squarePixelValue = squareImage[ymax, xmax]
            elif (xmin == 0 and ymin > 0):
                # the first col
                intergralPxielValue = intergralImage[
                    ymax, xmax] - intergralImage[ymin - 1, xmax]
                squarePixelValue = squareImage[ymax, xmax] - squareImage[ymin -
                                                                         1,
                                                                         xmax]
            elif (xmin > 0 and ymin == 0):
                # the first row
                intergralPxielValue = intergralImage[
                    ymax, xmax] - intergralImage[ymax, xmin - 1]
                squarePixelValue = squareImage[ymax, xmax] - squareImage[ymax,
                                                                         xmin -
                                                                         1]
            else:
                # the rest pixels
                mainDiagonalIntergralPixelValue = intergralImage[
                    ymax, xmax] + intergralImage[ymin - 1, xmin - 1]
                counterDiagonalIntergralPixelValue = intergralImage[
                    ymin - 1, xmax] + intergralImage[ymax, xmin - 1]
                intergralPxielValue = mainDiagonalIntergralPixelValue - counterDiagonalIntergralPixelValue
                mainDiagonalSquarePixelValue = squareImage[
                    ymax, xmax] + squareImage[ymin - 1, xmin - 1]
                counterDiagonalSquarePixelValue = squareImage[
                    ymin - 1, xmax] + squareImage[ymax, xmin - 1]
                squarePixelValue = mainDiagonalSquarePixelValue - counterDiagonalSquarePixelValue
            # Beware of overflow/out-of-range (translated from Chinese).
            mean = intergralPxielValue / area
            # Sample variance: (sum_sq - sum^2/n) / (n - 1).
            a = np.longlong(0)  # NOTE(review): dead store, overwritten below
            a = math.pow(intergralPxielValue, 2)
            a = a / area
            stdDev = math.sqrt((squarePixelValue - a) / (area - 1))
            # NOTE(review): `std` is never used and uses math.sqrt where a
            # square seems intended — looks like leftover/buggy code; confirm
            # before removing.
            std = math.sqrt(
                (squarePixelValue - math.sqrt(intergralPxielValue) / area) /
                (area - 1))
            # Sauvola threshold with dynamic range 128.
            threshold = mean * (1 + k * ((stdDev / 128) - 1))
            if (imgGary[i, j] > threshold):
                tempImage[i, j] = 255
                # Also whiten the color image at this pixel.
                for m in range(3):
                    img[i, j, m] = 255
            if (imgGary[i, j] < threshold):
                tempImage[i, j] = 0
    # Write both outputs under ./imageEnhance (created if missing).
    writeGrayImgName = "./imageEnhance"
    writeImgName = "./imageEnhance"
    if (not os.path.exists(writeGrayImgName)):
        os.mkdir(writeGrayImgName)
    if (not os.path.exists(writeImgName)):
        os.mkdir(writeImgName)
    writeGrayImgName = os.path.join(writeGrayImgName, imgGaryPath)
    writeImgName = os.path.join(writeImgName, imageName)
    cv2.imwrite(writeImgName, img)
    cv2.imwrite(writeGrayImgName, tempImage)
# Guide (g) and depth (d) maps in metres from the RGB-D frame.
# np.float was removed in NumPy 1.20+; np.float64 is the same dtype.
g = np.copy(rgbd.d.astype(np.float64)) * 1e-3
d = np.copy(rgbd.d.astype(np.float64)) * 1e-3
# Python-2 `print (g==0).sum()` statement is a syntax error under Python 3;
# the call form below behaves identically on both.
print((g == 0).sum())
haveData = g >= 1e-2  # valid-depth mask (>= 1 cm)
# Integral images of the validity mask, the depth map and the guide map.
Ns = cv2.integral(haveData.astype(np.float64))
dSum = cv2.integral(d.astype(np.float64))
gSum, gSqSum = cv2.integral2(g.astype(np.float64))


def integralGet(I, i, j, w):
    """Sum of integral image I over the window of half-size w centred at
    (i, j), clamped to the image bounds."""
    return I[min(i + w, I.shape[0] - 1), min(j + w, I.shape[1] - 1)] \
        - I[min(i + w, I.shape[0] - 1), max(j - w, 0)] \
        - I[max(i - w, 0), min(j + w, I.shape[1] - 1)] \
        + I[max(i - w, 0), max(j - w, 0)]


# Elementwise product d*g, zeroed where depth is invalid, plus its integral.
prod = d * g
prod[np.logical_not(haveData)] = 0.
prodInt = cv2.integral(prod.astype(np.float64))
import cv2
import numpy as np

# 6x5 test image: a 3x3 block of ones inside a zero border.
img = np.array([[0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 1, 1, 1, 0],
                [0, 1, 1, 1, 0],
                [0, 1, 1, 1, 0],
                [0, 0, 0, 0, 0]], dtype='uint8')

# Calculate the standard deviation
# Here I'm taking the full image, you can take any rectangular region
# Method-1: using cv2.meanStdDev()
mean, std_1 = cv2.meanStdDev(img, mask=None)

# Method-2: using the formulae 1/n(S2 - (S1**2)/n)
sum_1, sqsum_2 = cv2.integral2(img)
n = img.size
# sum of the region can be easily found out using the integral image as
# Sum = Bottom right + top left - top right - bottom left
# (for the full image only the bottom-right corner is needed)
s1 = sum_1[-1, -1]
s2 = sqsum_2[-1, -1]
std_2 = np.sqrt((s2 - (s1**2) / n) / n)
print(std_1, std_2)  # [[0.45825757]] 0.4582575694
cx = 0 if (col - radius) < 0 else (col - radius) cy = 0 if (row - radius) < 0 else (row - radius) # 计算卷积框区域的大小(面积,高乘宽) num = (x2 - x1)*(y2 - y1) for i in range(0, 3, 1): s = get_block_sum(ii, x1, y1, x2, y2, i) # 获取卷积框区域内像素值的和 result[cy, cx][i] = s // num # 卷积框区域内像素值的和除以面积则为平均值 return result src = cv.imread("../data/images/example.png") sum_table = cv.integral(src) # 计算和(sum)表 blur_result = blur_demo(src, sum_table, ksize=15) # 计算平方和表(sqsum)表 sum_table, sqsum = cv.integral2(src) # 计算瓦块和表(tilted)表 \texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y) # (0,0)-(x,y)对角线下元素的和作为(x,y)的值 sum_table, sqsum, tilted = cv.integral3(src) # 可视化 rows = 1 cols = 2 images = [src, blur_result] titles = ["original image", "blur use integral"] for index, image in enumerate(images): plt.subplot(rows, cols, index + 1) if index >= len(titles): plt.title("image") else:
def paper_2017(img_l, img_r, w, d_max, tau, disp_thresh, image_number):
    """Stereo pipeline from the 2017 paper: build integral images and
    per-pixel mean/sigma maps for both views; the disparity-map stage is
    currently commented out. Intermediate maps are written to PNG files.

    Args:
        img_l, img_r: left/right rectified images.
        w: window size for the mean/sigma maps.
        d_max, tau: disparity search range and matching threshold
            (used only by the commented-out disparity stage).
        disp_thresh: unused here — presumably for the left/right check;
            TODO confirm.
        image_number: index used in output file names.
    """
    start = time.time()
    # alg 1: int image init
    # data: left and right integral images with dimensions m+1 by n+1
    # result: integral image of size m+1 x n+1
    # NOTE - see: https://stackoverflow.com/questions/30195420/why-the-integral-image-contains-extra-row-and-column-of-zeros
    int_img_l, int_img_l2 = cv.integral2(img_l)
    int_img_r, int_img_r2 = cv.integral2(img_r)
    cv.imwrite("left_int_img_" + str(image_number).zfill(6) + "_10.png",
               int_img_l)
    cv.imwrite(
        "left_int_img_squared_" + str(image_number).zfill(6) + "_10.png",
        int_img_l2)
    cv.imwrite("right_int_img_" + str(image_number).zfill(6) + "_10.png",
               int_img_r)
    cv.imwrite(
        "right_int_img_squared_" + str(image_number).zfill(6) + "_10.png",
        int_img_r2)
    # alg 2: left disparity map estimation
    # data: left + right image, mean, sigma
    # result: left disp map
    ################ MEAN MAP CALCULATION ####################
    mean_map_left, l_min_mean, l_max_mean = mean_map_calculation(
        img_l, int_img_l, w)
    mean_map_right, r_min_mean, r_max_mean = mean_map_calculation(
        img_r, int_img_r, w)
    # Normalize copies for visualization only; the raw maps feed the next
    # stage.
    mean_map_left_copy = copy.deepcopy(mean_map_left)
    mean_map_right_copy = copy.deepcopy(mean_map_right)
    normalize_disp_map(mean_map_left_copy, l_min_mean, l_max_mean)
    normalize_disp_map(mean_map_right_copy, r_min_mean, r_max_mean)
    cv.imwrite("left_mu_map_" + str(image_number).zfill(6) + ".png",
               np.array(mean_map_left_copy, dtype=np.float32))
    cv.imwrite("right_mu_map_" + str(image_number).zfill(6) + ".png",
               np.array(mean_map_right_copy, dtype=np.float32))
    ########## SIGMA CALCULATION ##########
    sigma_map_l, min_sigma_l, max_sigma_l = sigma_map_calculation(
        int_img_l2, mean_map_left, w)
    sigma_map_r, min_sigma_r, max_sigma_r = sigma_map_calculation(
        int_img_r2, mean_map_right, w)
    sigma_map_left_copy = copy.deepcopy(sigma_map_l)
    sigma_map_right_copy = copy.deepcopy(sigma_map_r)
    normalize_disp_map(sigma_map_left_copy, min_sigma_l, max_sigma_l)
    normalize_disp_map(sigma_map_right_copy, min_sigma_r, max_sigma_r)
    cv.imwrite("left_sigma_map_" + str(image_number).zfill(6) + ".png",
               np.array(sigma_map_left_copy, dtype=np.float32))
    cv.imwrite("right_sigma_map_" + str(image_number).zfill(6) + ".png",
               np.array(sigma_map_right_copy, dtype=np.float32))
    ########## DISPARITY MAP CALCULATION ##########
    imgs_l = [img_l, mean_map_left, sigma_map_l]
    imgs_r = [img_r, mean_map_right, sigma_map_r]
    # disp_l, l_min, l_max = left_right_disparity_2017(imgs_l, imgs_r, w, d_max, tau)
    #disp_r, r_min, r_max = right_left_disparity_2017(imgs_l, imgs_r, w, d_max, tau)
    # normalize_disp_map(disp_l, l_min, l_max)
    #normalize_disp_map(disp_r, r_min, r_max)
    # Write disparity maps
    # cv.imwrite("left_disp_map_" + str(i).zfill(6) + ".png", np.array(disp_l, dtype=np.uint8))
    #cv.imwrite("right_disp_map.png", np.array(disp_r, dtype=np.uint8))
    # Write checked disparity map
    # cv.imwrite("left_right_checked.png", np.array(
    #     disp_l_checked, dtype=np.uint8))
    end = time.time()
    print("Time elapsed (seconds): ", end - start)
#neighbouring window size can be changed depending on image window=35 #otsu thresholding ret2,th= cv2.threshold(b,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) h,w=b.shape th=0 value=[] threshold=[] m=0 var=0 std=0 b,c=cv2.integral2(b) h,w=c.shape #Sauvola Method using integrl images for i in range(1, w , 1): for j in range(1, h , 1): if(j>(h-int(window/2)) and i>(w-int(window/2))): m = (b[j, i] + b[j - int(window/2), i - int(window/2)] - b[ j , i - int(window/2)] - b[j - int(window/2), i]) / (window * window) s = (c[j, i] + c[j - int(window/2), i - int(window/2)] - c[ j, i - int(window/2)] - c[j - int(window/2), i]) / (window * window) elif (i > (w - int(window/2)) and j < (h - int(window/2))): m = (b[j + int(window/2), i] + b[j - int(window/2), i - int(window/2)] - b[ j + int(window/2), i - int(window/2)] - b[j - int(window/2), i]) / (window * window) s = (c[j + int(window/2), i] + c[j - int(window/2), i - int(window/2)] - c[
import cv2 as cv
import numpy as np
import sys

np.set_printoptions(suppress=True)

if __name__ == '__main__':
    # Create an all-ones 16x16 float64 matrix.
    img = np.ones((16, 16), dtype='float64')
    # Add random noise in [-0.5, 0.5) to the image.
    pts1 = np.random.rand(16, 16) - 0.5
    img += pts1
    # Standard sum integral image.
    sum1 = cv.integral(img)
    # Sum and squared-sum integral images.
    sum2, sqsum2 = cv.integral2(img)
    # Sum, squared-sum and tilted (45-degree) integral images.
    sum3, sqsum3, tilted3 = cv.integral3(img)
    # Display the results.
    cv.namedWindow('sum', cv.WINDOW_NORMAL)
    cv.namedWindow('sum_sqsum', cv.WINDOW_NORMAL)
    cv.namedWindow('sum_sqsum_tilted', cv.WINDOW_NORMAL)
    cv.imshow('sum', (sum1 / 255))
    cv.imshow('sum_sqsum', (sqsum2 / 255))
    cv.imshow('sum_sqsum_tilted', (tilted3 / 255))
    cv.waitKey(0)
    cv.destroyAllWindows()
def app(im):
    """Page App

    Render the transformation selected in the sidebar onto the Streamlit
    page. ``im`` is the uploaded PIL image (or None).
    """
    width, choice = app_sidebar()
    if im is not None:
        if choice == "Bilevel":
            img = im.convert("1")
        elif choice == "Greyscale":
            img = im.convert("L")
        elif choice == "Contrast":
            factor = st.sidebar.slider(choice, 0.0, 3.0, 1.0)
            enhancer = ImageEnhance.Contrast(im)
            img = enhancer.enhance(factor)
        elif choice == "Brightness":
            factor = st.sidebar.slider(choice, 0.0, 3.0, 1.0)
            enhancer = ImageEnhance.Brightness(im)
            img = enhancer.enhance(factor)
        elif choice == "Colorpick":
            # Keep pixels whose HSV components fall in the slider range.
            arr = np.array(im.convert("RGB"))
            hsv = cv2.cvtColor(arr, cv2.COLOR_BGR2HSV)
            a, b = st.sidebar.slider(choice, 0, 255, (0, 255))
            img_mask = cv2.inRange(hsv, np.ones(3) * a, np.ones(3) * b)
            img = cv2.bitwise_and(arr, arr, mask=img_mask)
        elif choice == "Canny":
            arr = np.array(im.convert("RGB"))
            gray = cv2.cvtColor(arr, cv2.COLOR_BGR2GRAY)
            a, b = st.sidebar.slider(choice, 0, 500, (200, 400))
            img = cv2.Canny(gray, a, b)
            img = signature(img)
        elif choice == "Hist":
            # Per-channel histogram plotted with matplotlib.
            from matplotlib import pyplot as plt
            arr = np.array(im.convert("RGB"))
            for i, col in enumerate(("b", "g", "r")):
                histr = cv2.calcHist([arr], [i], None, [256], [0, 256])
                plt.plot(histr, color=col)
                plt.xlim([0, 256])
            st.pyplot()
        elif choice == "Kuwahara":
            # https://en.wikipedia.org/wiki/Kuwahara_filter
            # https://qiita.com/Cartelet/items/5c1c012c132be3aa9608
            r = st.sidebar.slider(choice, 5, 50, 5, 5)
            arr = np.array(im.convert("RGB"))
            h, w, _ = arr.shape
            img = np.empty_like(arr)
            arr = np.pad(arr, ((r, r), (r, r), (0, 0)), "edge")
            ave, var = cv2.integral2(arr)
            # Window means/variances via the summed-area-table identity.
            ave_mask = (ave[:-r - 1, :-r - 1] + ave[r + 1:, r + 1:] -
                        ave[r + 1:, :-r - 1] - ave[:-r - 1, r + 1:])
            ave = ave_mask / (r + 1)**2
            var_mask = (var[:-r - 1, :-r - 1] + var[r + 1:, r + 1:] -
                        var[r + 1:, :-r - 1] - var[:-r - 1, r + 1:])
            var = (var_mask / (r + 1)**2 - ave**2).sum(axis=2)
            for i in range(h):
                for j in range(w):
                    a1, b1, c1, d1, = (
                        ave[i, j],
                        ave[i + r, j],
                        ave[i, j + r],
                        ave[i + r, j + r],
                    )
                    a2, b2, c2, d2, = (
                        var[i, j],
                        var[i + r, j],
                        var[i, j + r],
                        var[i + r, j + r],
                    )
                    # Mean of the quadrant with the smallest variance wins.
                    img[i, j] = np.array([a1, b1, c1,
                                          d1])[np.array([a2, b2, c2,
                                                         d2]).argmin()]
            img = signature(img)
        try:
            st.image(img, width=width)
        except UnboundLocalError:
            # No branch produced `img` (e.g. "Hist"): show the original.
            st.image(im, width=width)
# Configure the stereo matcher and compute a disparity map.
stereo.setMinDisparity(0)
stereo.setBlockSize(winSize)
stereo.setNumDisparities(numberOfDisparities)
stereo.setDisp12MaxDiff(100)
stereo.setUniquenessRatio(10)
stereo.setSpeckleRange(32)
stereo.setSpeckleWindowSize(0)
# SGBM/BM outputs fixed-point disparities scaled by 16.
disp = stereo.compute(iml, imr).astype(np.float32) / 16.0
disp = cv2.medianBlur(disp, 5)
disp = numberOfDisparities + disp
depth = disp.copy()
disp[disp < 0] = 0
# NOTE(review): names are swapped relative to convention — `w` holds the row
# count and `h` the column count; the loops below are consistent with that.
w, h = disp.shape
# make up the "holes" in the map
# use integration map to accelerate the calculation
inte = cv2.integral2(disp)[0]
for m in range(w):
    for n in range(h):
        size = 30  # half-size of the averaging window
        if not disp[m, n]:
            # Window clamped to the image bounds.
            idx1 = max([m - size, 0])
            idx2 = min([w, m + size])
            idx3 = max([n - size, 0])
            idx4 = min([h, n + size])
            arr = disp[idx1:idx2, idx3:idx4]
            # Average only over valid (non-zero) neighbours; if there are
            # none, num = inf makes the fill value 0.
            if np.sum(arr > 0):
                num = np.sum(arr > 0)
            else:
                num = np.inf
            depth[m, n] = (inte[idx1, idx3] + inte[idx2, idx4] -
                           inte[idx2, idx3] - inte[idx1, idx4]) / num
disp = depth
# Guide (g) and depth (d) maps in metres from the RGB-D frame.
# np.float was removed in NumPy 1.20+; np.float64 is the same dtype.
g = np.copy(rgbd.d.astype(np.float64)) * 1e-3
d = np.copy(rgbd.d.astype(np.float64)) * 1e-3
# Bug fix: `print(g == 0).sum()` called .sum() on print()'s None return
# value (AttributeError); the sum must happen inside the call.
print((g == 0).sum())
haveData = g >= 1e-2  # valid-depth mask (>= 1 cm)
# Integral images of the validity mask, the depth map and the guide map.
Ns = cv2.integral(haveData.astype(np.float64))
dSum = cv2.integral(d.astype(np.float64))
gSum, gSqSum = cv2.integral2(g.astype(np.float64))


def integralGet(I, i, j, w):
    """Sum of integral image I over the window of half-size w centred at
    (i, j), clamped to the image bounds."""
    return I[min(i + w, I.shape[0] - 1), min(j + w, I.shape[1] - 1)] \
        - I[min(i + w, I.shape[0] - 1), max(j - w, 0)] \
        - I[max(i - w, 0), min(j + w, I.shape[1] - 1)] \
        + I[max(i - w, 0), max(j - w, 0)]


# Elementwise product d*g, zeroed where depth is invalid, plus its integral.
prod = d * g
prod[np.logical_not(haveData)] = 0.
prodInt = cv2.integral(prod.astype(np.float64))
s = 3  # skip: stride between evaluated patch positions
plt.figure(1)
plt.clf()
plt.subplot(2, 1, 1)
image = np.zeros(shape + (3,), dtype=np.uint8)
img = plt.imshow(image)
plt.subplot(2, 1, 2)
# Integer division (//): plain '/' yields floats under Python 3 and breaks
# the array-shape arithmetic; '//' gives the same result on Python 2 ints.
class_image = -np.ones(((shape[0] - cshape[0] + s) // s,
                        (shape[1] - cshape[1] + s) // s))
class_img = plt.imshow(class_image, vmin=-1, vmax=1)
# print() call form replaces the Python-2-only print statement.
print(class_image.shape)
for frame in frames:
    # Activity count per cshape-window via the summed-area-table identity.
    integral, _ = cv2.integral2(abs(frame))
    counts = (integral[:-cshape[0], :-cshape[1]]
              - integral[cshape[0]:, :-cshape[1]]
              - integral[:-cshape[0], cshape[1]:]
              + integral[cshape[0]:, cshape[1]:])
    # Positions (subsampled by s) with enough activity to classify.
    ii, jj = (counts[::s, ::s] > 5).nonzero()
    patches = [frame[s * i:s * i + cshape[0], s * j:s * j + cshape[1]]
               for i, j in zip(ii, jj)]
    patches = np.array(patches, dtype=dtype)[:, None, :, :]
    classes = propup(patches)
    # Repaint the class map: -1 = unclassified, then fill in predictions.
    class_image[:] = -1
    class_image[ii, jj] = classes
    class_img.set_data(class_image)