Code example #1
    def minervini(self, norm):
        # Calculate texture response filter from Minervini 2013
        # FIXME: radius, gaussian size and sigmas should be user defined.
        falloff = 1.0 / 50.0
        pillsize = 7
        gaussize = 17
        sdH = 4
        sdL = 1

        # pillbox feature (F1)
        pillse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                           (pillsize, pillsize))
        pillse = pillse.astype(float)
        pillse = pillse / sum(sum(pillse))
        F1 = cv2.filter2D(self.imgLAB[:, :, 1], -1, pillse)

        # Difference of Gaussian (DoG) feature (F2)
        G1 = cv2.getGaussianKernel(gaussize, sdH)
        G2 = cv2.getGaussianKernel(gaussize, sdL)
        G1 = G1 * cv2.transpose(G1)
        G2 = G2 * cv2.transpose(G2)
        F2 = cv2.filter2D(self.imgLAB[:, :, 0], -1, G1 - G2)

        F = np.exp(-falloff * np.abs(F1 + F2))
        # FIXME: We are ignoring norm for now.
        F = self.normRange(F)
        F = np.reshape(F, (F.shape[0], F.shape[1], 1))

        return F
Code example #2
def get_tle_vid_goos(vid_name,divisor):
    cap = cv2.VideoCapture(vid_name)
    time = 0
    tot_time=int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    energies=np.zeros(tot_time,dtype=np.int64)
    cap.set(cv2.CAP_PROP_POS_FRAMES,time)
    ret,frame = cap.read()
    in_frame=np.zeros(frame.shape,dtype=np.int64)
    small_frame=cv2.pyrDown(frame)
    blur_frame=np.zeros(frame.shape,dtype=np.int64)
    goose=cv2.getGaussianKernel(frame.shape[0],frame.shape[0]/divisor)
    goose1=cv2.getGaussianKernel(frame.shape[1],frame.shape[1]/divisor)
    goosq=np.dot(goose,np.transpose(goose1))
    m=1/np.max(goosq)
    center_frame=np.zeros(frame.shape,dtype=np.float64)
    for color in range(3):
        center_frame[:,:,color]=goosq*m
    print(tot_time)
    while(time<tot_time):
        in_frame[:,:,:]=frame
        small_frame[:,:,:]=cv2.pyrDown(frame)
        blur_frame[:,:,:]=cv2.pyrUp(small_frame)[:frame.shape[0],:frame.shape[1],:]
        tle=np.sum(center_frame*np.abs(np.subtract(in_frame,blur_frame)))
        energies[time]=tle
        if time%50==0:
            print(time)
        time=time+1
        cap.set(cv2.CAP_PROP_POS_FRAMES,time)
        ret,frame = cap.read()
        
    return energies
Code example #3
    def __init__(self):
        super(Video, self).__init__()

        self.play = True
        self.exit = False
        self.waitPerFrameInMillisec = 1
        self.filename = ''

    # no.1
        self.grayscale = False
    # no.2
        self.invert = False
    # no.3
        self.histeql = False
    # no.4
        self.thresholding = False
        self.threshold = 125
    # no.5
        self.reducecolors = False
        self.reducechannelvalues = 2
    # no.5
        self.median = False
        self.mediansize = 3
    # no.6
        self.blur = False
        self.blurkernelsize = 3
        self.blurkernel = []
    # no.7
        self.sharpen = False
        self.sharpenkernelsize = 3
        self.sharpenkernel = []
    # no.8
        self.edges = False
        self.edgeskernel = []
    # no.9
        self.canny = False
        self.cannynumber = 10
        self.cannynumber2 = 30

###############generate blur kernels
        for i in range(3,32,2):
            self.blurkernel.insert(i//2-1, cv2.getGaussianKernel(i,0) * cv2.transpose(cv2.getGaussianKernel(i,0)))
        # print("Blur Kernel my")
        # print(self.blurkernel)
###############generate sharpen kernels
        # for i in range(3,32,2):
        #     self.sharpenkernel.insert(i, cv2.Laplacian(self.blurkernel[i//2-1], cv2.CV_64F))
        #     self.sharpenkernel[i//2-1] *= -1
        for i in range(3,32,2):
            self.sharpenkernel.insert(i//2-1, self.blurkernel[i//2-1].copy())
            self.sharpenkernel[i//2-1][i//2][i//2] -= 2.
            self.sharpenkernel[i//2-1] *= -1
        # print("Sharpen Kernel my")
        # print(self.sharpenkernel)
#######################generate edge kernel
        self.edgeskernel = np.array([[ -1.,  -1.,  -1.],
                                    [-1.,  8.,  -1.],
                                    [-1.,  -1.,  -1.]])
Code example #4
File: filters.py Project: sergey-lebedev/set
def gaussian(d_x, sx, d_y, sy):
    x_gaussian_kernel = np.matrix(cv2.getGaussianKernel(d_x, sx))
    #print x_gaussian_kernel
    y_gaussian_kernel = np.matrix(cv2.getGaussianKernel(d_y, sy))
    #print y_gaussian_kernel
    gaussian_kernel = y_gaussian_kernel * x_gaussian_kernel.T
    #print gaussian_kernel
    #k = gaussian_kernel.sum()
    #print k
    return gaussian_kernel
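A quick property check (an added sketch, not part of the original snippet): each 1-D kernel from cv2.getGaussianKernel is normalized, so the outer product sums to 1 and matches np.outer:

k = gaussian(5, 1.0, 5, 1.0)
assert np.isclose(k.sum(), 1.0)  # product of two normalized 1-D kernels
assert np.allclose(k, np.outer(cv2.getGaussianKernel(5, 1.0), cv2.getGaussianKernel(5, 1.0)))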
Code example #5
def rough_pupil_point(img):

    VARIANCE = 55
    img_inverted =  cv2.bitwise_not(img)
    row, col = img_inverted.shape
    gaussian_x = cv2.getGaussianKernel(row, VARIANCE)
    gaussian_y = cv2.getGaussianKernel(col, VARIANCE)
    gaussian_weight = gaussian_x * np.transpose(gaussian_y)
    weighted_image = np.multiply(img_inverted, gaussian_weight)
    rough_point = np.unravel_index(weighted_image.argmax(), weighted_image.shape)
    return rough_point
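A minimal usage sketch ('eye.png' is a hypothetical single-channel image of an eye region):

img = cv2.imread('eye.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
row, col = rough_pupil_point(img)  # (row, col) of the Gaussian-weighted intensity maximum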
Code example #6
File: sac.py Project: HerrPeterPaul/rePhotos
def getPointFromRectangle(img1, point1, point2):
	"""
	Computes point of interest in a subimage which is defined by two given points.
	:param img1: image in which point is searched
	:param point1: corner of user drawn rectangle
	:param point2: opposite corner of user drawn rectangle
	:return: point of interest in rectangle"""


	assert 0 <= point1[1] < img1.shape[0], "Point1 outside image"
	assert 0 <= point1[0] < img1.shape[1], "Point1 outside image"
	assert 0 <= point2[1] < img1.shape[0], "Point2 outside image"
	assert 0 <= point2[0] < img1.shape[1], "Point2 outside image"

	assert point1[0] != point2[0], "X coordinates of rectangle corners are equal -> no rectangle"
	assert point1[1] != point2[1], "Y coordinates of rectangle corners are equal -> no rectangle"
	
	subimage = np.copy(img1[min(point1[1],point2[1]):max(point1[1],point2[1]), 
										  min(point1[0], point2[0]):max(point1[0],point2[0])])
	subimageGray = cv2.cvtColor(subimage, cv2.COLOR_BGR2GRAY)
	subimageF = np.float32(subimageGray)
	subimageF = cv2.normalize(subimageF, subimageF, 0, 1, cv2.NORM_MINMAX)
	subimageF = cv2.GaussianBlur(subimageF, (5,5), 0)	
	
	# Detector parameters
	blockSize = 2
	apertureSize = 3
	k = 0.04
	# Detecting corners
	corners = cv2.cornerHarris( subimageF, blockSize, apertureSize, k, cv2.BORDER_DEFAULT )

	# Assume that the user wants to mark a point in the middle of the rectangle, hence weight corners using a Gaussian
	rows, cols = corners.shape
	gausCols = cv2.getGaussianKernel(cols, -1)
	gausRows = cv2.getGaussianKernel(rows, -1)
	gausMatrix = gausRows*gausCols.T
	gausMatrixNormalized = gausMatrix/gausMatrix.max()
	corners = corners * gausMatrixNormalized
	
	# get sharpest corners
	i, j = np.where(corners == corners.max())

	# get index of corner in middle of sharpest corner array, most often there is only one entry 
	index = int(i.shape[0]/2)

	#add the start position of rectangle as offset
	returnPoint = (j[index] + min(point1[0], point2[0]), i[index] + min(point1[1], point2[1]))

	return returnPoint
Code example #7
def find_balls(frame):
    ##preprocessing
    kernel = cv2.getGaussianKernel(10, 4)
    kernel = kernel * kernel.T
    frame = cv2.filter2D(frame, -1, kernel)

    ##color conversion
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # showChannels(hsv)

    if RECTANGLE:
        # rect = (point1[0], point1[1], point2[0] - point1[0], point2[1] - point1[1]);
        obj_img = hsv[point1[1] : point2[1], point1[0] : point2[0], 0:3]
        # pixels=array(obj_img)
    else:
        pixels = getPixelsAt(hsv, points)
        obj_img = [pixels]
        obj_img = np.array(obj_img)

    ##### Thresholding ####
    # lower_bound, upper_bound = get_color_range(hsv,pixels)
    # mask = cv2.inRange(hsv, lower_bound, upper_bound)

    ####Mahalanobis#####
    # mean,cov=analyze_colors(hsv,pixels)
    # invcov=np.linalg.inv(cov)
    # (w,h,c) = hsv.shape
    # mask = buildMahalanobisMask(hsv, invcov, mean, w, h)

    #### Histogram Backprojection ####
    mask = histogramBackproj(hsv, obj_img)
    center2, radius2 = findBallInMask(frame, hsv, mask)

    return center2, radius2
Code example #8
def edge_promoting(root, save):
    file_list = os.listdir(root)
    if not os.path.isdir(save):
        os.makedirs(save)
    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)
    n = 1
    for f in tqdm(file_list):
        rgb_img = cv2.imread(os.path.join(root, f))
        gray_img = cv2.imread(os.path.join(root, f), 0)
        rgb_img = cv2.resize(rgb_img, (256, 256))
        pad_img = np.pad(rgb_img, ((2,2), (2,2), (0,0)), mode='reflect')
        gray_img = cv2.resize(gray_img, (256, 256))
        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)

        gauss_img = np.copy(rgb_img)
        idx = np.where(dilation != 0)
        for i in range(np.sum(dilation != 0)):
            gauss_img[idx[0][i], idx[1][i], 0] = np.sum(np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 0], gauss))
            gauss_img[idx[0][i], idx[1][i], 1] = np.sum(np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 1], gauss))
            gauss_img[idx[0][i], idx[1][i], 2] = np.sum(np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 2], gauss))

        result = np.concatenate((rgb_img, gauss_img), 1)

        cv2.imwrite(os.path.join(save, str(n) + '.png'), result)
        n += 1
Code example #9
File: Harris.py Project: A-Alaa/cv-assignments
    def __calcHarrisMat__(self):
        self.__findGradients__()
        # M = SUM( 3X3 window )
        # H = det(M) - k*(trace(M))*(trace(M))
        # k = 0.04 <=> 0.06 , we will assume it is 0.05
        # det(M) = (Ix*Ix)*(Iy*Iy) - (Ix*Iy)*(Ix*Iy) ,  trace(M) = (Ix*Ix) + (Iy*Iy)
        gaussianKernel1D = cv2.getGaussianKernel(3, self.sigma)
        window = gaussianKernel1D * gaussianKernel1D.transpose()
        # window = np.array([ [ 1 , 1 , 1 ] ,
        #                     [ 1 , 1 , 1 ] ,
        #                     [ 1 , 1 , 1 ] ])

        gradXSquared = self.gradientX * self.gradientX
        gradYSquared = self.gradientY * self.gradientY
        gradXgradY = self.gradientX * self.gradientY

        # Calculate the summation of the window's value ( IxIx, IyIy, IxIy)

        mIxIx = convolve2d(gradXSquared, window, mode='same')
        mIyIy = convolve2d(gradYSquared, window, mode='same')
        mIxIy = convolve2d(gradXgradY, window, mode='same')

        # Calculate the |M|
        detOfMatrix = (mIxIx * mIyIy) - (mIxIy * mIxIy)
        # Calculate the trace()
        traceOfMatrix = mIxIx + mIyIy

        self.responseMat = detOfMatrix - 0.05 * traceOfMatrix * traceOfMatrix
Code example #10
File: main.py Project: kratosAgain/ComputerVision
def deconvolution(img_in):

    # Write deconvolution codes here
    def ft(im, newsize=None):
        dft = np.fft.fft2(np.float32(im), newsize)
        return np.fft.fftshift(dft)

    def ift(shift):
        f_ishift = np.fft.ifftshift(shift)
        img_back = np.fft.ifft2(f_ishift)
        return np.abs(img_back)

    gk = cv2.getGaussianKernel(21, 5)
    gk = gk * gk.T

    imf = ft(img_in, (img_in.shape[0],
                      img_in.shape[1]))  # make sure sizes match
    gkf = ft(gk, (img_in.shape[0],
                  img_in.shape[1]))  # so we can multiply easily
    imconvf = ift(imf / gkf)

    # now for example we can reconstruct the blurred image from its FT

    img_out = (imconvf * 255).astype('uint8')
    return True, img_out
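One caveat worth noting (an added remark, not from the original): the plain division imf / gkf blows up wherever the Gaussian's spectrum is near zero. A common regularized variant replaces that line with a Wiener-style division:

eps = 1e-3  # assumed regularization constant
imconvf = ift(imf * np.conj(gkf) / (np.abs(gkf) ** 2 + eps))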
Code example #11
    def set_param(self, patch_size, patch_depth=1):
        """
        patch_size must be odd
        :param patch_size:
        :param patch_depth:
        :return:
        """
        self.patch_size = patch_size
        self.patch_depth = patch_depth
        mask = np.zeros((patch_size, patch_size))
        inner_mask = np.zeros((patch_size, patch_size))
        min_mask = np.zeros((patch_size, patch_size))
        center = ((patch_size + 1) // 2, (patch_size + 1) // 2)
        self.center = center
        r = patch_size // 2

        gaussianKernel = cv2.getGaussianKernel(self.patch_size, 3)
        self.gaussianKernel2D = gaussianKernel * gaussianKernel.T

        for x in range(patch_size):
            for y in range(patch_size):
                if distanse((x + 1, y + 1), center) <= r:
                    mask[x, y] = 1.0
                if distanse((x + 1, y + 1), center) <= r - 2:
                    inner_mask[x, y] = 1.0
                if distanse((x + 1, y + 1), center) <= r - 4:
                    min_mask[x, y] = 1.0

        self.mask = mask
        self.min_mask = min_mask
        # one ring smaller than the normal mask, to suppress boundary-pixel errors introduced during rotation
        self.inner_mask = inner_mask
Code example #12
File: aindane.py Project: shiruilu/CAPE
def ace(I, In_prime, c=5):
    """ace algo in 3.2"""
    sigma = sqrt(c**2 /2)
    img_freq = np.fft.fft2(I)
    img_freq_shift = np.fft.fftshift(img_freq)
    # size of gaussian: 3*sigma covers ~99% of the mass; the ksize passed to cv2 must be an int (sigma itself may be a float)
    _gaussian_x = cv2.getGaussianKernel(int(round(sigma*3)), int(round(sigma)))
    gaussian = (_gaussian_x * _gaussian_x.T) \
               / np.sum(_gaussian_x * _gaussian_x.T) # normalize
    ##gaussian kernel padded with 0, extend to image.shape
    gaussian_freq_shift = np.fft.fftshift( np.fft.fft2(gaussian, I.shape) )

    image_fm = img_freq_shift * gaussian_freq_shift # element wise multiplication
    I_conv = np.real( np.fft.ifft2( np.fft.ifftshift(image_fm) ) ) # equation 6

    sigma_I = np.array( [np.std(I)] ) # std of I,to an array, for np.piecewise
    P = np.piecewise(sigma_I,
                     [ sigma_I<=3,
                       np.logical_and(sigma_I > 3, sigma_I < 10),
                       sigma_I>=10
                     ],
                     [ 3, 1.0*(27-2*sigma_I)/7, 1 ]
    )
    E = ((I_conv+eps) / (I+eps)) ** P
    S = 255 * np.power(In_prime, E)
    return S
Code example #13
 def change_support(self,support):
     goose=cv2.getGaussianKernel(support,support/3)
     kern=np.dot(goose,goose.T)
     self.weights=np.zeros((support,support,3),dtype=float)
     for color in range(3):
         self.weights[:,:,color]=kern
         
     self.step=support//self.od
     self.support=support
     
     self.data=np.zeros(((self.x-support)*(self.y-support),support*support*3))
     self.x_tot=self.out_shape[1]-support
     self.y_tot=self.out_shape[0]-support
     self.ind_shape=(self.y_tot//self.step,self.x_tot//self.step)
     self.bitmap=np.ones(self.ind_shape,dtype=np.uint8)
     self.inds=np.zeros(self.ind_shape,dtype=np.uint32)
     max_ind=(self.x-support)*(self.y-support)
     self.wmat=np.zeros(self.out_shape)
     for meta_ind in range(np.prod(self.ind_shape)):
         x_off=self.step*(meta_ind%self.ind_shape[1])
         y_off=self.step*(meta_ind//self.ind_shape[1])
         self.wmat[x_off:x_off+support,y_off:y_off+support,:]+=self.weights
     self.wmat[self.wmat==0]=1
     for ind in range((self.x-support)*(self.y-support)):
         xoff=ind%(self.x-support)
         yoff=ind//(self.x-support)
         self.data[ind,:]=self.sample[yoff:yoff+support,xoff:xoff+support,:].reshape(1,-1)
     ls=np.max((1,self.data.shape[0]//100))
     ls=np.min((ls,1000))
     self.tree=neighbors.KDTree(self.data,leaf_size=ls)
Code example #14
 def _createFilters(self):
     """ Creates the filters that are needed for computing the modified
     multiscale singularity index response. The filters can be used for
     processing many input images once the filters are created.
     """
 
     # Create the debiasing filter
     sigmad  = 5 * self.minScale
     ksized  = int(sigmad*3) #kernel half size
     self.Gdebias = cv2.getGaussianKernel(2*ksized+1, sigmad)
 
     # Set sigma and kernel size for the second and first order derivatives
     sigma2   = float(self.minScale)
     sigma1   = self.minScale*1.7754
     ksize2   = int(sigma2*3) + 1
     ksize1   = int(sigma1*3) + 1
 
     # Set steerable filter basis orientations
     theta1 = 0
     theta2 = np.pi/3
     theta3 = 2*np.pi/3
 
     # Create a meshgrid for second order derivatives
     X, Y = np.meshgrid(range(-ksize2,ksize2+1), range(-ksize2,ksize2+1))
     u1 = X*np.cos(theta1) - Y*np.sin(theta1)
     u2 = X*np.cos(theta2) - Y*np.sin(theta2)
     u3 = X*np.cos(theta3) - Y*np.sin(theta3)
 
     # Create an isotropic Gaussian.
     # All second derivatives are defined in terms of G0
     self.G01d = cv2.getGaussianKernel(2*ksize2+1, sigma2)
     G0 = self.G01d * self.G01d.T
 
     # Compute second partial derivatives of Gaussian
     self.G20   = (((u1**2)/(sigma2**4)) - (1/(sigma2**2))) * G0
     self.G260  = (((u2**2)/(sigma2**4)) - (1/(sigma2**2))) * G0
     self.G2120 = (((u3**2)/(sigma2**4)) - (1/(sigma2**2))) * G0
 
     # Create a separable basis filter for first partial derivative of Gaussian
     x_1  = np.linspace(-ksize1, ksize1, 2*ksize1+1)
     x_1  = np.reshape(x_1, (1, -1))
     self.G0_a = cv2.getGaussianKernel(2*ksize1+1, sigma1)
     self.G1   = -((1/sigma1)**2) * x_1 * self.G0_a.T
     
     # Set the completion flag
     self.isCreated = True
Code example #15
File: parameters.py Project: Jayme-T/imgProcessor
def modifiedLaplacian(img):
    ''''LAPM' algorithm (Nayar89)'''
    M = np.array([-1, 2, -1])
    G = cv2.getGaussianKernel(ksize=3, sigma=-1)
    Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G)
    Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M)
    FM = np.abs(Lx) + np.abs(Ly)
    return cv2.mean(FM)[0]
Code example #16
File: suppression.py Project: HungLV4/HOG
def gaussian2d(n, sigma):
	k1d = cv2.getGaussianKernel(n, sigma)
	k1d = k1d.reshape(n)

	k2d = np.zeros((n, n))
	for i in range(n):
		k2d[i, :] = k1d[i] * k1d

	return k2d
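The loop fills in the outer product of k1d with itself; a quick equivalence check (a sketch):

k1d = cv2.getGaussianKernel(7, 1.5).reshape(7)
assert np.allclose(gaussian2d(7, 1.5), np.outer(k1d, k1d))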
Code example #17
def create_noisy_image_by_clustering_euclidean(noisy_image, patch_width,
                                               cluster_size):
    """
    :param noisy_image:
    :param patch_width:
    :param cluster_size:
    :return idx:
    """
    # todo: why must Gaussian noise be added before clustering the image?
    row = cv2.getGaussianKernel(9, 1.5)
    col = cv2.getGaussianKernel(9, 1.5)
    gaussian_kernel = np.dot(col[:], np.transpose(row[:]))
    # todo anchor=?
    # todo cv2.filter2D == matlab::filter2 ?
    noisy_image = cv2.filter2D(noisy_image, -1, gaussian_kernel,
                               delta=0, borderType=cv2.BORDER_CONSTANT)
    return create_image_by_clustering_euclidean(noisy_image, patch_width,
                                                cluster_size)
Code example #18
File: common_net.py Project: gfederix/UNIT
 def __init__(self, kernel_size=5):
   super(GaussianSmoother, self).__init__()
   self.sigma = 0.3*((kernel_size-1)*0.5-1)+0.8
   kernel = cv2.getGaussianKernel(kernel_size, -1)
   kernel2d = np.dot(kernel.reshape(kernel_size,1),kernel.reshape(1,kernel_size))
   data = torch.Tensor(3, 1, kernel_size, kernel_size)
   self.pad = (kernel_size-1)//2
   for i in range(0,3):
     data[i,0,:,:] = torch.from_numpy(kernel2d)
   self.blur_kernel = Variable(data, requires_grad=False)
Code example #19
    def __init__(self,sample,support,out_shape,over_divisor):
        self.sample=np.zeros(sample.shape)
        self.sample[:,:,:]=sample
        self.x=sample.shape[1]
        self.y=sample.shape[0]

        goose=cv2.getGaussianKernel(support,support/3)
        kern=np.dot(goose,goose.T)
        self.weights=np.zeros((support,support,3),dtype=float)
        for color in range(3):
            self.weights[:,:,color]=kern
        
        self.od=over_divisor
        self.step=support//self.od
        
        self.guess=np.zeros(out_shape,dtype=float)
        for pix in range(out_shape[0]*out_shape[1]):
            xo=int(self.x*np.random.rand())
            yo=int(self.y*np.random.rand())
            self.guess[pix//out_shape[1],pix%out_shape[1],:]=sample[yo,xo,:]
        max_node=(out_shape[0]//support)*(out_shape[1]//support)
        max_ind=(self.x-support)*(self.y-support)
        for node in range(max_node):
            x_off=(node%(out_shape[1]//support))*support
            y_off=(node//(out_shape[1]//support))*support
            ind=int(np.random.rand()*max_ind)
            xoff=ind%(self.x-support)
            yoff=ind//(self.x-support)
            print(sample[yoff:yoff+support,xoff:xoff+support,:].shape)
            self.guess[y_off:y_off+support,x_off:x_off+support]=sample[yoff:yoff+support,xoff:xoff+support,:]
        self.support=support
        
        
        self.x_tot=out_shape[1]-support
        self.y_tot=out_shape[0]-support
        self.ind_shape=(self.y_tot//self.step,self.x_tot//self.step)
        self.bitmap=np.ones(self.ind_shape,dtype=np.uint8)

        self.data=np.zeros(((self.x-support)*(self.y-support),support*support*3))
        #max_ind=(self.x-self.support)*(self.y-self.support)
        self.inds=np.uint32(np.random.rand(self.ind_shape[0],self.ind_shape[1])*max_ind)
        self.wmat=np.zeros(out_shape)
        for meta_ind in range(np.prod(self.ind_shape)):
            x_off=self.step*(meta_ind%self.ind_shape[1])
            y_off=self.step*(meta_ind//self.ind_shape[1])
            self.wmat[x_off:x_off+support,y_off:y_off+support,:]+=self.weights
        self.wmat[self.wmat==0]=1
        for ind in range((self.x-support)*(self.y-support)):
            xoff=ind%(self.x-support)
            yoff=ind//(self.x-support)
            self.data[ind,:]=sample[yoff:yoff+support,xoff:xoff+support,:].reshape(1,-1)
        ls=np.max((1,self.data.shape[0]//100))
        ls=np.min((ls,10000))
        self.tree=neighbors.KDTree(self.data,leaf_size=ls)
        self.out_shape=out_shape
Code example #20
 def update_gaussian_filter_win(self, dummy=None):
     """
     Update Gaussian Kernel param and the result image.
     """
     ks = cv2.getTrackbarPos("kize=2n+1:",
                             self.ctrl_panel_winname)
     kernel = cv2.getGaussianKernel(ks*2+1, 0)
     dst = cv2.filter2D(self.img, -1, kernel)
     self.tmp = dst
     self.get_dft()
     cv2.imshow(self.test_winname, dst)
Code example #21
File: plotters.py Project: speed-of-light/iphw2013
 def gaussian_mask(self, img, sigma=1, inverse=False):
     hs, ws = img.shape
     sqsize = self.make_odd(min(hs, ws))
     xshift = ( ws - sqsize )//2
     yshift = ( hs - sqsize )//2
     x = cv2.getGaussianKernel( sqsize, sigma )
     gaussian = ( x*x.T ) * 1000
     gaussian = ( 1 - gaussian ) if inverse else gaussian
     mask = np.ones(( hs, ws, 2),np.float16) if inverse else np.zeros(( hs, ws, 2),np.float16)
     mask[ yshift:sqsize+yshift, xshift:sqsize+xshift, 0] = gaussian
     mask[ yshift:sqsize+yshift, xshift:sqsize+xshift, 1] = gaussian
     return mask
Code example #22
def FrameSmoth(frame):

    ''' In this stage of the algorithm we implement the 'blurring' process -
        the function calculates the score of each frame of the interval (0.25 s) by applying the Gaussian.
        The goal of this process is to avoid 'false positives' on the frames that we recognized as different. '''

    gaussian =cv2.getGaussianKernel(5,10)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray=cv2.filter2D(gray,-1,gaussian)
    #gray=signal.convolve2d(gray, gaussian,mode='same')
    gray=normalize(gray)
    return gray
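Note that cv2.getGaussianKernel(5, 10) returns a 5x1 column vector, so the filter2D call above smooths along one axis only. If a full 2-D blur is intended, the body could use the outer product or a separable filter instead (a sketch under that assumption):

    gaussian2d = gaussian * gaussian.T
    gray = cv2.filter2D(gray, -1, gaussian2d)
    # or equivalently: gray = cv2.sepFilter2D(gray, -1, gaussian, gaussian)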
Code example #23
def resize(image, d_w, d_h):
    if (d_h < image.shape[0]) or (d_w < image.shape[1]):
        ratio = min(d_h / float(image.shape[0]), d_w / float(image.shape[1]))

        kernel_size = int( 5 / (2 * ratio))
        sigma = 0.5 / ratio
        image_to_resize = cv2.filter2D(image, cv2.CV_8UC3, cv2.getGaussianKernel(kernel_size, sigma))
        interpolation_type = cv2.INTER_AREA
    else:
        image_to_resize = image
        interpolation_type = cv2.INTER_CUBIC

    return cv2.resize(image_to_resize,(d_w, d_h), None, 0, 0, interpolation_type)
def resized_image_wrong(image, desired_width, desired_height):
    """
    Resizes an image, applies adequate filter,
    and then subsample/upsample the image
    """

    input_height, input_width, _ = image.shape
    resized_shape = (desired_height, desired_width)

    will_do_shrinking = \
        (resized_shape[0] < image.shape[0]) \
        or (resized_shape[1] < image.shape[1])

    is_octave_minus_one = (desired_height < 100)
    # we noticed that oversmoothing hurts octave 0
    if will_do_shrinking and is_octave_minus_one:
        height_ratio = resized_shape[0] / float(image.shape[0])
        width_ratio = resized_shape[1] / float(image.shape[1])
        resizing_ratio = min(height_ratio, width_ratio)

        #print("resized_shape ==", resized_shape)
        #print("image.shape ==",  image.shape)
        #assert resizing_ratio > 0
        assert resizing_ratio < 1
        # Following OpenCv and Vincent De Smet suggestions for the
        # kernel size and sigma values
        try:
            kernel_size = int(5/(2*resizing_ratio))
            # (even with sigma zero, there will be smoothing)
            sigma = 0.5 / resizing_ratio
            kernel = cv2.getGaussianKernel(kernel_size, sigma)
            image_to_resize = cv2.filter2D(image, cv2.CV_8UC3, kernel)
        except Exception:
            print("WARNING: could not apply filter to image")
            image_to_resize = image
        interpolation_type = cv2.INTER_AREA

    else:
        # doing streching
        image_to_resize = image
        #interpolation_type = cv2.INTER_LINEAR
        interpolation_type = cv2.INTER_CUBIC
        #interpolation_type = cv2.INTER_LANCZOS4

    destination_size = (desired_width, desired_height)
    resized_image = cv2.resize(image_to_resize,
                               destination_size,
                               None,
                               0, 0, interpolation_type)

    return resized_image
Code example #25
File: remove_slp.py Project: antiapt/crisp
def remove_slp(img, gstd1=GSTD1, gstd2=GSTD2, gstd3=GSTD3, ksize=KSIZE, w=W):
    """Remove the SLP from kinect IR image
    
    The input image should be a float32 numpy array, and should NOT be a square root image
    Parameters
    ------------------
    img : (M, N) float ndarray
            Kinect NIR image with SLP pattern
    gstd1 : float
            Standard deviation of gaussian kernel 1
    gstd2 : float
            Standard deviation of gaussian kernel 2
    gstd3 : float
            Standard deviation of gaussian kernel 3
    ksize : int
            Size of kernel (should be odd)
    w   : float
            Weighting factor

    Returns
    ------------------
    img_noslp : (M,N) float ndarray
            Input image with SLP removed
    """
    gf1 = cv2.getGaussianKernel(ksize, gstd1)
    gf2 = cv2.getGaussianKernel(ksize, gstd2)
    gf3 = cv2.getGaussianKernel(ksize, gstd3)
    sqrtimg = cv2.sqrt(img)
    p1 = cv2.sepFilter2D(sqrtimg, -1, gf1, gf1)
    p2 = cv2.sepFilter2D(sqrtimg, -1, gf2, gf2)
    maxarr = np.maximum(0, (p1 - p2) / p2)
    minarr = np.minimum(w * maxarr, 1)
    p = 1 - minarr
    nc = cv2.sepFilter2D(p, -1, gf3, gf3) + EPS
    output = cv2.sepFilter2D(p*sqrtimg, -1, gf3, gf3)
    output = (output / nc) ** 2 # Since input is sqrted
    
    return output
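A hypothetical usage sketch: GSTD1, GSTD2, GSTD3, KSIZE, W and EPS are module-level constants defined elsewhere in the original file, so the values below are placeholders only:

nir = cv2.imread('kinect_nir.png', cv2.IMREAD_GRAYSCALE).astype(np.float32)  # hypothetical file
img_noslp = remove_slp(nir, gstd1=1.0, gstd2=3.0, gstd3=9.0, ksize=31, w=4.0)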
Code example #26
def gauss_kernel_3d(dims, dimt, sig, tau):
    """

    :param dims: spatial side length of the 3D Gaussian kernel
    :param dimt: temporal length of the kernel
    :param sig: standard deviation in space
    :param tau: standard deviation in time
    :return: kern : (dimt, dims, dims)-ndarray which represents the gaussian kernel
    """

    # initialisation of the kernel
    kern = np.zeros(shape=(dimt, dims, dims), dtype=float)

    # compute the kernel in the space dimension
    kernxy = cv2.getGaussianKernel(dims, sig)
    kern_space = np.outer(kernxy, kernxy)

    # compute the kernel in the time dimension
    kernt = cv2.getGaussianKernel(dimt, tau)

    # product of both to obtain the spatio-temporal kernel
    for n in range(dimt):
        kern[n] = kern_space * kernt[n]

    return kern
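Since every 1-D factor returned by cv2.getGaussianKernel sums to 1, the full spatio-temporal kernel does too (a quick check):

kern = gauss_kernel_3d(dims=9, dimt=5, sig=1.5, tau=1.0)
assert np.isclose(kern.sum(), 1.0)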
Code example #27
File: ps5.py Project: cowens85/Fall2015
def one_b(Ix, Iy, run="foo"):
    kernel = cv2.getGaussianKernel(3,0.3)
    # kernel = np.ones((3, 3), dtype=np.float) / 9.0
    R = harris_response(Ix, Iy, kernel, 0.04)

    # Scale/type-cast response map and write to file
    if run == "transA":
        write_image(R, "ps5-1-b-1.png", scale=True)
    elif run == "transB":
        write_image(R, "ps5-1-b-2.png", scale=True)
    elif run == "simA":
        write_image(R, "ps5-1-b-3.png", scale=True)
    elif run == "simB":
        write_image(R, "ps5-1-b-4.png", scale=True)

    return R
Code example #28
File: util.py Project: youngtaekoh/livewire
def getGaussianDerivs(sigma, M):
    L = (M - 1)//2
    sigma_sq = sigma * sigma
    sigma_quad = sigma_sq * sigma_sq

    dg = np.zeros(M)
    d2g = np.zeros(M)

    gaussian = np.squeeze(cv2.getGaussianKernel(M, sigma, cv2.CV_64F))
    for i in range(-L, L+1):
        idx = i + L
        # from http://www.cedar.buffalo.edu/~srihari/CSE555/Normal2.pdf
        dg[idx] = (-i/sigma_sq) * gaussian[idx]
        d2g[idx] = (-sigma_sq + i*i)/sigma_quad * gaussian[idx]

    return gaussian, dg, d2g
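A usage sketch (the signal and parameter values are illustrative): convolving with dg approximates the first derivative of the Gaussian-smoothed signal, up to the sample spacing:

g, dg, d2g = getGaussianDerivs(sigma=2.0, M=13)  # M must be odd
t = np.linspace(0, 4 * np.pi, 200)
dsignal = np.convolve(np.sin(t), dg, mode='same')  # ~ derivative of the smoothed sine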
Code example #29
File: imtools.py Project: sweetgoofy/USS
    def digitise(img):
        ''' Extract water level from gradient field'''
        ret = np.zeros(img.shape[1])
        max_indices = []

        # Generate smoothing kernels
        kernal_gauss = cv2.getGaussianKernel(img.shape[0], 20)
        kernal_gauss *= 1.0/np.max(kernal_gauss)

        # Estimate centres for smoothing kernels.
        for i in range(img.shape[1]):
            col = img[:,i]
            grad_avg = fftconvolve(col, [1./15 for i in range(15)],
                                   mode = 'same')
            max_idx = np.argmin(grad_avg)
            max_indices.append(max_idx)

        max_indices = medfilt(max_indices, 31)
        max_indices = gaussian_filter(max_indices, 55)
        cpy = max_indices
        max_indices = fit_sine(max_indices).astype('int32')

        # find edge
        for i in range(img.shape[1]):
            col = img[:,i]
            max_idx = max_indices[i]
            k = np.roll(kernal_gauss, max_idx - img.shape[0]//2)

            # Nullify data further than 3 times the Gaussian std.
            if max_idx > img.shape[0]//2:
                k[:max_idx - 20 * 3] = 0
            else:
                k[max_idx + 20 * 3:] = 0

            col = col.astype('float64')*k[:,0]

            # Threshold for detecting water surface
            thresh = np.percentile(col[col<0],1)
            for j in range(len(col)-1, -1, -1):
                if col[j] <= thresh:
                    ret[i] = j
                    break

        # Smooth result for shot noise
        ret = medfilt(ret, 31)

        return img.shape[0] - ret
Code example #30
File: Metrics.py Project: IamSVP/MPTC
def compute_SSIM(imagename1,imagename2):
	img1 = cv2.imread(imagename1)
	img2 = cv2.imread(imagename2)
	print(imagename2, imagename1)
	img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
	img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
	height,width = img1.shape
	img1 = np.float64(img1)
	img2 = np.float64(img2)
	
	window = cv2.getGaussianKernel(11,2.0,cv2.CV_64F)
	window = window * window.T  # 2-D Gaussian window, as the standard SSIM formulation uses

	
	C1 = 6.5025
	C2 = 58.5525

	mu1 = cv2.filter2D(img1,cv2.CV_64F,window)
	mu2 = cv2.filter2D(img2,cv2.CV_64F,window)
	mu1_sq = cv2.multiply(mu1,mu1)
	mu2_sq = cv2.multiply(mu2,mu2)
	mu1_mu2 = cv2.multiply(mu1,mu2)

	img1_sq = cv2.multiply(img1,img1)
	img2_sq = cv2.multiply(img2,img2)
	img1_img2 = cv2.multiply(img1,img2)

	sigma1_sq = cv2.filter2D(img1_sq,cv2.CV_64F,window)
	sigma1_sq = cv2.subtract(sigma1_sq,mu1_sq)

	sigma2_sq = cv2.filter2D(img2_sq,cv2.CV_64F,window)
	sigma2_sq = cv2.subtract(sigma2_sq,mu2_sq)	


	sigma12 = cv2.filter2D(img1_img2,cv2.CV_64F,window)
	sigma12 = cv2.subtract(sigma12,mu1_mu2)

	t1 = 2*mu1_mu2 + C1
	t2 = 2*sigma12 + C2
	t3 = cv2.multiply(t1,t2)

	t1 = mu1_sq + mu2_sq + C1
	t2 = sigma2_sq + sigma1_sq + C2
	t1 = cv2.multiply(t1,t2)

	ssim_map = cv2.divide(t3,t1)
	mssim = cv2.mean(ssim_map)
	return "{0:.5f}".format(mssim[0]),height,widht
Code example #31
def main():
    """
    The main function for the file.
    """
    parser = argparse.ArgumentParser(description="Create a hybrid of images " +
                                     "based off the features found in them.")
    parser.add_argument(
        "featureFile",
        nargs=1,
        type=str,
        default=None,
        help="The file to read the features, viewable in a full size depiction "
        + "of the image, to merge into the hybrid image.")
    parser.add_argument(
        "colorFile",
        nargs=1,
        type=str,
        default=None,
        help="The file to read the colors out of, viewable in a preview like "
        + "depiction, to merge into the hybrid image.")
    parser.add_argument("output",
                        nargs="?",
                        type=str,
                        default="hybrid.png",
                        help="The hybrid image output.")
    args = parser.parse_args()

    # Open the images
    featureImage = cv2.imread(args.featureFile[0], cv2.IMREAD_COLOR)
    colorImage = cv2.imread(args.colorFile[0], cv2.IMREAD_COLOR)
    if featureImage is None:
        print('Error opening feature image.')
        parser.print_usage()
        return -1
    if colorImage is None:
        print('Error opening color image.')
        parser.print_usage()
        return -1

    # Make the images the same size
    if colorImage.shape[0] > featureImage.shape[0]:
        colorImage = colorImage[:featureImage.shape[0], :, :]
    else:
        featureImage = featureImage[:colorImage.shape[0], :, :]
    if colorImage.shape[1] > featureImage.shape[1]:
        colorImage = colorImage[:, :featureImage.shape[1], :]
    else:
        featureImage = featureImage[:, :colorImage.shape[1], :]

    # Step 1
    featureNorm = normalizeImage(featureImage)
    colorNorm = normalizeImage(colorImage)

    # Step 2
    kernel = cv2.getGaussianKernel(KERNEL_SIZE, KERNEL_SIGMA)
    lowpassKernel = kernel * kernel.transpose()

    # Step 3
    highpassKernel = np.zeros_like(kernel, dtype=np.float32)
    highpassKernel[int(KERNEL_SIZE / 2)] = 1
    highpassKernel = (highpassKernel *
                      highpassKernel.transpose()) - lowpassKernel

    # Step 4
    lowImage = cv2.filter2D(colorNorm, -1, lowpassKernel) * 255
    highImage = cv2.filter2D(featureNorm, -1, highpassKernel) * 255

    # Step 5
    outputImage = lowImage + highImage
    outfileName = args.output
    if outfileName[-4:] != DEFAULT_EXT:
        outfileName += DEFAULT_EXT

    return cv2.imwrite(outfileName, outputImage)
Code example #32
File: filter.py Project: Marbel/Python-Image-Filter
import numpy as np
import cv2


def dummy(val):
    pass


identity_kernel = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
sharpen_kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
# use cv2 kernel
gaussian_kernel1 = cv2.getGaussianKernel(3, 0)
gaussian_kernel2 = cv2.getGaussianKernel(5, 0)
box_kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], np.float32) / 9

kernels = [
    identity_kernel, sharpen_kernel, gaussian_kernel1, gaussian_kernel2,
    box_kernel
]

windowName = 'app'
color_original = cv2.imread("test.jpg")
color_modified = color_original.copy()
gray_original = cv2.cvtColor(color_original, cv2.COLOR_BGR2GRAY)
gray_modified = gray_original.copy()

cv2.namedWindow(windowName)
cv2.createTrackbar('contrast', windowName, 1, 100, dummy)
cv2.createTrackbar('brightness', windowName, 50, 100, dummy)
cv2.createTrackbar('filter', windowName, 0, len(kernels) - 1, dummy)
cv2.createTrackbar('grayscale', windowName, 0, 1, dummy)
Code example #33
File: saeutils.py Project: MesumRaza/saenews
    def get_vignet_face(self, input_arg, output_file='', fxy=('', '')):
        repo_path = get_path()
        if (type(input_arg) == str):
            img = cv2.imread(input_arg, 1)
        elif (type(input_arg) == np.ndarray):
            img = input_arg
        else:
            img = input_arg
        file_name = input_arg.split('.')[0] if isinstance(input_arg, str) else ''
        if (fxy == 'centre'):
            H, W = img.shape[:2]
            fx, fy = W // 2, H // 2
        elif (fxy[0] == '' or fxy[1] == ''):
            # Finding the Face
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            try:
                face_cascade = cv2.CascadeClassifier(
                    repo_path + '/haarcascade_frontalface_default.xml')
            except:
                r = requests.get(
                    'https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml'
                )
                ff = open(repo_path + '/haarcascade_frontalface_default.xml',
                          'w')
                ff.write(str(r.content.decode("utf-8")))
                ff.close()

            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

            try:
                x, y, w, h = faces[0]
                fx, fy = x + w // 2, y + h // 2
            except IndexError:
                H, W = img.shape[:2]
                fx, fy = W // 2, H // 2
                print(
                    'No Face detected in the image. Keeping the focus at the centre point'
                )

        else:
            fx, fy = fxy

        # Focus Cordinate is already put
        rows, cols = img.shape[:2]
        sigma = min(rows, cols) // 2.5  # Standard Deviation of the Gaussian

        fxn = fx - cols // 2  # Normalised temperory vars
        fyn = fy - rows // 2

        zeros = np.copy(img)
        zeros[:, :, :] = 0

        a = cv2.getGaussianKernel(2 * cols, sigma)[cols - fx:2 * cols - fx]
        b = cv2.getGaussianKernel(2 * rows, sigma)[rows - fy:2 * rows - fy]
        c = b * a.T
        d = c / c.max()
        zeros[:, :, 0] = img[:, :, 0] * d
        zeros[:, :, 1] = img[:, :, 1] * d
        zeros[:, :, 2] = img[:, :, 2] * d

        # zeros = add_alpha(zeros)
        if output_file == '':
            output_file = 'vignet_out' + '.png'
        cv2.imwrite(output_file, zeros)
        return (output_file)
Code example #34
import cv2

# Explore edge options

# Load an image
img = cv2.imread('images/fall-leaves.png')
cv2.imshow('Image', img)

# TODO: Create a Gaussian filter. Use cv2.getGaussianKernel.
gaussian_kernel = cv2.getGaussianKernel(ksize=5, sigma=3)
# TODO: Apply it, specifying an edge parameter (try different parameters).
# Use cv2.filter2D.
smoothed_img_border_const = cv2.filter2D(img,
                                         -1,
                                         gaussian_kernel,
                                         borderType=cv2.BORDER_CONSTANT)
cv2.imshow('Border Constant', smoothed_img_border_const)

smoothed_img_border_replicate = cv2.filter2D(img,
                                             -1,
                                             gaussian_kernel,
                                             borderType=cv2.BORDER_REPLICATE)
cv2.imshow('Border Replicate', smoothed_img_border_replicate)

smoothed_img_border_reflect = cv2.filter2D(img,
                                           -1,
                                           gaussian_kernel,
                                           borderType=cv2.BORDER_REFLECT)
cv2.imshow('Border Reflect', smoothed_img_border_reflect)

smoothed_img_border_reflect101 = cv2.filter2D(img,
                                              -1,
                                              gaussian_kernel,
                                              borderType=cv2.BORDER_REFLECT_101)
cv2.imshow('Border Reflect 101', smoothed_img_border_reflect101)
cv2.waitKey(0)
Code example #35
File: week2_code_uni.py Project: weijian321/CV
# The image gets blurrier: the larger kernel averages over a wider neighbourhood
g_img = cv2.GaussianBlur(img,(17,17),5)
cv2.imshow('gaussian_blur_lenna', g_img)
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()

# The image looks sharper: a smaller sigma makes the Gaussian more peaked, so the centre pixel dominates
g_img = cv2.GaussianBlur(img,(7,7),1)
cv2.imshow('gaussian_blur_lenna', g_img)
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()

# Let's inspect the Gaussian kernel
kernel = cv2.getGaussianKernel(7, 5)
print(kernel)

# Why 1-D? Because 1-D convolutions are faster
# Theory: the 2-D Gaussian is separable
# Compare the implicit Gaussian with an explicit two-pass (separable) Gaussian
g1_img = cv2.GaussianBlur(img,(7,7),5)
g2_img = cv2.sepFilter2D(img, -1, kernel, kernel) # ori, depth, kernelX, kernelY
cv2.imshow('g1_blur_lenna', g1_img)
cv2.imshow('g2_blur_lenna', g2_img)
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()

######## Other Applications #########
# 2nd derivative: laplacian (double-edge effect)
Code example #36
    def __init__(self):
        super(Video, self).__init__()

        self.play = True
        self.exit = False
        self.msecPerFrame = 1
        self.filename = ""

        # no.1
        self.grayscale = False
        # no.2
        self.invert = False
        # no.3
        self.histeql = False
        # no.4
        self.thresholding = False
        self.threshold = 125
        # no.5
        self.reducecolors = False
        self.reducechannelvalues = 2
        # no.5
        self.median = False
        self.mediansize = 3
        # no.6
        self.blur = False
        self.blurkernelsize = 3
        self.blurkernel = []
        # no.7
        self.sharpen = False
        self.sharpenkernelsize = 3
        self.sharpenkernel = []
        # no.8
        self.edges = False
        self.edgeKernel = []
        # no.9
        self.canny = False
        self.cannynumber = 10
        self.cannynumber2 = 30
        # no.10
        self.cardColorFun = False
        self.changeFileName = "../change.png"
        self.changeCard = None
        # no.11
        self.diffFun = False
        self.diffFileName = "../111.jpg"
        self.diffImage = None
        # no.12
        self.gaussianBlurFun = False
        self.gaussianX = 9
        self.gaussianY = 9
        # no.13
        self.medianBlurFun = False
        self.medianSize2 = 9
        # no.14
        self.averageBlurFun = False
        self.averageX = 9
        self.averageY = 9
        # no.15
        self.normalizationFun = False
        # no.16
        self.mapColorFun = False
        self.COLORMAP = cv2.COLORMAP_PINK

        ###############generate 15 Gaussian blur kernels from 3*3 to 31*31
        for i in range(3, 32, 2):
            self.blurkernel.insert(
                i // 2 - 1,
                cv2.getGaussianKernel(i, 0) *
                cv2.transpose(cv2.getGaussianKernel(i, 0)))
        #0-14:3-31
        # print("Blur Kernel my")
        # print(self.blurkernel)

###############generate sharpen kernel
# for i in range(3,32,2):
#     self.sharpenkernel.insert(i, cv2.Laplacian(self.blurkernel[i//2-1], cv2.CV_64F))
#     self.sharpenkernel[i//2-1] *= -1
        for i in range(3, 32, 2):
            #self.sharpenkernel.insert(i,self.blurkernel[i//2-1].copy())
            self.sharpenkernel.insert(i // 2 - 1,
                                      self.blurkernel[i // 2 - 1].copy())
            self.sharpenkernel[i // 2 - 1][i // 2][
                i // 2] -= 2.  # the center of the current matrix minus 2 (x-2)
            self.sharpenkernel[
                i // 2 -
                1] *= -1  # negate every element of the matrix (-x, 2-x)
        # print("Sharpen Kernel my")
        # print(self.sharpenkernel)

#######################generate edge kernel
        self.edgeKernel = np.array([[-1., -1., -1.], [-1., 8., -1.],
                                    [-1., -1., -1.]])
Code example #37
def gaussian_kernel(filter_size, sigma, mean):
    kx = cv2.getGaussianKernel(filter_size[0], sigma)
    ky = cv2.getGaussianKernel(filter_size[1], sigma)
    k = kx * np.transpose(ky)
    k *= (mean / np.max(k))
    return k
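A usage sketch: a 32x64 Gaussian blob whose peak equals the requested mean:

k = gaussian_kernel((32, 64), sigma=8, mean=1.0)
print(k.shape, k.max())  # (32, 64) 1.0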
Code example #38
def blur_image(im):
    gaus_kernel = cv2.getGaussianKernel(20, 20)
    my_im_blur = cv2.filter2D(im, -1, gaus_kernel)
    cv2.imshow('blurred_image', my_im_blur)
    cv2.waitKey(0)
    return
Code example #39
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Create the kernel
kernel = np.ones((5, 5), np.float32) / 25

# 2D Convolutional Filter
dst = cv2.filter2D(img, -1, kernel)

# Averaging filter
blur = cv2.blur(img, (5, 5))

# Gaussian filter
gaussian_blur = cv2.GaussianBlur(img, (5, 5), 0)

# If you want, you can create a Gaussian kernel with thefunction,
print("Gaussian Kernel is: ", cv2.getGaussianKernel((10), 0))

# Median filter: replacing each pixel's value with the median of its neighboring pixels. This method is great when dealing with "salt and pepper noise".
median = cv2.medianBlur(img, 5)

# Bilateral filter: a non-linear, edge-preserving, and noise-reducing smoothing filter for images.
# It is highly effective at noise removal while preserving edges. But the operation is slower compared to other filters.
bilateralFilter = cv2.bilateralFilter(img, 9, 75, 75)

titles = [
    'image', '2D Convolution', 'blur', 'GaussianBlur', 'median',
    'bilateralFilter'
]
images = [img, dst, blur, gaussian_blur, median, bilateralFilter]

for i in range(6):
    # assumes `from matplotlib import pyplot as plt` earlier in the script
    plt.subplot(2, 3, i + 1), plt.imshow(images[i]), plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
plt.show()
Code example #40
            value = 0
            for k in range(0, len(kernely)):
                value = value + conv[row + k][col + ancho_borde] * kernely[k]

            convfinal[row][col] = value

    return convfinal


### SHOW IMAGE

# Image
orig = cv2.imread('imagenes/marilyn.bmp', 0)

# Kernel with mask size 3 and sigma 1
kernel_array = cv2.getGaussianKernel(3, 1)
kernel31 = []
for k in range(0, len(kernel_array)):
    kernel31.append(kernel_array[k][0])

# Kernel with mask size 5 and sigma 1
kernel_array = cv2.getGaussianKernel(5, 1)
kernel51 = []
for k in range(0, len(kernel_array)):
    kernel51.append(kernel_array[k][0])

# Kernel with mask size 3 and sigma 3
kernel_array = cv2.getGaussianKernel(3, 3)
kernel33 = []
for k in range(0, len(kernel_array)):
    kernel33.append(kernel_array[k][0])
Code example #41
def ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):
    rows, cols = im.shape

    # Calculate image gradients.

    sze = np.fix(6 * gradientsigma)

    if np.remainder(sze, 2) == 0:
        sze = sze + 1

    gauss = cv2.getGaussianKernel(int(sze), gradientsigma)

    f = gauss * gauss.T

    fy, fx = np.gradient(f)
    # Gradient of Gaussian

    # Gx = ndimage.convolve(np.double(im),fx);

    # Gy = ndimage.convolve(np.double(im),fy);

    Gx = signal.convolve2d(im, fx, mode='same')

    Gy = signal.convolve2d(im, fy, mode='same')

    Gxx = np.power(Gx, 2)

    Gyy = np.power(Gy, 2)

    Gxy = Gx * Gy

    # Now smooth the covariance data to perform a weighted summation of the data.

    sze = np.fix(6 * blocksigma)

    gauss = cv2.getGaussianKernel(int(sze), blocksigma)

    f = gauss * gauss.T

    Gxx = ndimage.convolve(Gxx, f)

    Gyy = ndimage.convolve(Gyy, f)

    Gxy = 2 * ndimage.convolve(Gxy, f)

    # Analytic solution of principal direction

    denom = np.sqrt(np.power(Gxy, 2) +
                    np.power((Gxx - Gyy), 2)) + np.finfo(float).eps

    sin2theta = Gxy / denom
    # Sine and cosine of doubled angles

    cos2theta = (Gxx - Gyy) / denom

    if orientsmoothsigma:

        sze = np.fix(6 * orientsmoothsigma)

        if np.remainder(sze, 2) == 0:
            sze = sze + 1

        gauss = cv2.getGaussianKernel(int(sze), orientsmoothsigma)

        f = gauss * gauss.T

        cos2theta = ndimage.convolve(cos2theta, f)
        # Smoothed sine and cosine of

        sin2theta = ndimage.convolve(sin2theta, f)
        # doubled angles

    orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2

    return (orientim)
Code example #42
                    elif event.type == KEYDOWN:
                        if event.key == pygame.K_ESCAPE:
                            control = False
                            sys.exit()
                        if event.key == pygame.K_RIGHT:
                            if spaceBreak == True:
                                kt = False

                if (duracao > tempoTransicao or kt == False):
                    print('salvando ' + palavra + ' duracao ' + str(duracao))

                    qt = False
                    screen.blit(bckg, (0, 0))
                    pygame.display.update()
                    aux = str(duracao)
                    gaussian = cv.getGaussianKernel(ksize=49, sigma=12)

                    pygame.image.save(
                        bckg, 'images/' + nome + '/' + palavra + '_bloco_' +
                        str(numBloco) + '_image ' + aux + '.png')
                    pygame.image.save(
                        bckg2, 'images/' + nome + '/' + palavra + '_bloco_' +
                        str(numBloco) + '_bck2_image ' + aux + '.png')
                    imgR = cv.imread('images/' + nome + '/' + palavra +
                                     '_bloco_' + str(numBloco) +
                                     '_bck2_image ' + aux + '.png')
                    #imgR = cv.cvtColor(imgR, cv.COLOR_BGR2GRAY)
                    blurredImage = cv.GaussianBlur(imgR,
                                                   (kernelBlur, kernelBlur),
                                                   12)
                    #blurredImage = cv.filter2D(imgR,-1,gaussian)
Code example #43
File: change.py Project: McFlyWYF/opencv-for-python
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])

plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img_back, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
'''
Analysis of different operators
'''

# simple averaging filter without scaling parameter
mean_filter = np.ones((3, 3))
# creating a gaussian filter
x = cv2.getGaussianKernel(5, 10)
# x.T is the transpose of x
gaussian = x * x.T

# different edge detecting filters
# scharr in x-direction
scharr = np.array([[-3, 0, 3], [-10, 0, 10], [-3, 0, 3]])
# sobel in x direction
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])

# sobel in y direction
sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
# laplacian
laplacian = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])

filters = [mean_filter, gaussian, laplacian, sobel_x, sobel_y, scharr]
Code example #44
import cv2
import numpy as np

img = cv2.imread('../img/gaussian_noise.jpg')

# Blur with a hand-built Gaussian kernel
k1 = np.array([ [1, 2, 1],
                [2, 4, 2],
                [1, 2, 1]]) * (1 / 16)

blur1 = cv2.filter2D(img, -1, k1)

# Blur with a Gaussian kernel obtained from the API
k2 = cv2.getGaussianKernel(3, 0)
blur2 = cv2.filter2D(img, -1, k2 * k2.T)

# Blur with the GaussianBlur API
blur3 = cv2.GaussianBlur(img, (3, 3), 0)

# Show the results
print('k1:', k1)
print('k2:', k2 * k2.T)
merged = np.hstack((img, blur1, blur2, blur3))
cv2.imshow('gaussian blur', merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
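The explicit 2-D kernel and the separable two-pass form are interchangeable here (up to rounding on uint8 images); blur2 could equally be computed as this sketch shows:

blur2_sep = cv2.sepFilter2D(img, -1, k2, k2)  # same result as cv2.filter2D(img, -1, k2 * k2.T)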
Code example #45
File: down_sample.py Project: demonzyj56/sisr_cifar
#!/usr/bin/env python

import os
import numpy as np
import cv2

kernel = cv2.getGaussianKernel(3, 0.05)  # size and sigma


def blur_image(img, scale):
    new_img = cv2.filter2D(img, -1, kernel)
    new_img = cv2.resize(new_img, None, fx=1. / scale, fy=1. / scale)
    # new_img = cv2.resize(new_img, None, fx=scale, fy=scale)
    return new_img


def read_image(idx, cache_dir, format="{}.png"):
    filename = os.path.join(cache_dir, format.format(str(idx)))
    return cv2.imread(filename)


def save_image(img, idx, cache_dir, format="{}_down.png"):
    filename = os.path.join(cache_dir, format.format(str(idx)))
    cv2.imwrite(filename, img)


if __name__ == "__main__":
    cache_dir = "cache_test"
    down_cache_dir = "cache_test_down"
    for i in range(0, 10000):
        print(i)
        img = read_image(i, cache_dir)
        save_image(blur_image(img, 2), i, down_cache_dir)  # scale factor 2 is an assumed placeholder
Code example #46
 def generate_gaussian_kernel(sz):
     kernel = cv2.getGaussianKernel(sz, 0)
     kernel = np.dot(kernel, kernel.transpose())
     return tf.cast(kernel[:, :, np.newaxis, np.newaxis], tf.float32)
def gaussian_filter_density(gt, pts, r=15, c=15, sigma=4):
    density = np.zeros(gt.shape, dtype=np.float32)
    gt_count = len(pts)

    if gt_count == 0:
        return density

    #     print('=======generating ground truth=========')
    Fixed_H = np.multiply(cv2.getGaussianKernel(r, sigma),
                          (cv2.getGaussianKernel(c, sigma)).T)
    H = Fixed_H
    h, w = gt.shape

    #     print('imageshape: ', gt.shape)

    for i, point in enumerate(pts):
        x = min(w, max(0, abs(int(point[0]))))  # read x?
        y = min(h, max(0, abs(int(point[1]))))  # read y?
        # pixel: (y,x)
        if x >= w or y >= h:
            continue
        x1 = x - int(c / 2)
        x2 = x + int(c / 2)
        y1 = y - int(r / 2)
        y2 = y + int(r / 2)

        dfx1 = 0
        dfx2 = 0
        dfy1 = 0
        dfy2 = 0
        change_H = False
        if x1 <= 0:
            dfx1 = abs(x1)
            x1 = 0
            change_H = True
        if y1 <= 0:
            dfy1 = abs(y1)
            y1 = 0
            change_H = True
        if x2 >= w:
            dfx2 = x2 - (w - 1)
            x2 = w - 1
            change_H = True
        if y2 >= h:
            dfy2 = y2 - (h - 1)
            y2 = h - 1
            change_H = True

        x1h = dfx1
        y1h = dfy1
        x2h = c - 1 - dfx2
        y2h = r - 1 - dfy2

        if change_H:
            H = np.multiply(cv2.getGaussianKernel(y2h - y1h + 1, sigma),
                            (cv2.getGaussianKernel(x2h - x1h + 1, sigma)).T)

        density[y1:y2 + 1, x1:x2 + 1] += H

        if change_H:
            H = Fixed_H

    #     print('===========done=============')
    return density
Code example #48
import cv2
import numpy as np
img = cv2.imread('input.jpg')

rows, cols = img.shape[:2]

# generating vignette mask using Gaussian kernels
kernel_x = cv2.getGaussianKernel(cols, 200)
kernel_y = cv2.getGaussianKernel(rows, 200)
kernel = kernel_y * kernel_x.T
mask = 255 * kernel / np.linalg.norm(kernel)
output = np.copy(img)
# applying the mask to each channel in the input image
for i in range(3):
    output[:, :, i] = output[:, :, i] * mask

cv2.imshow('Original', img)
cv2.imshow('Vignette', output)
cv2.waitKey(0)
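A design note: dividing by np.linalg.norm(kernel) ties the mask's peak value to the image size, which can push the product past 255. Normalizing by the maximum instead (a variant sketch, not the original's choice) keeps the centre pixel at its original intensity:

mask = kernel / kernel.max()  # peaks at 1.0 in the centre, falls off towards the edges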
Code example #49
File: gauss.py Project: hieubkset/OpenCV-Tutorials
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('opencv_logo.png')

blur = cv2.GaussianBlur(img, (5, 5), 0)

plt.subplot(121), plt.imshow(img), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(blur), plt.title('Blurred')
plt.xticks([]), plt.yticks([])
plt.show()

# cv2.getGaussianKernel(ksize, sigma) returns the 1-D kernel that GaussianBlur builds internally
Code example #50
 def __init__(self, sigma):
     kernel = cv2.getGaussianKernel(sigma2sz(sigma), sigma)
     super().__init__(_sepFilter2D, kernel)
Code example #51
File: summary_images.py Project: inscopix/CaImAn
def correlation_pnr(Y, gSig=None, center_psf=True, swap_dim=True, background_filter='disk'):
    """
    compute the correlation image and the peak-to-noise ratio (PNR) image.
    If gSig is provided, then spatially filtered the video.

    Args:
        Y:  np.ndarray (3D or 4D).
            Input movie data in 3D or 4D format
        gSig:  scalar or vector.
            gaussian width. If gSig == None, no spatial filtering
        center_psf: Boolean
            True indicates subtracting the mean of the filtering kernel
        swap_dim: Boolean
            True indicates that time is listed in the last axis of Y (matlab format)
            and moves it in the front

    Returns:
        cn: np.ndarray (2D or 3D).
            local correlation image of the spatially filtered (or not)
            data
        pnr: np.ndarray (2D or 3D).
            peak-to-noise ratios of all pixels/voxels

    """
    if swap_dim:
        Y = np.transpose(
            Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1]))))

    # parameters
    _, d1, d2 = Y.shape
    data_raw = Y.reshape(-1, d1, d2).astype('float32')

    # filter data
    data_filtered = data_raw.copy()
    if gSig:
        if not isinstance(gSig, list):
            gSig = [gSig, gSig]
        ksize = tuple([int(2 * i) * 2 + 1 for i in gSig])

        if center_psf:
            if background_filter == 'box':
                for idx, img in enumerate(data_filtered):
                    data_filtered[idx, ] = cv2.GaussianBlur(
                        img, ksize=ksize, sigmaX=gSig[0], sigmaY=gSig[1], borderType=1) \
                        - cv2.boxFilter(img, ddepth=-1, ksize=ksize, borderType=1)
            else:
                psf = cv2.getGaussianKernel(ksize[0], gSig[0], cv2.CV_32F).dot(
                    cv2.getGaussianKernel(ksize[1], gSig[1], cv2.CV_32F).T)
                ind_nonzero = psf >= psf[0].max()
                psf -= psf[ind_nonzero].mean()
                psf[~ind_nonzero] = 0
                for idx, img in enumerate(data_filtered):
                    data_filtered[idx, ] = cv2.filter2D(img, -1, psf, borderType=1)

            # data_filtered[idx, ] = cv2.filter2D(img, -1, psf, borderType=1)
        else:
            for idx, img in enumerate(data_filtered):
                data_filtered[idx, ] = cv2.GaussianBlur(
                    img, ksize=ksize, sigmaX=gSig[0], sigmaY=gSig[1], borderType=1)

    # compute peak-to-noise ratio
    data_filtered -= data_filtered.mean(axis=0)
    data_max = np.max(data_filtered, axis=0)
    data_std = get_noise_fft(data_filtered.T, noise_method='mean')[0].T
    pnr = np.divide(data_max, data_std)
    pnr[pnr < 0] = 0

    # remove small values
    tmp_data = data_filtered.copy() / data_std
    tmp_data[tmp_data < 3] = 0

    # compute correlation image
    cn = local_correlations_fft(tmp_data, swap_dim=False)

    return cn, pnr
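# Sketch (not from CaImAn): a standalone illustration of what the center_psf
# branch above computes. Subtracting the kernel's mean over its central disk
# makes the PSF sum to ~0, so flat background is suppressed while blob-shaped
# transients survive. The gSig value below is illustrative, not a default.
import cv2
import numpy as np

gSig = [4, 4]
ksize = tuple(int(2 * i) * 2 + 1 for i in gSig)                 # (17, 17)
psf = cv2.getGaussianKernel(ksize[0], gSig[0], cv2.CV_32F).dot(
    cv2.getGaussianKernel(ksize[1], gSig[1], cv2.CV_32F).T)
ind_nonzero = psf >= psf[0].max()     # central disk: coefficients above the corner row's max
psf -= psf[ind_nonzero].mean()        # zero-mean inside the disk
psf[~ind_nonzero] = 0                 # exactly zero outside it
print(abs(psf.sum()) < 1e-5)          # True: DC component removed
frame = np.random.rand(64, 64).astype('float32')
high_passed = cv2.filter2D(frame, -1, psf, borderType=1)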
Code example #52
0

def scale(imgs, factor):
    # element-wise brightness scaling
    result = imgs * factor
    return result

# Load the image in grayscale
img = cv2.imread('Macclaren.png', 0)  # flag 0 = grayscale, 1 = colour (BGR)
img = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
# Create a gaussian filter

bsize = 81  # size of the Gaussian filter kernel
sigma = 5   # sigma, i.e. how wide the bell is

#img = cv2.GaussianBlur(img, (bsize, bsize), sigma)  # use OpenCV to apply the Gaussian filter directly
filter = cv2.getGaussianKernel(bsize, sigma)  # create the Gaussian filter kernel (a bsize x 1 column vector)


# A 3D surface plot of the kernel is sketched below; note that `filter` is a
# (bsize, 1) column vector, so it must be expanded to a 2D kernel first (see
# the working sketch after this example).
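# Sketch of the surface plot described above (assumes matplotlib >= 3.2; the
# axis ranges are arbitrary display choices). The 1D kernel is expanded to 2D
# with an outer product before plotting.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

kernel2d = filter @ filter.T                 # (bsize, bsize) 2D Gaussian
X = np.linspace(-10, 10, bsize)              # axes sized to match the kernel
Y = np.linspace(-10, 10, bsize)
X, Y = np.meshgrid(X, Y)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')        # fig.gca(projection=...) is deprecated
ax.plot_surface(X, Y, kernel2d, rstride=1, cstride=1,
                cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_zlim(kernel2d.min(), kernel2d.max())
plt.show()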
Code example #53
0
File: bcd.py Project: eyeKill/CV-Projects
def bottleCapDetect(img):
    scale_factor = min(1024 / img.shape[0], 768 / img.shape[1], 1)
    img = cv2.resize(img, (0, 0), fx=scale_factor, fy=scale_factor)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edge_threshold = 10
    edge_threshold_max = 2 * edge_threshold
    blurred = cv2.medianBlur(gray, 3)
    edges = cv2.Canny(blurred,
                      edge_threshold,
                      edge_threshold_max,
                      apertureSize=3)
    struct_ele = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    edges_closed = edges.copy()
    edges_closed = cv2.morphologyEx(edges_closed,
                                    cv2.MORPH_DILATE,
                                    struct_ele,
                                    iterations=1)
    edges_closed = cv2.morphologyEx(edges_closed,
                                    cv2.MORPH_CLOSE,
                                    struct_ele,
                                    iterations=5)
    mask = np.zeros((edges_closed.shape[0] + 2, edges_closed.shape[1] + 2),
                    dtype="uint8")
    # flood-fill from the corner; with FLOODFILL_MASK_ONLY the result lands in
    # `mask` (filled in place), so the return value is not needed
    cv2.floodFill(edges_closed,
                  mask, (0, 0),
                  255,
                  flags=cv2.FLOODFILL_MASK_ONLY)
    mask[...] = 255 * (1 - mask)
    cap_cnt = 10

    def get_markers(mask_orig):
        mask = mask_orig.copy()
        cnt = 0
        result = []
        while cnt < cap_cnt:
            mask = cv2.medianBlur(mask, 9)
            # do distance transform
            dist = cv2.distanceTransform(mask, cv2.DIST_L2, 5)
            dist = dist.astype("uint8")
            ret, markers_binary = cv2.threshold(dist, 0.8 * dist.max(), 255, 0)
            # do marker labelling
            ret, markers = cv2.connectedComponents(markers_binary)
            cur_cnt = markers.max()
            print("Got", cur_cnt, "marker(s)")
            cnt += cur_cnt
            cur_result = []
            for i in range(1, cur_cnt + 1):
                pos = np.nonzero(markers == i)
                x, y = pos[1], pos[0]
                minx, maxx, miny, maxy = x.min(), x.max(), y.min(), y.max()
                w = np.max(dist[markers == i])
                cur_result.append(((minx, miny), (maxx, maxy), w))
            result.extend(cur_result)
            if cnt < cap_cnt:
                for i in range(cur_cnt):
                    (minx, miny), (maxx, maxy), w = cur_result[i]
                    radius = w + 20  # just in case :)
                    print("Removing", (minx, miny), (maxx, maxy), radius)
                    mask = cv2.circle(mask,
                                      ((minx + maxx) // 2, (miny + maxy) // 2),
                                      radius, 0, -1)
            elif cnt > cap_cnt:
                print("warning: 翻车啦")
        return result

    markers = get_markers(mask)
    # now prepare for watersheding
    ws = np.logical_not(mask).astype('int32')
    for i, (p1, p2, w) in enumerate(markers):
        center = (p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2
        axis = ((p2[0] - p1[0]) // 2, (p2[1] - p1[1]) // 2)
        cv2.ellipse(ws, center, axis, 0, 0, 360, i + 2, cv2.FILLED)
    flooded = ws.copy()
    flooded = cv2.watershed(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR), flooded)
    flooded[...] = flooded - 1
    flooded[flooded <= 0] = 0
    # dilate a little to remove tiny edges
    flooded = flooded.astype("uint8")
    flooded = cv2.morphologyEx(flooded,
                               cv2.MORPH_DILATE,
                               struct_ele,
                               iterations=1)
    ws = flooded
    boxed = img.copy()
    bounding_boxes = []
    minimal_bounding_boxes = []
    for i in range(cap_cnt):
        # value in marker image is i+1
        buf = (ws == i + 1).astype('uint8')
        contours, hierarchy = cv2.findContours(buf, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)
        assert (len(contours) == 1)
        x, y, w, h = cv2.boundingRect(contours[0])
        bounding_boxes.append(((x, y), (x + w, y + h)))
        minimal_bounding_boxes.append(cv2.minAreaRect(contours[0]))
        ellipse = cv2.fitEllipse(contours[0])
        cv2.ellipse(boxed, ellipse, (0, 255, 0), 2)
        box_points = cv2.boxPoints(minimal_bounding_boxes[-1])
        box_points = box_points.astype(np.intp)  # np.int0 was removed in NumPy 2.0
        cv2.rectangle(boxed, bounding_boxes[-1][0], bounding_boxes[-1][1],
                      (255, 0, 0), 2)
        cv2.drawContours(boxed, [box_points], 0, (0, 0, 255), 2)
    real_caps = []
    cap_edges = []
    real_ans = []
    cord = []
    edges = cv2.Canny(img, 40, 80)
    for i, ((p1, p2),
            (center, (width, height),
             a)) in enumerate(zip(bounding_boxes, minimal_bounding_boxes)):
        g = gray[p1[1]:p2[1], p1[0]:p2[0]]
        e = edges[p1[1]:p2[1], p1[0]:p2[0]]
        cord.append(center)
        if width / height > 1.5 or height / width > 1.5:
            print(i, "is a side")
            real_ans.append('side')
        else:
            real_ans.append(None)
            # do circle approximation for better center mounting
            # NB: the 5th positional argument of HoughCircles is the optional
            # `circles` output buffer, so the original positional call bound a
            # value to it by mistake; keyword arguments make the mapping explicit
            circles = cv2.HoughCircles(g, cv2.HOUGH_GRADIENT, 2, 40,
                                       param1=1, param2=20,
                                       minRadius=40, maxRadius=200)
            if circles is not None:
                for c in circles[0, :]:
                    x, y, r = int(c[0]), int(c[1]), int(c[2])
                    # draw the outer circle
                    cv2.circle(g, (x, y), r, 255, 2)
                    # draw the center of the circle
                    cv2.circle(g, (x, y), 2, 255, 3)
        real_caps.append(g)
        cap_edges.append(e)
    k = len(real_caps)
    t = cv2.getGaussianKernel(9, 1)
    gaussian_kernel = t * t.T
    gaussian_kernel *= 255 / np.max(gaussian_kernel)
    gaussian_kernel = np.uint8(gaussian_kernel)
    gaussian_kernel = cv2.resize(gaussian_kernel, (64, 64))
    for no, (cap, edge) in enumerate(zip(real_caps, cap_edges)):
        if real_ans[no] is not None:
            print("Skipping", no, "it is a side")
            continue
        c = cv2.resize(cap, (128, 128))
        e = cv2.resize(edge, (128, 128))
        rx, ry = c.shape[0] // 4, c.shape[1] // 4
        cx, cy = c.shape[0] // 2, c.shape[1] // 2
        # cast first: uint8 * uint8 would wrap modulo 256 (the 50000 threshold
        # below was tuned on this code and may need revisiting)
        score = np.sum(e[cx - rx:cx + rx, cy - ry:cy + ry].astype(np.int64) *
                       gaussian_kernel)
        print(score)
        real_ans[no] = 'top' if score > 50000 else 'tail'
    labelled = img.copy()
    for i in range(len(real_ans)):
        if real_ans[i] == 'top':
            color = (255, 0, 0)
        elif real_ans[i] == 'tail':
            color = (0, 255, 0)
        else:
            color = (0, 0, 255)
        cv2.rectangle(labelled, bounding_boxes[i][0], bounding_boxes[i][1],
                      color, 2)
        cv2.putText(labelled, real_ans[i], bounding_boxes[i][0],
                    cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
        cv2.putText(labelled, "({},{})".format(int(cord[i][0]),
                                               int(cord[i][1])),
                    (int(cord[i][0]) - 50, int(cord[i][1])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
    return labelled
Code example #54
0
import math
import copy as cp

import cv2
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage


def SIFT(image1, n):

    #SCALE-SPACE EXTREMA DETECTION

    #detect keypoints using a cascade filtering approach

    sigma = 1.7
    k = 2**0.5
    scales = 5
    octaves = 4
    base_image = np.zeros(np.shape(image1))
    base_image[:] = image1

    #Using a Gaussian kernel to smooth the image for each octave with
    #respective scalings for creating the pyramid structure
    image_octaveList = []
    image_baseList = []
    for i in range(octaves):
        image_scaleList = []
        for j in range(scales):

            if i == 0 and j == 0:
                temp1 = cp.deepcopy(base_image)
                image_scaleList.append(temp1)
            elif i > 0 and j == 0:
                temp2 = ndimage.zoom(image_baseList[i - 1][0], 0.5, order=1)
                temp3 = cp.deepcopy(temp2)
                image_scaleList.append(temp3)

        image_baseList.append(image_scaleList)

    #Gaussian blurring: the scale space is built so that the DoG
    #can be computed from the difference of two nearby scales separated by a
    #constant multiplicative factor k
    for i in range(octaves):
        image_scaleList = []
        for j in range(scales):

            if j == 0:
                temp1 = np.zeros(np.shape(image_baseList[i][0]))
                temp1[:] = image_baseList[i][0]
            sigma = math.pow(k, j) * 1.7
            histogram_size = int(math.ceil(7 * sigma))
            histogram_size = 2 * histogram_size + 1

            temp2 = temp3 = np.zeros(np.shape(temp1))
            temp2 = cv2.GaussianBlur(temp1, (histogram_size, histogram_size),
                                     sigma, sigma)
            image_scaleList.append(temp2)

        image_octaveList.append(image_scaleList)
        #print shape(image_octaveList)

    #Difference of Gaussian (DoG) computation at each of the octaves
    # List initiation by taking difference between consecutive Gaussian blurred images

    DoG_List = []
    for i in range(octaves):
        image_scaleList = []
        for j in range(1, scales):

            difference = np.zeros(np.shape(image_octaveList[i][0]))
            difference[:] = np.subtract(image_octaveList[i][j],
                                        image_octaveList[i][j - 1])
            image_scaleList.append(difference)

        DoG_List.append(image_scaleList)
        #print shape(DoG_List)

    #LOCAL EXTREMA DETECTION

    #each sample point is compared to its eight neighbors in the current image and nine
    #neighbors in the scale above and below

    #Finding the EXTREMUM, That is minima/maxima
    c1 = 0  #count

    #Remove the boundary pixels
    #Binarize Image of Extremum Points
    image_extremumList = []
    for i in range(octaves):
        image_scaleList = []
        for j in range(1, scales - 2):
            image_extremum = np.zeros(DoG_List[i][j].shape, dtype=np.float64)
            for l in range(1, DoG_List[i][j].shape[0] - 1):
                for m in range(1, DoG_List[i][j].shape[1] - 1):
                    #It is selected only if it is larger than all of these neighbors or smaller than all
                    #of them
                    ext_points = DoG_List[i][j][l][m]
                    if ext_points == max(
                            DoG_List[i][j][l - 1:l + 2, m - 1:m + 2].max(),
                            DoG_List[i][j - 1][l - 1:l + 2, m - 1:m + 2].max(),
                            DoG_List[i][j + 1][l - 1:l + 2,
                                               m - 1:m + 2].max()):
                        image_extremum[l][m] = ext_points
                        c1 += 1
                    elif ext_points == min(
                            DoG_List[i][j][l - 1:l + 2, m - 1:m + 2].min(),
                            DoG_List[i][j - 1][l - 1:l + 2, m - 1:m + 2].min(),
                            DoG_List[i][j + 1][l - 1:l + 2,
                                               m - 1:m + 2].min()):
                        image_extremum[l][m] = ext_points
                        c1 += 1
            image_scaleList.append(image_extremum)
        image_extremumList.append(image_scaleList)
    print "Number of Scaled Space Extremum Points:", c1

    #Finding Candidate KeyPoints in each of the middle two layers of DoG
    #keypoint detection is to identify locations and scales that can be
    #repeatably assigned under differing views of the same object

    key_points = 0
    sigma_nonzero = []
    extremum_nonzero = []
    for i in range(octaves):
        image_sigmaList = []
        image_scaleList = []
        for j in range(scales - 3):
            temp4 = []
            temp4[:] = np.transpose(image_extremumList[i][j].nonzero())
            key_points += len(temp4)
            image_scaleList.append(temp4)
            image_sigmaList.append(math.pow(k, j) * 1.6)
        extremum_nonzero.append(image_scaleList)
        sigma_nonzero.append(image_sigmaList)

    #plot all the non-zero extremum points
    plt.gray()
    plt.figure(n + 1)
    plt.imshow(image1)
    for i in range(octaves):
        for j in range(0, 2):
            for l in range(len(extremum_nonzero[i][j])):
                x = math.pow(2, i) * extremum_nonzero[i][j][l][0]
                y = math.pow(2, i) * extremum_nonzero[i][j][l][1]
                x1 = [x]
                y1 = [y]
                plt.plot(y1, x1, 'b*')
    plt.title('Non-Zero Extremum Points')

    #ACCURATE KEYPOINT LOCALIZATION

    #finding a detailed fit to the nearby data for location, scale,
    #and ratio of principal curvatures
    c2 = 1  # counter for the finding the key points counter
    c3 = 0  # counter
    extremum_points = []
    for i in range(octaves):
        image_scaleList = []
        for j in range(2):
            c2 = 1
            keyPointsPerScale = []

            for l in range(len(extremum_nonzero[i][j])):
                matrix_A = np.zeros((3, 3))
                matrix_B = np.zeros((3, 1))
                x_coord = extremum_nonzero[i][j][l][0]
                y_coord = extremum_nonzero[i][j][l][1]
                sigma_current = sigma_nonzero[i][j]

                #simply locate keypoints at the location and scale of the central sample point
                ##Finding the derivativess and filling the 3x3 Linear Systems
                if (x_coord + 1 < DoG_List[i][0].shape[0]
                        and y_coord + 1 < DoG_List[i][0].shape[1]
                        and x_coord - 1 > -1 and y_coord - 1 > -1):
                    x_newcoord = x_coord
                    y_newcoord = y_coord
                    xnew = np.zeros((3, 1))
                    sigma_new = sigma_current

                    # second derivatives of D (3x3 Hessian, central differences)
                    matrix_A[0][0] = (DoG_List[i][j][x_coord][y_coord]
                                      - 2 * DoG_List[i][j + 1][x_coord][y_coord]
                                      + DoG_List[i][j + 2][x_coord][y_coord])
                    matrix_A[0][1] = (DoG_List[i][j + 2][x_coord + 1][y_coord]
                                      - DoG_List[i][j + 2][x_coord - 1][y_coord]
                                      - DoG_List[i][j][x_coord + 1][y_coord]
                                      + DoG_List[i][j][x_coord - 1][y_coord])
                    matrix_A[0][2] = (DoG_List[i][j + 2][x_coord][y_coord + 1]
                                      - DoG_List[i][j + 2][x_coord][y_coord - 1]
                                      - DoG_List[i][j][x_coord][y_coord + 1]
                                      + DoG_List[i][j][x_coord][y_coord - 1])

                    matrix_A[1][0] = matrix_A[0][1]  # Hessian is symmetric
                    matrix_A[1][1] = (DoG_List[i][j + 1][x_coord + 1][y_coord]
                                      - 2 * DoG_List[i][j + 1][x_coord][y_coord]
                                      + DoG_List[i][j + 1][x_coord - 1][y_coord])
                    matrix_A[1][2] = (DoG_List[i][j + 1][x_coord - 1][y_coord - 1]
                                      - DoG_List[i][j + 1][x_coord + 1][y_coord - 1]
                                      - DoG_List[i][j + 1][x_coord - 1][y_coord + 1]
                                      + DoG_List[i][j + 1][x_coord + 1][y_coord + 1])

                    matrix_A[2][0] = matrix_A[0][2]
                    matrix_A[2][1] = matrix_A[1][2]
                    matrix_A[2][2] = (DoG_List[i][j + 1][x_coord][y_coord + 1]
                                      - 2 * DoG_List[i][j + 1][x_coord][y_coord]
                                      + DoG_List[i][j + 1][x_coord][y_coord - 1])

                    # first derivatives of D
                    matrix_B[0][0] = (DoG_List[i][j + 2][x_coord][y_coord]
                                      - DoG_List[i][j][x_coord][y_coord])
                    matrix_B[1][0] = (DoG_List[i][j + 1][x_coord + 1][y_coord]
                                      - DoG_List[i][j + 1][x_coord - 1][y_coord])
                    matrix_B[2][0] = (DoG_List[i][j + 1][x_coord][y_coord + 1]
                                      - DoG_List[i][j + 1][x_coord][y_coord - 1])

                    xdash = np.dot(np.linalg.pinv(matrix_A), matrix_B)
                    xnew[:] = xdash

                    #If the offset ˆx is larger than 0.5 in any dimension, then it means that the extremum
                    #lies closer to a different sample point
                    # Change points having offset greater than 0.5 in any dimensions
                    skipPoint = 0
                    if abs(xdash[0][0]) > 0.5 or abs(xdash[1][0]) > 0.5 or abs(
                            xdash[2][0]) > 0.5:
                        skipPoint = 1
                        if abs(xdash[1][0]) > 0.5:
                            x_newcoord = x_coord + round(xdash[1][0])
                            xnew[1][0] = xdash[1][0] - round(xdash[1][0])
                            if (x_newcoord > image_octaveList[i][0].shape[0] -
                                    1) or x_newcoord < 0:
                                skipPoint = 1

                        if abs(xdash[2][0]) > 0.5:
                            y_newcoord = y_coord + round(xdash[2][0])
                            xnew[2][0] = xdash[2][0] - round(xdash[2][0])
                            if (y_newcoord > image_octaveList[i][0].shape[1] -
                                    1) or y_newcoord < 0:
                                skipPoint = 1

                        if abs(xdash[0][0]) > 0.5:
                            if xdash[0][0] > 0:
                                sigma_new = math.pow(k, (j + 1)) * 1.6
                                xnew[0][0] = (sigma_new - math.pow(k, j) *
                                              1.6) - xdash[0][0]
                            else:
                                sigma_new = math.pow(k, (j - 1)) * 1.6
                                xnew[0][0] = (math.pow(k, j) * 1.6 -
                                              sigma_new) + xdash[0][0]

                    # Eliminating low-contrast keypoints and checking for poor edge localization
                    if (skipPoint == 0):
                        # D(x_hat) = D + 0.5 * grad(D)^T . x_hat
                        contrast_keypoint = (DoG_List[i][j + 1][x_newcoord][y_newcoord]
                                             + 0.5 * (matrix_B[0][0] * xnew[0][0]
                                                      + matrix_B[1][0] * xnew[1][0]
                                                      + matrix_B[2][0] * xnew[2][0]))

                        #all extrema with a value of |D(ˆx)| less than 0.03 were
                        #discarded (as before, we assume image pixel values in the range [0,1]).

                        #Hessian Part
                        if abs(contrast_keypoint) > 0.03:
                            diff_xx = (DoG_List[i][j + 1][x_coord + 1][y_coord]
                                       - 2 * DoG_List[i][j + 1][x_coord][y_coord]
                                       + DoG_List[i][j + 1][x_coord - 1][y_coord])
                            diff_xy = (DoG_List[i][j + 1][x_coord + 1][y_coord + 1]
                                       - DoG_List[i][j + 1][x_coord + 1][y_coord - 1]
                                       - DoG_List[i][j + 1][x_coord - 1][y_coord + 1]
                                       + DoG_List[i][j + 1][x_coord - 1][y_coord - 1])
                            diff_yy = (DoG_List[i][j + 1][x_coord][y_coord + 1]
                                       - 2 * DoG_List[i][j + 1][x_coord][y_coord]
                                       + DoG_List[i][j + 1][x_coord][y_coord - 1])

                            trace_H = diff_xx + diff_yy
                            determinant_H = diff_xx * diff_yy - diff_xy**2
                            #Eliminating edge responses: if the determinant is
                            #negative the curvatures have different signs, so
                            #the point is discarded as not being an extremum

                            #the paper uses r = 10, i.e. keypoints whose ratio
                            #of principal curvatures exceeds 10 are rejected; in
                            #terms of the trace/determinant test the threshold
                            #is (r + 1)**2 / r = 12.1
                            if determinant_H > 0 and (
                                    trace_H * trace_H) / determinant_H < 12.1:
                                key_attributePoints = []

                                key_attributePoints.append(c2)
                                key_attributePoints.append(x_newcoord)
                                key_attributePoints.append(y_newcoord)

                                key_attributePoints.append(sigma_new)

                                key_attributePoints.append(xnew[0][0])
                                key_attributePoints.append(xnew[1][0])
                                key_attributePoints.append(xnew[2][0])

                                key_attributePoints.append(x_coord)
                                key_attributePoints.append(y_coord)

                                key_attributePoints.append(sigma_current)
                                key_attributePoints.append(j + 1)

                                c2 = c2 + 1
                                keyPointsPerScale.append(key_attributePoints)
                                c3 += 1

            image_scaleList.append(keyPointsPerScale)
        extremum_points.append(image_scaleList)
    print "The initial key points locations at maxima and minima of the difference-of-Gaussian function:", c3

    #keypoint selection on a natural image
    # Get All the KEYPOINTS
    plt.gray()
    plt.figure(n + 2)
    plt.imshow(image1)

    for i in range(octaves):
        for j in range(2):
            for l in range(len(extremum_points[i][j])):
                x = math.pow(2, i) * extremum_points[i][j][l][1]
                y = math.pow(2, i) * extremum_points[i][j][l][2]
                x1 = [x]
                y1 = [y]
                plt.plot(y1, x1, 'b*')
    plt.title('Key Points')

    # ORIENTATION ASSIGNMENT

    #In order to avoid too much clutter, a low-resolution pixel image is used and keypoints are shown as
    # vectors giving the location, scale, and orientation of each keypoint
    c4 = []
    c5 = 0
    for i in range(octaves):
        image_scaleList = []
        for j in range(scales - 3):
            c2 = 1

            keyPointsPerScale = []
            for p in range(len(extremum_points[i][j])):
                x_coord = extremum_points[i][j][p][1]
                y_coord = extremum_points[i][j][p][2]

                sig = extremum_points[i][j][p][3]
                IOr = image_octaveList[i][j]

                histogram_size = int(math.ceil(7 * sig))
                if histogram_size % 2 == 0:
                    histogram_size += 1  # getGaussianKernel expects an odd aperture

                Iblur = np.zeros(IOr.shape)

                H = cv2.getGaussianKernel(histogram_size, sig)
                # expand the separable kernel to 2D before filtering
                Iblur[:] = cv2.filter2D(IOr, -1, H * H.T)

                bins = np.zeros((1, 36))

                #keypoint descriptor can be represented relative to this orientation and therefore achieve invariance
                #to image rotation

                #The highest peak in the histogram is detected, and then any other local peak that is within
                #80% of the highest peak is used to also create a keypoint with that orientation
                for s in range(-histogram_size, histogram_size + 1):
                    for t in range(-histogram_size, histogram_size + 1):
                        if (((x_coord + s) > 0) and ((x_coord + s) <
                                                     (Iblur.shape[0] - 1))
                                and ((y_coord + t) > 0)
                                and ((y_coord + t) < (Iblur.shape[1] - 1))):
                            xmag1 = Iblur[x_coord + s + 1][y_coord + t]
                            xmag2 = Iblur[x_coord + s - 1][y_coord + t]

                            ymag1 = Iblur[x_coord + s][y_coord + t + 1]
                            ymag2 = Iblur[x_coord + s][y_coord + t - 1]
                            m = math.sqrt(
                                math.pow((xmag1 - xmag2), 2) +
                                math.pow((ymag1 - ymag2), 2))
                            den = xmag2 - xmag1

                            #An orientation histogram is formed from the gradient orientations of sample points within
                            #a region around the keypoint
                            if den == 0:
                                den = 5
                            theta = math.degrees(
                                math.atan((ymag2 - ymag1) / (den)))
                            #The orientation histogram has 36 bins covering the 360 degree
                            #range of orientations.

                            if (theta < 0):
                                theta = 360 + theta
                            binary = int((theta / 360) * 36) % 36
                            bins[0][binary] = bins[0][binary] + m

                maxBinNo = np.argmax(bins)
                maxtheta = maxBinNo * 10
                maxmag = bins[0][maxBinNo]

                extremum_points[i][j][p].append(maxtheta)
                extremum_points[i][j][p].append(maxmag)

                nbins = 36
                threshold = 0.8
                o = 0
                for y in range(0, 36):
                    orientation = 0
                    y_prev = (y - 1 + nbins) % nbins
                    y_next = (y + 1) % nbins

                    if bins[0][y] > threshold * maxmag and bins[0][y] > bins[
                            0][y_prev] and bins[0][y] > bins[0][y_next]:
                        offset = (bins[0][y_prev] - bins[0][y_next]) / (
                            2 * (bins[0][y_prev] + bins[0][y_next] -
                                 2 * bins[0][y]))
                        exact_bin = y + offset
                        orientation = exact_bin * 360 / float(36)

                        #Each sample added to the histogram is weighted by its gradient magnitude
                        #and by a Gaussian-weighted circular window with a σ that is 1.5 times that of the scale
                        #of the keypoint.
                        if orientation > 360:
                            orientation -= 360

                        o += 1
                        extPtskey_attributePoints = []
                        extPtskey_attributePoints[:] = extremum_points[i][j][p]
                        extPtskey_attributePoints[11] = orientation
                        keyPointsPerScale.append(extPtskey_attributePoints)
            c5 += len(keyPointsPerScale)
            image_scaleList.append(keyPointsPerScale)
        c4.append(image_scaleList)
    print "Principal Oreintation points after Thresholding", c5

    #THE LOCAL IMAGE DESCRIPTOR

    #compute gradient for scale space
    dx_list = []
    dy_list = []
    for i in range(len(image_octaveList)):
        image_scaleList1 = []
        image_scaleList2 = []
        for j in range(scales):
            dx, dy = np.gradient(image_octaveList[i][j])
            image_scaleList1.append(dx)
            image_scaleList2.append(dy)
        dx_list.append(image_scaleList1)
        dy_list.append(image_scaleList2)

    const = 3
    plt.gray()
    plt.figure(n + 3)
    plt.imshow(image1)

    for i in range(octaves):
        for j in range(2):
            for l in range(len(extremum_points[i][j])):
                x = math.pow(2, i) * extremum_points[i][j][l][1]
                y = math.pow(2, i) * extremum_points[i][j][l][2]
                # index 11 holds the dominant orientation in degrees
                dx = const * extremum_points[i][j][l][3] * math.cos(
                    math.radians(extremum_points[i][j][l][11]))
                dy = const * extremum_points[i][j][l][3] * math.sin(
                    math.radians(extremum_points[i][j][l][11]))
                x1 = [x]
                y1 = [y]
                plt.plot(y1, x1, 'b*')
    plt.title('Image Descriptor Key Points')
    plt.figure(n + 4)
    plt.imshow(image1)
    plt.title('Original Image')
    plt.show()
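# Sketch (not part of the implementation above): the Gaussian/DoG pyramid that
# the routine builds by hand can be expressed far more compactly with cv2
# primitives; ksize=(0, 0) lets GaussianBlur derive the kernel size from sigma.
import cv2
import numpy as np

def dog_pyramid(gray, octaves=4, scales=5, sigma0=1.7, k=2 ** 0.5):
    gray = gray.astype(np.float32)
    dogs = []
    for _ in range(octaves):
        blurred = [cv2.GaussianBlur(gray, (0, 0), sigma0 * k ** j)
                   for j in range(scales)]
        dogs.append([blurred[j + 1] - blurred[j] for j in range(scales - 1)])
        gray = cv2.pyrDown(gray)             # next octave at half resolution
    return dogs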
Code example #55
0
import cv2
import numpy as np


# function header reconstructed from the call below; note the kernel argument
# is only used for its window size -- the median ignores its coefficients
def medianBlur(img, kernel, padding_way):
    h, w = img.shape
    kh, kw = kernel.shape
    padding_size = (kh - 1) // 2
    if padding_way == 'REPLICA':
        img_padded = np.pad(img, padding_size, 'edge')
    elif padding_way == 'ZERO':
        img_padded = np.pad(img, padding_size, 'constant')
    print(img_padded)
    out_h, out_w = (h + 2 * padding_size + 1 - kh), (w + 2 * padding_size + 1 -
                                                     kw)
    img_pooling = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            img_pooling[i][j] = np.median(img_padded[i:i + kh, j:j + kw])
    return img_pooling


if __name__ == '__main__':
    img_gray = cv2.imread('lena.png', 0)
    kernel = cv2.getGaussianKernel(5, 1)
    kernel2D = kernel * kernel.T
    # img_gray=np.arange(1,17).reshape(4,4)
    # kernel2D=np.arange(1,5).reshape(2,2)
    print(img_gray.shape)
    print(kernel2D.shape)
    img_padding_REPLICA = medianBlur(img_gray, kernel2D, 'REPLICA')
    print(img_padding_REPLICA.shape)
    img_padding_ZERO = medianBlur(img_gray, kernel2D, 'ZERO')
    print(img_padding_ZERO.shape)

    # the medians are float64 in 0..255; imshow needs uint8 (or floats in 0..1)
    cv2.imshow('img_padding_REPLICA', img_padding_REPLICA.astype(np.uint8))
    cv2.imshow('img_padding_ZERO', img_padding_ZERO.astype(np.uint8))
    key = cv2.waitKey()
    if key == 27:
        cv2.destroyAllWindows()
Code example #56
0
File: imageManip.py Project: qenops/dUtils
def getMotionKernel(ksize, sigma=3):
    # Gaussian profile along y, uniform (box) profile along x: a horizontal
    # motion-blur kernel with a soft vertical falloff
    d1Kernel = cv2.getGaussianKernel(ksize, sigma)
    kernel = d1Kernel * np.ones([1, ksize]) / ksize
    return kernel
Code example #57
0
def GaussianKernel(ksize=101, nsig=30):
	# 2D Gaussian normalized so that the centre coefficient equals 1
	gauss1D = cv2.getGaussianKernel(ksize, nsig)
	gauss2D = gauss1D*np.transpose(gauss1D)
	gauss2D = gauss2D/gauss2D[int(ksize/2), int(ksize/2)]
	return gauss2D
Code example #58
0
def gaussian_kernel_2d(kernel_size, sigma):
    kx = cv2.getGaussianKernel(kernel_size, sigma)
    ky = cv2.getGaussianKernel(kernel_size, sigma)
    return np.multiply(kx, np.transpose(ky))
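# Note (mine, assuming both helpers from the two examples above are in scope):
# they differ only in normalization. getGaussianKernel returns coefficients
# that sum to 1, so the outer product in gaussian_kernel_2d also sums to 1
# (brightness-preserving filtering), while GaussianKernel rescales so the
# centre coefficient is exactly 1 (convenient as a weight or vignette mask).
g = gaussian_kernel_2d(9, 2.0)
print(g.sum())                         # ~1.0: unit-sum kernel
print(GaussianKernel(9, 2.0).max())    # 1.0: peak-normalized kernel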
Code example #59
0
    def __ridge_orient(self):
        # RIDGEORIENT - Estimates the local orientation of ridges in a fingerprint
        #
        # Usage:  [orientim, reliability, coherence] = ridgeorientation(im, gradientsigma,...
        #                                             blocksigma, ...
        #                                             orientsmoothsigma)
        #
        # Arguments:  im                - A normalised input image.
        #             gradientsigma     - Sigma of the derivative of Gaussian
        #                                 used to compute image gradients.
        #             blocksigma        - Sigma of the Gaussian weighting used to
        #                                 sum the gradient moments.
        #             orientsmoothsigma - Sigma of the Gaussian used to smooth
        #                                 the final orientation vector field.
        #                                 Optional: if omitted it defaults to 0
        #
        # Output:    orientim          - The orientation image in radians.
        #                                 Orientation values are +ve clockwise
        #                                 and give the direction *along* the
        #                                 ridges.
        #             reliability       - Measure of the reliability of the
        #                                 orientation measure.  This is a value
        #                                 between 0 and 1. I think a value above
        #                                 about 0.5 can be considered 'reliable'.
        #                                 reliability = 1 - Imin./(Imax+.001);
        #             coherence         - A measure of the degree to which the local
        #                                 area is oriented.
        #                                 coherence = ((Imax-Imin)./(Imax+Imin)).^2;
        #
        # With a fingerprint image at a 'standard' resolution of 500dpi suggested
        # parameter values might be:
        #
        #    [orientim, reliability] = ridgeorient(im, 1, 3, 3);
        #
        # See also: RIDGESEGMENT, RIDGEFREQ, RIDGEFILTER

        ### REFERENCES

        # May 2003      Original version by Raymond Thai,
        # January 2005  Reworked by Peter Kovesi
        # October 2011  Added coherence computation and orientsmoothsigma made optional
        #
        # School of Computer Science & Software Engineering
        # The University of Western Australia
        # pk at csse uwa edu au
        # http://www.csse.uwa.edu.au/~pk

        rows, cols = self._normim.shape
        #Calculate image gradients.
        sze = np.fix(6 * self.gradient_sigma)
        if np.remainder(sze, 2) == 0:
            sze = sze + 1

        gauss = cv2.getGaussianKernel(int(sze), self.gradient_sigma)
        f = gauss * gauss.T

        fy, fx = np.gradient(f)  #Gradient of Gaussian

        Gx = signal.convolve2d(self._normim, fx, mode='same')
        Gy = signal.convolve2d(self._normim, fy, mode='same')

        Gxx = np.power(Gx, 2)
        Gyy = np.power(Gy, 2)
        Gxy = Gx * Gy

        #Now smooth the covariance data to perform a weighted summation of the data.
        sze = np.fix(6 * self.block_sigma)
        if np.remainder(sze, 2) == 0:
            sze = sze + 1

        gauss = cv2.getGaussianKernel(int(sze), self.block_sigma)
        f = gauss * gauss.T

        Gxx = ndimage.convolve(Gxx, f)
        Gyy = ndimage.convolve(Gyy, f)
        Gxy = 2 * ndimage.convolve(Gxy, f)

        # Analytic solution of principal direction
        denom = np.sqrt(np.power(Gxy, 2) +
                        np.power((Gxx - Gyy), 2)) + np.finfo(float).eps

        sin2theta = Gxy / denom  # Sine and cosine of doubled angles
        cos2theta = (Gxx - Gyy) / denom

        if self.orient_smooth_sigma:
            sze = np.fix(6 * self.orient_smooth_sigma)
            if np.remainder(sze, 2) == 0:
                sze = sze + 1
            gauss = cv2.getGaussianKernel(int(sze),
                                          self.orient_smooth_sigma)
            f = gauss * gauss.T
            cos2theta = ndimage.convolve(cos2theta,
                                         f)  # Smoothed sine and cosine of
            sin2theta = ndimage.convolve(sin2theta, f)  # doubled angles

        self._orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2
Code example #60
0
import cv2
import numpy as np

img = cv2.cvtColor(cv2.imread('./images/dahlia.jpg'), cv2.COLOR_BGR2RGB)
# note: cv2.imshow expects BGR, so this RGB copy displays with red/blue swapped
rows, cols = img.shape[:2]

# generating vignette mask using Gaussian kernels
kernel_x = cv2.getGaussianKernel(
    cols, 150)  # second parameter = standard deviation of the Gaussian
kernel_y = cv2.getGaussianKernel(rows, 150)
kernel = kernel_y * kernel_x.T  # building 2D kernel whose size matches the size of the image.

mask = 255 * kernel / np.linalg.norm(kernel)
output = np.copy(img)

# applying the mask to each channel in the input image
for i in range(3):
    output[:, :, i] = output[:, :, i] * mask

cv2.imshow('Original', img)
cv2.imshow('Vignette', output)
cv2.waitKey(0)
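# Sketch of a slightly more defensive version of the masking loop above.
# `img * mask` is float64 while `output` is uint8; with sigma=150 the mask
# happens to peak just below 1 so the original assignment stays in range, but
# clipping and an explicit cast make that intent unambiguous.
output = np.clip(img * mask[..., np.newaxis], 0, 255).astype(np.uint8)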