Example #1
0
def skew_detection(image_gray):
    h, w = image_gray.shape[:2]
    eigen = cv2.cornerEigenValsAndVecs(image_gray, 12, 5)
    angle_sur = np.zeros(180, np.uint32)
    eigen = eigen.reshape(h, w, 3, 2)
    flow = eigen[:, :, 2]
    vis = image_gray.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)
    for x, y in np.int32(points):
        vx, vy = np.int32(flow[y, x] * d)
        # cv2.line(vis, (x - vx, y - vy), (x + vx, y + vy), (0, 255, 0), 1, cv2.LINE_AA)
        ang = angle(vx, vy)  # `angle` is a helper from the original source; see the sketch below
        angle_sur[(ang + 180) % 180] += 1

    # torr_bin = 30
    angle_sur = angle_sur.astype(np.float64)  # np.float was removed from NumPy
    angle_sur = (angle_sur - angle_sur.min()) / (angle_sur.max() - angle_sur.min())
    angle_sur = filters.gaussian_filter1d(angle_sur, 5)
    skew_v_val = angle_sur[20:180 - 20].max()
    skew_v = angle_sur[30:180 - 30].argmax() + 30
    skew_h_A = angle_sur[0:30].max()
    skew_h_B = angle_sur[150:180].max()
    skew_h = 0
    if skew_h_A > skew_v_val * 0.3 or skew_h_B > skew_v_val * 0.3:
        if skew_h_A >= skew_h_B:
            skew_h = angle_sur[0:20].argmax()
        else:
            skew_h = -angle_sur[160:180].argmax()
    return skew_h, skew_v
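The `angle` helper used above is not part of this excerpt. A minimal stand-in consistent with the histogram voting (an integer orientation in degrees, folded into [0, 180) by the caller) could look like the sketch below; the implementation is an assumption, not the original author's code.

import numpy as np

def angle(vx, vy):
    # Hypothetical helper: orientation of the eigenvector (vx, vy) in
    # integer degrees, in [-180, 180]; the caller maps it into [0, 180).
    return int(round(np.degrees(np.arctan2(vy, vx))))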
Example #2
0
def main():
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = '../data/starry_night.jpg'

    img = cv.imread(cv.samples.findFile(fn))
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    h, w = img.shape[:2]

    eigen = cv.cornerEigenValsAndVecs(gray, 15, 3)
    eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
    flow = eigen[:,:,2]

    vis = img.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)
    for x, y in np.int32(points):
        vx, vy = np.int32(flow[y, x]*d)
        cv.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv.LINE_AA)
    cv.imshow('input', img)
    cv.imshow('flow', vis)
    cv.waitKey()

    print('Done')
Example #3
0
    def test_texture_flow(self):

        img = self.get_sample('samples/cpp/pic6.png')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        flow = eigen[:,:,2]

        vis = img.copy()
        vis[:] = (192 + np.uint32(vis)) / 2
        d = 80
        points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)

        textureVectors = []

        for x, y in np.int32(points):
            textureVectors.append(np.int32(flow[y, x]*d))

        eps = 0.05

        testTextureVectors = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
        [-38, 70], [-79, 3], [0, 0], [0, 0], [-39, 69], [-79, -1],
        [0, 0], [0, 0], [0, -79], [17, -78], [-48, -63], [65, -46],
        [-69, -39], [-48, -63]]

        for i in range(len(testTextureVectors)):
            self.assertLessEqual(cv2.norm(textureVectors[i] - testTextureVectors[i], cv2.NORM_L2), eps)
Example #4
0
def skew_detection(image_gray):
    h, w = image_gray.shape[:2]
    eigen = cv2.cornerEigenValsAndVecs(image_gray, 12, 5)
    angle_sur = np.zeros(180, np.uint32)
    eigen = eigen.reshape(h, w, 3, 2)
    flow = eigen[:, :, 2]
    vis = image_gray.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)
    for x, y in np.int32(points):  # cast to int: float indices would raise in NumPy
        vx, vy = np.int32(flow[y, x] * d)
        # cv2.line(vis, (x - vx, y - vy), (x + vx, y + vy), (0, 255, 0), 1, cv2.LINE_AA)
        ang = angle(vx, vy)
        angle_sur[(ang + 180) % 180] += 1
    # torr_bin = 30
    angle_sur = angle_sur.astype(np.float64)  # np.float was removed from NumPy
    angle_sur = (angle_sur - angle_sur.min()) / (angle_sur.max() -
                                                 angle_sur.min())
    angle_sur = filters.gaussian_filter1d(angle_sur, 5)
    skew_v_val = angle_sur[20:180 - 20].max()
    skew_v = angle_sur[30:180 - 30].argmax() + 30
    skew_h_A = angle_sur[0:30].max()
    skew_h_B = angle_sur[150:180].max()
    skew_h = 0
    if (skew_h_A > skew_v_val * 0.3 or skew_h_B > skew_v_val * 0.3):
        if skew_h_A >= skew_h_B:
            skew_h = angle_sur[0:20].argmax()
        else:
            skew_h = -angle_sur[160:180].argmax()
    return skew_h, skew_v
Example #5
0
def get_top_n_points(img, special_points, window_size=3, k=0.05, top_n=1000):
    '''Function for sorting special points, generated by FAST. Uses Harris
    measure as metric.
    Input:
        img - np.array, input image
        special_points - list, each element - tuple with indices of special
        point
        window_size - int, size of window for calculating metric
        k - float, metric parameter
        top_n - number of points with better metric to be returned
    Output:
        measures - list of index tuples for the top_n special points with the
        highest Harris measure (the function builds a dict of point -> metric
        internally, but returns only the sorted keys)
    '''
    block_size = window_size
    k_prime = k / (1 - k)
    aperture_size = 3
    if img.shape[-1] == 3:
        # to grayscale
        buf_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        buf_img = img.copy()
    harr_matr = cv2.cornerEigenValsAndVecs(buf_img, block_size, aperture_size)
    measures = {}
    for point in special_points:
        l1, l2, x1, y1, x2, y2 = harr_matr[point[0]][point[1]]
        measures[point] = l1 * l2 - k * (l1 + l2)**2
        # another version of measure
        # measures[point] = (l1 - k_prime * l2) * (l2 - k_prime * l1)
    measures = {
        k: v
        for k, v in sorted(measures.items(), key=lambda item: item[1])
    }
    measures = list(measures.keys())[-top_n:]
    return measures
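A possible usage sketch for get_top_n_points, since the docstring says the candidates come from FAST. The input path is hypothetical; note that the function indexes harr_matr[point[0]][point[1]], so candidates are passed as (row, col) tuples.

import cv2

img = cv2.imread('input.jpg')  # hypothetical input path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
fast = cv2.FastFeatureDetector_create()
keypoints = fast.detect(gray, None)
# KeyPoint.pt is (x, y); swap to (row, col) for get_top_n_points
special_points = [(int(kp.pt[1]), int(kp.pt[0])) for kp in keypoints]
best = get_top_n_points(img, special_points, window_size=3, k=0.05, top_n=100)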
Example #6
0
def harrisCorners( src, winSize, blockSize, alpha = 0.04): 
	# Preprocess
	src = cv2.GaussianBlur(src,(5,5),0)
	pad = winSize + blockSize
	size = 2*winSize + 1
	# src_padded = cv2.copyMakeBorder(src,pad,pad,pad,pad,cv2.BORDER_REPLICATE) # DO I need this?
	windower = np.ones((size,size))
	norm = np.sum(windower)
	sc = float(2**(winSize-1))*blockSize
	sc = sc*255
	gradX = cv2.Sobel(src,cv2.CV_64F,1,0,ksize=blockSize)
	gradY = cv2.Sobel(src,cv2.CV_64F,0,1,ksize=blockSize)
	print(gradX[gradX.nonzero()])
	print(gradY[gradY.nonzero()])
	cv2.imshow('gradX', gradX)
	cv2.imshow('gradY', gradY)
	# prC = cv2.preCornerDetect(src, blockSize)
	# print prC.shape
	# return prC
	eigs = cv2.cornerEigenValsAndVecs(src, winSize, blockSize)
	eigs = eigs[:,:,0:2]
	R = np.zeros(src.shape)
	structTensor = np.zeros((2,2), dtype=np.float64)

	Lambda1 = eigs[:,:,0]
	Lambda2 = eigs[:,:,1]

	R = (Lambda1*Lambda2) - alpha*(Lambda1+Lambda2)**2

	# perform non-maximal suppression on R
	return R
Example #7
0
def shock_filter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4, thresh=70):
    h, w = img.shape[:2]
    blurred_gray = cv2.GaussianBlur(img, (5, 5), 1.5)
    img = cv2.addWeighted(img, 1.5, blurred_gray, -0.5, 0)
    t, img = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)
    
    for i in range(iter_n):
        eigen = cv2.cornerEigenValsAndVecs(img, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)
        x, y = eigen[:, :, 1, 0], eigen[:, :, 1, 1] 
        
        gxx = cv2.Sobel(img, cv2.CV_32F, 2, 0, ksize = sigma)
        gxy = cv2.Sobel(img, cv2.CV_32F, 1, 1, ksize = sigma)
        gyy = cv2.Sobel(img, cv2.CV_32F, 0, 2, ksize = sigma)
        gvv = x * x * gxx + 2 * x * y * gxy + y * y * gyy
        m = gvv < 0
        
        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img * (1.0 - blend) + img1 * blend)
    
    #return cv2.morphologyEx(img, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1)), iterations=1)
    return img
Example #8
0
def main():
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = '../Data/starry_night.jpg'

    img = cv.imread(cv.samples.findFile(fn))
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    height, width = img.shape[:2]

    eigen = cv.cornerEigenValsAndVecs(gray, 15, 3)
    eigen = eigen.reshape(height, width, 3, 2)  # [[e1, e2], v1, v2]
    flow = eigen[:, :, 2]

    vis = img.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points = np.dstack(np.mgrid[d / 2:width:d, d / 2:height:d]).reshape(-1, 2)
    for x, y in np.int32(points):
        vx, vy = np.int32(flow[y, x] * d)
        cv.line(vis, (x - vx, y - vy), (x + vx, y + vy), (0, 0, 0), 1,
                cv.LINE_AA)

    # hconcat allocates the side-by-side output itself, so the manual
    # size/np.empty pre-allocation in the original was dead code
    both = cv.hconcat([img, vis])
    cv.imshow("Original and texture flow results", both)
    cv.waitKey()

    print('Done')
Example #9
0
    def test_texture_flow(self):

        img = self.get_sample('samples/cpp/pic6.png')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        flow = eigen[:, :, 2]

        vis = img.copy()
        vis[:] = (192 + np.uint32(vis)) / 2
        d = 80
        points = np.dstack(np.mgrid[d / 2:w:d, d / 2:h:d]).reshape(-1, 2)

        textureVectors = []

        for x, y in np.int32(points):
            textureVectors.append(np.int32(flow[y, x] * d))

        eps = 0.05

        testTextureVectors = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
                              [-38, 70], [-79, 3], [0, 0], [0, 0], [-39, 69],
                              [-79, -1], [0, 0], [0, 0], [0, -79], [17, -78],
                              [-48, -63], [65, -46], [-69, -39], [-48, -63]]

        for i in range(len(testTextureVectors)):
            self.assertLessEqual(
                cv2.norm(textureVectors[i] - testTextureVectors[i],
                         cv2.NORM_L2), eps)
Example #10
0
def find_white(img):
    baw = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    baw2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # luma (Y) channel of the YCrCb image (replaces the original per-pixel copy loop)
    equation = baw[:, :, 0].astype(int)
    mask = np.zeros_like(equation)
    blockSize = 3  # 2*b+1
    ksize = 3
    eigenvalues = cv2.cornerEigenValsAndVecs(baw2, blockSize, ksize, borderType=cv2.BORDER_DEFAULT)
    # t, lowerRange and differenceRange are module-level constants in the original source
    for i in range(t, len(mask) - t, 1):
        for j in range(t, len(mask[i]) - t, 1):
            e1, e2 = eigenvalues[i][j][0], eigenvalues[i][j][1]
            # turn white where the pixel is bright and stands out from its
            # horizontal or vertical neighbours at distance t; the original's
            # second comparison was truncated, and its second bare `if` could
            # undo a match from the first, hence the elif/else chain
            if equation[i][j] >= lowerRange and equation[i][j] - equation[i][j - t] > differenceRange and equation[i][j] - equation[i][j + t] > differenceRange:
                mask[i][j] = 255
            elif equation[i][j] >= lowerRange and equation[i][j] - equation[i - t][j] > differenceRange and equation[i][j] - equation[i + t][j] > differenceRange:
                mask[i][j] = 255
            else:
                mask[i][j] = 0
    imask = mask > 0  # for every part of mask that is greater than 0
    white = np.zeros_like(img, np.uint8)  # copy format of img
    white[imask] = img[imask]  # make white resemble the same color
    return white
Example #11
0
def ptosHarris(filename,
               levels=5,
               tam_ventana=(3, 3),
               sigmaI=1.5,
               sigmaD=1,
               dibujar=True,
               N=500):
    # Build the Gaussian pyramid of the image
    gauss = pyrGauss(filename, levels=levels, visualize=False)

    # On each pyramid level, use OpenCV's cornerEigenValsAndVecs to extract the
    # eigenvalue/eigenvector information of the Harris matrix (blockSize and
    # ksize are chosen to match Gaussian masks of sigmaI and sigmaD respectively:
    # e.g. sigmaI = 1.5 gives blockSize = 10, and sigmaD = 1 gives ksize = 7).
    blockSize = round(6 * sigmaI + 1)
    ksize = round(6 * sigmaD + 1)

    corner = []
    for i in range(len(gauss)):
        corner.append(cv2.cornerEigenValsAndVecs(gauss[i], blockSize, ksize))

    # Apply one of the eigenvalue-based criteria studied and build a matrix with
    # the selection value associated with each pixel (for the Harris criterion we use k=0.04).
    mharris, seleccionados = seleccion(gauss, corner)

    # Non-maximum suppression over that matrix.
    matrices, informacion = noMaximos(mharris, seleccionados, levels,
                                      tam_ventana)

    # Sort the resulting points from highest to lowest value
    #   arg[1] -> level
    #   arg[0] -> point
    informacion.sort(key=lambda arg: mharris[arg[1]][arg[0]], reverse=True)

    # Keep at most the N (default 500) highest-valued points.
    del informacion[N:]

    # Split the points by scale
    informacion.sort(key=lambda arg: arg[1])
    ptos = []
    ptos_nivel = []
    nivel = 0
    for info in informacion:
        if nivel != info[1]:
            ptos.append(ptos_nivel)
            nivel += 1
            ptos_nivel = []

        ptos_nivel.append(info[0])
    ptos.append(ptos_nivel)

    # Show the result by drawing, on the original image, a circle centred on each
    # point with a radius proportional to the sigma used for its detection
    if dibujar:
        circulos(filename, ptos, sigmaI)

    return ptos, gauss
Example #12
0
def corner_eigen(img):
    print(img.dtype)
    eig = cv2.cornerEigenValsAndVecs(img, 7, 7)
    result = np.empty(img.shape, dtype=np.float32)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            lambda_1 = eig[i, j, 0]
            lambda_2 = eig[i, j, 1]
            result[i, j] = lambda_1 * lambda_2 - 0.05 * pow((lambda_1 + lambda_2), 2)
    return result
Example #13
0
    def make_gvv(channel):
        eigen = cv2.cornerEigenValsAndVecs(channel, str_sigma, -1)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(channel, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(channel, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(channel, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy  # second directional derivative along the dominant eigenvector v1
        return gvv
Example #14
0
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4, mctype=1):
    def make_gvv(channel):
        eigen = cv2.cornerEigenValsAndVecs(channel, str_sigma, -1)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(channel, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(channel, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(channel, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        return gvv

    h, w = img.shape[:2]
    for i in range(iter_n):
        print(i, end=' ')

        if mctype == 0:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            gvv = make_gvv(gray)
        elif mctype == 2:
            gvv_i = [make_gvv(img[:,:,c]) for c in range(3)]
            gvv = sum(gvv_i)
        elif mctype == 1:
            wi = [None] * 3
            for c in range(3):  # c, not i: avoid shadowing the outer iteration counter
                channel = img[:,:,c]
                eigen = cv2.cornerEigenValsAndVecs(channel, str_sigma, -1)
                eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
                x, y = eigen[:,:,1,0], eigen[:,:,1,1]
                wi[c] = (x, y)
            x = sum(v[0] for v in wi)  # v, not w: w is the image width
            y = sum(v[1] for v in wi)

            gi = [None] * 3
            for c in range(3):
                channel = img[:,:,c]
                gxx = cv2.Sobel(channel, cv2.CV_32F, 2, 0, ksize=sigma)
                gxy = cv2.Sobel(channel, cv2.CV_32F, 1, 1, ksize=sigma)
                gyy = cv2.Sobel(channel, cv2.CV_32F, 0, 2, ksize=sigma)
                gi[c] = (gxx, gxy, gyy)
            gxx = sum(g[0] for g in gi)
            gxy = sum(g[1] for g in gi)
            gyy = sum(g[2] for g in gi)

            gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy

        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img
Example #15
0
def eigenBasedFeats(_input):
	assert _input.ndim == 2 or _input.ndim == 3  # the original `ndim==1|ndim==3` tripped over bitwise-or precedence; grayscale is ndim 2
	if _input.ndim == 3:
		_input = cv2.cvtColor(_input, cv2.COLOR_RGB2GRAY)
	eigen = cv2.cornerEigenValsAndVecs(_input, 15, 3)
	eigen = eigen.reshape(_input.shape[0], _input.shape[1], 3, 2)
	texture_mag = normalize(np.sqrt(eigen[:,:,0,0]**2 + eigen[:,:,0,1]**2))
	texture_dir1 = normalize(np.arctan2(eigen[:,:,1,1], eigen[:,:,1,0]))
	texture_dir2 = normalize(np.arctan2(eigen[:,:,2,1], eigen[:,:,2,0]))
	texture_prop = np.dstack((texture_mag, texture_dir1, texture_dir2))
	return texture_prop
Example #16
0
def harris_corner_detection():
    """corners are regions in the image with large variation in intensity in all the directions"""
    """deal with grayScale img"""
    """with some mathematical tricks we can get R=det(M)-k(trace(M))^2, where M is sum(w(x,y)[IxIx,IxIy
                                                                                            IxIy,IyIy])
                                                                                            w(x,y) is weighted matrix"""
    """det(M)=lambda1(r1)lambda2(r2) and trace(M)=r1+r2
    r1 and r2 are the eigenvalues of M
    when |R| is small which means r1 and r2 is small the region is flat
    when R<0 which happens when r1>>r2 or vice versa, the region is edge
    when R is large, which means r1 and r2 are large and r1≈r2 the region is corner"""
    """:parameter img(float 32),blockSize(the neighbour considered for corner detection
    ksize: aperture parameter of the sobel derivative used.
    k: harris detector free parameter"""
    """it returns the score resultImg with the same of ori """
    img = cv.imread('../Resources/answerCard.png')
    imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    imgGray = np.float32(imgGray)
    imgHarris = cv.cornerHarris(imgGray, 2, 3, 0.04)
    """the value in imgHarris is the R mentioned above"""
    tempI = cv.cornerEigenValsAndVecs(imgGray, 2, 3)
    """tempI return(x,y,6) where 6 are
        λ1,λ2 are the non-sorted eigenvalues of M
        x1,y1 are the eigenvectors corresponding to λ1
        x2,y2 are the eigenvectors corresponding to λ2"""
    (x, y) = np.where(imgHarris > 0.2 * imgHarris.max())
    for perPixel in list(zip(y, x)):
        cv.circle(img, perPixel, 2, [0, 255, 0], -1)
    """if we want to get more precise result we can use cv.cornerSubPix(img(gray,float),centroids,
    half-radius of windowSize, half-radius of zero zone ( for singular situation of windowSize)
    that under this zero zone, pixels will not be summed"""
    """first we should get centroids through cv.connectedComponentsWithStats(we also use cv.connectedComponents in fg
    extraction named watershed algorithm), which returns
    ret,labels,stats(statistics output for each label, including the background label. Statistics are accessed via
    stats(label, COLUMN) where COLUMN is one of ConnectedComponentsTypes)
    ,centroids.
    It iterates for better results until reaches a threshold we set.
    """
    __, imgHarris = cv.threshold(imgHarris, 0.01 * imgHarris.max(), 255,
                                 cv.THRESH_BINARY)
    # !!!we need binary img to filter noise
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 1)
    # the first element means the algorithm returns when either criterion is
    # satisfied: max_iter (maximum iteration count) or eps (required precision)
    ret, labels, stats, centroids = cv.connectedComponentsWithStats(
        np.uint8(imgHarris))
    corners = cv.cornerSubPix(imgGray, np.float32(centroids), (1, 1), (-1, -1),
                              criteria)
    for perPixel in corners:
        # cornerSubPix returns float positions; newer OpenCV wants integer centers
        cv.circle(img, tuple(np.int32(perPixel)), 1, [0, 0, 255], -1)
    cv.imshow('temp', img)
    cv.waitKey(0)
    """ we can see the red points(subPixels) is better than the green points"""
Example #17
0
    def second_image_processing(thresh, k_value, img1, kp1, w):
        gray2 = cv2.cvtColor(second, cv2.COLOR_BGR2GRAY)

        eigen = cv2.cornerEigenValsAndVecs(gray2, w, 3)
        print(type(eigen))
        mc1 = np.zeros(gray2.shape)

        rows, cols = gray2.shape
        for i in range(rows):
            for j in range(cols):
                lam1 = eigen[i, j][0]
                lam2 = eigen[i, j][1]
                mc1[i, j] = (lam1 * lam2) - (k_value * (math.pow(
                    (lam1 + lam2), 2)))

        corner2 = []
        minvalue, maxvalue, minloc, maxloc = cv2.minMaxLoc(mc1)

        #==============================================================================
        #=============================================================================
        for index, x in np.ndenumerate(mc1):
            if x > thresh:
                corner2.append(index)

        # normalise
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 300, 1)
        total_corners1 = np.float32(corner2)

        cv2.cornerSubPix(gray2, total_corners1, (5, 5), (-1, -1), criteria)
        uniqueCorners1 = np.unique(total_corners1.astype(np.intp), axis=0)  # np.int0 was removed in NumPy 2.0

        print("unique", len(uniqueCorners1))
        flag = 1
        second1 = cv2.cvtColor(gray2, cv2.COLOR_GRAY2BGR)
        print(flag)
        for i in uniqueCorners1:
            x, y = i.ravel()
            cv2.rectangle(second1, (y - 10, x - 10), (y + 10, x + 10),
                          (0, 255, 0))
            cv2.putText(second1, str(flag), (y, x), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 255))
            flag += 1
        kp2 = [cv2.KeyPoint(x[1], x[0], 3) for x in uniqueCorners1]

        feature_matching(img1, second1, kp1, kp2)
Example #18
0
def get_harris(img, sigma_block_size=1.5, sigma_ksize=1, k=0.04,
               threshold=-10000, env=5, scale=-1):
    """
    Obtiene una lista potencial de los puntos Harris de una imagen (img).
    Los valores de los parámetros utilizados dependen del sigma que se
    recibe (sigma_block_size, sigma_ksize).
    Recibe (k), (threshol) y (env) utilizados en las funciones creadas para la
    obtención de los puntos.
    Se puede indicar la escala (scale) para devolver la escala a la que
    pertenecen los puntos generados junto a estos.
    """

    # Compute block_size and ksize
    block_size = get_block_size(sigma_block_size)
    ksize = get_ksize(sigma_ksize)

    # Compute the eigenvalues and eigenvectors of the image
    vals_and_vecs = cv2.cornerEigenValsAndVecs(img, block_size, ksize)

    # The result is a 3-dimensional matrix holding, for each pixel of the
    # image, 6 values:
    # - l1, l2: unsorted eigenvalues of M
    # - x1, y1: eigenvector corresponding to l1
    # - x2, y2: eigenvector corresponding to l2
    # Only the eigenvalues of M are needed, so the result is split into
    # 6 channels and l1 and l2 are kept.
    vals_and_vecs = cv2.split(vals_and_vecs)
    l1 = vals_and_vecs[0]
    l2 = vals_and_vecs[1]

    # Use the implemented Harris criterion to obtain the matrix with the
    # value associated with each pixel
    harris = selection_criteria_harris(l1, l2, k)

    # Suppress non-maximum values
    max_points = not_max_suppression_harris(harris, threshold, env)

    # Store the length to iterate over all points and add the scale
    length = len(max_points)

    # Add the scale to each point
    for i in range(length):
        max_points[i][0].size = scale

    # Return the points
    return max_points
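selection_criteria_harris is not shown in this excerpt; judging from the comments above (Harris criterion with k = 0.04), a minimal version might be the following sketch, an assumption rather than the original helper:

def selection_criteria_harris(l1, l2, k=0.04):
    # Element-wise Harris response from the two eigenvalue channels:
    # det(M) - k * trace(M)^2
    return l1 * l2 - k * (l1 + l2) ** 2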
Example #19
0
def enhanceLinear(iImage, iSigma, iBeta):
    Obcutljivost = 5
    imgG = cv.cvtColor(iImage, cv.COLOR_RGB2GRAY)
    blur = cv.GaussianBlur(imgG, (3, 3), 0)

    test = cv.cornerEigenValsAndVecs(blur, iSigma, 3)
    test2 = test[:, :, 0:2]

    Lambda1 = test[:, :, 0]
    Lambda2 = test[:, :, 1]
    #     print('Lambda1', Lambda1.shape)
    #     print('Lambda2', Lambda2.shape)

    QLA = (Lambda1 - Lambda2) / (Lambda1 + Lambda2 - iBeta)
    oQLA = np.int8(-(QLA * 255) * Obcutljivost)

    return oQLA
Example #20
0
def find_harris_corners(image, k, window_size, console_consumer=None):
    """
    Harris Corner Detector algorithm implementation.
    Harris detector responses are the CRF(x, y) values from the equation CRF = det(M) - k*(tr(M)**2) (Corner Response Function),
    where det(M) = Lambda1*Lambda2 is the value of the determinant, and tr(M) = Lambda1+Lambda2 is the trace
    of the matrix M. (Lambda1, Lambda2 = eigenvalues.)
    When the CRF value of the pixel point is greater than the given threshold, it is determined that the target point is a corner point.
    :param image: Input single-channel 8-bit or floating-point image.
    :param k: Harris detector free parameter in the equation.
    :param window_size: Neighborhood size.
    :param console_consumer: Used for logging purposes. (Passing step updates to a listener)
    :return: Image to store the Harris detector responses. It has the same size as source image. dst(x,y) = detM(x,y) − k*(trM(x,y)**2)
    Corners in the image can be found as the local maxima of this response map.
    """
    if not isinstance(image, np.ndarray):
        log(console_consumer, "find_harris_corners: Not a tensor. Was: Image=",
            image.__class__)
        return None

    if image.ndim not in [2, 3]:
        log(
            console_consumer,
            'find_harris_corners: Illegal image dimension. Image N-dimension can be 2 or 3 only. Was:',
            image.ndim)
        return None

    if image.ndim == 3:
        twoD = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        twoD = image

    # Find all eigen values and vectors for each pixel in the image.
    # Eigen values are used in order to find the determinant and trace, which are needed for Harris Corner equation
    eigen_vals_and_vectors = cv2.cornerEigenValsAndVecs(twoD, window_size, 5)

    # 0 is for first eigen value, and 1 is for second eigen value
    # Use batch operation rather than going in a very slow loop over them and calculate the response for each pixel
    # det(M) = lambda1 * lambda2
    # trace(M) = lambda1 + lambda2
    detM = eigen_vals_and_vectors[:, :, 0] * eigen_vals_and_vectors[:, :, 1]
    traceM = eigen_vals_and_vectors[:, :, 0] + eigen_vals_and_vectors[:, :, 1]
    harris_responses = detM - k * (traceM**2)

    return harris_responses
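The docstring notes that corners are the local maxima of the response map. A short follow-up sketch for picking them (3x3 non-maximum suppression via dilation; the threshold ratio is an illustrative choice, not part of the original):

import cv2
import numpy as np

def pick_corners(harris_responses, thresh_ratio=0.01):
    # Keep pixels that equal the maximum of their 3x3 neighbourhood
    # and exceed a fraction of the global maximum response.
    local_max = cv2.dilate(harris_responses, None)
    mask = (harris_responses == local_max) & \
           (harris_responses > thresh_ratio * harris_responses.max())
    ys, xs = np.nonzero(mask)
    return list(zip(xs, ys))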
Example #21
0
def detectHarrisCorners(image, window_size, block_size, alpha = 0.04):
	# preprocess the input image
	image = cv2.GaussianBlur(image,(5,5),0)

	gradient_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=block_size)
	gradient_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=block_size)
	# print gradient_x[gradient_x.nonzero()]
	# print gradient_y[gradient_y.nonzero()]
	cv2.imshow('Gradient along X', gradient_x)
	# cv2.imwrite('gradien_along_x.png', gradient_x)
	cv2.imshow('Gradient along Y', gradient_y)
	# cv2.imwrite('gradien_along_y.png', gradient_y)
	eigs = cv2.cornerEigenValsAndVecs(image, window_size, block_size)
	eigs = eigs[:,:,0:2]

	lambda_1 = eigs[:,:,0]
	lambda_2 = eigs[:,:,1]

	return lambda_1*lambda_2 - alpha*(lambda_1+lambda_2)**2
Example #22
0
File: P3.py Project: fjsaezm/VC
def harris(src, level, block_size=3, ksize=3, threshold=10):
    #Get lambda1,lambda2, eiv11,eiv12, eiv21,eiv22
    e_v = cv2.cornerEigenValsAndVecs(src, blockSize=block_size, ksize=ksize)
    #Corner strength matrix
    first_m = np.asarray([[
        corner_strength(e_v[i, j, 0], e_v[i, j, 1])
        for j in range(src.shape[1])
    ] for i in range(src.shape[0])])
    # Get values that are > than threshold
    threshold_m = np.asarray([[
        first_m[i, j] if first_m[i, j] > threshold else 0
        for j in range(first_m.shape[1])
    ] for i in range(first_m.shape[0])])

    # Suppress non-maxima in a winsize x winsize neighborhood
    sup_no_max_m = supresionNoMax(threshold_m, 5)

    # Return keypoints
    return get_keypoints(sup_no_max_m, block_size, level)
Example #23
0
def minEigenValueDetection(gray_image, src_color):
    eigenValues = cv2.cornerEigenValsAndVecs(gray_image, 3, 3)

    corners = []

    corners_inserted_image = np.empty(gray_image.shape, dtype=np.float32)
    corners_inserted_image.fill(0)

    for i in range(gray_image.shape[0]):
        for j in range(gray_image.shape[1]):

            lambda_1 = eigenValues[i, j, 0]
            lambda_2 = eigenValues[i, j, 1]

            if lambda_1 > 0 and lambda_2 > 0:
                if lambda_2 > 0.09:
                    corners_inserted_image[i, j] = 255
                    corners.append((j, i))
                    src_color = cv2.circle(src_color, (j, i), 2, (0, 255, 0))

    return corners_inserted_image, src_color, corners
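Since cornerEigenValsAndVecs returns the eigenvalues unsorted (as other examples on this page note), thresholding lambda_2 alone is not quite a min-eigenvalue criterion. OpenCV exposes min(lambda1, lambda2) directly; an alternative sketch using cornerMinEigenVal, keeping the same illustrative 0.09 threshold:

import cv2
import numpy as np

def min_eigenvalue_corners(gray_image, thresh=0.09):
    # cornerMinEigenVal returns min(lambda1, lambda2) per pixel
    # (blockSize=3, ksize=3, matching the example above).
    min_eig = cv2.cornerMinEigenVal(gray_image, 3, ksize=3)
    ys, xs = np.nonzero(min_eig > thresh)
    return list(zip(xs, ys))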
Example #24
0
def get_eigenVals_and_eigenVecs(pyramide, thresdhold, blockS, kSize):
    eingen_vals_and_vecs = []
    strong_values = []

    for im in pyramide:
        # Get the matrix with the eigenvalues of the Harris matrix
        # and the corresponding eigenvectors for each eigenvalue
        result = cv2.split(
            cv2.cornerEigenValsAndVecs(src=im.astype(np.uint8),
                                       blockSize=blockS,
                                       ksize=kSize))
        # Compute the determinant as the product of the eigenvalues
        det = cv2.mulSpectrums(result[0], result[1], flags=cv2.DFT_ROWS)
        # Compute the trace as the sum of the eigenvalues
        trace = result[0] + result[1]
        # Apply the Harris scoring function
        eingen_vals_and_vecs.append(harrisCriterio(det, trace))
        # And get the indices of the pixels that exceed the minimum threshold
        strong_values.append(np.where(eingen_vals_and_vecs[-1] > thresdhold))

    return eingen_vals_and_vecs, strong_values
Example #25
0
def harris():
    img = reloadimage()
    img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    k = cv2.getTrackbarPos('k', 'image1')/1000
    threshold = cv2.getTrackbarPos('threshold', 'image1')/10
    winsize = cv2.getTrackbarPos('winsize', 'image1')
    max_r = 0
    r = []

    corner_list = []


    m = cv2.cornerEigenValsAndVecs(np.array(img1, dtype="float32"), 3, 3)
    print(m.shape)
    for i in range(0, m.shape[0]):
        for j in range(0, m.shape[1]):
            # per-pixel Harris response from the two eigenvalue channels
            # (the original indexed whole rows via m[0] and m[1])
            r.append([i, j, m[i, j, 0]*m[i, j, 1] - k*((m[i, j, 0] + m[i, j, 1])**2)])

    # first pass: find the maximum response (the original compared against
    # threshold*max_r here, which is not a running maximum)
    for pixel in r:
        if pixel[2] > max_r:
            max_r = pixel[2]

    # second pass: keep responses above threshold*max_r
    for pixel in r:
        if pixel[2] > threshold*max_r:
            corner_list.append((pixel[1], pixel[0]))

    while corner_list:
        x = corner_list.pop()
        cv2.rectangle(img,(x[0]-1,x[1]+1),(x[0]+1,x[1]-1),(0,0,255),1)

    cv2.imshow('image1', img)
    print("k = ", k)
    print("threshold = ", threshold)
    print("winsize = ", winsize)


    return
Example #26
0
File: p3.py Project: advy99/VC
def puntos_interes(imagen, tam_bloque, k_size):
    """
    Funcion para obtener los puntos de interes de una imagen
    """

    # obtenemos los valores con opencv
    val_eigen = cv.cornerEigenValsAndVecs(imagen, tam_bloque, k_size)

    # nos quedamos con los valores singulares
    val_eigen = val_eigen[:, :, :2]

    # calculamos el producto y la suma de los valores singulares
    producto = np.prod(val_eigen, axis=2)
    suma = np.sum(val_eigen, axis=2)

    # hacemos la division de los productos y la suma, y la salida será una matriz de ceros a excepción de donde la suma sera 0, para no dividir por 0
    puntos_interes = np.divide(producto,
                               suma,
                               out=np.zeros(imagen.shape),
                               where=suma != 0.0)

    return puntos_interes
Example #27
0
def coherence_filter(im, sigma=15, str_sigma=15, blend=0.5, n_iter=5):
    """

    Args:
        im: image
        sigma: odd int
            kernel size for derivatives
        str_sigma: odd int
            kernel size for Eigen values and vectors
        blend: float [0, ... ,1]
            blend coefficient
        n_iter: int
            count of iterations
    Function makes coherence filter on image

    Returns: ndarray
    Function returns filtered image
    """
    h, w = im.shape[:2]

    for i in range(n_iter):
        eigen = cv.cornerEigenValsAndVecs(im, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]

        x, y = eigen[:, :, 1, 0], eigen[:, :, 1, 1]

        gxx, gxy, gyy = get_second_derivative(im, sigma)
        gvv = x * x * gxx + 2 * x * y * gxy + y * y * gyy

        m = gvv < 0

        ero = cv.erode(im, None)
        dil = cv.dilate(im, None)
        im1 = ero
        im1[m] = dil[m]
        im = np.uint8(im * (1.0 - blend) + im1 * blend)

    return im
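get_second_derivative is not shown in this excerpt; based on the other coherence_filter variants on this page, it presumably wraps three Sobel calls. A sketch under that assumption:

import cv2 as cv

def get_second_derivative(im, sigma):
    # Second-order image derivatives via Sobel, as in the other
    # coherence_filter variants (sigma is used as the kernel size).
    gxx = cv.Sobel(im, cv.CV_32F, 2, 0, ksize=sigma)
    gxy = cv.Sobel(im, cv.CV_32F, 1, 1, ksize=sigma)
    gyy = cv.Sobel(im, cv.CV_32F, 0, 2, ksize=sigma)
    return gxx, gxy, gyy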
Example #28
0
def calcular_Harris(img,
                    blocksize,
                    ksize,
                    n_piramide,
                    escala,
                    reescalar=True,
                    marco=True,
                    criterio=0.04):

    mi_imagen_harris = cv2.cornerEigenValsAndVecs(img, blocksize, ksize)

    # Harris response: det(M) - k * trace(M)^2
    # (the original squared the whole expression instead of just the trace,
    # and preallocated a buffer that was immediately overwritten)
    l1 = mi_imagen_harris[:, :, 0]
    l2 = mi_imagen_harris[:, :, 1]
    nueva_imagen = (l1 * l2 - criterio * pow(l1 + l2, 2)).astype(np.float32)

    lista_maximos_coordenadas = suprimir_no_maximos(nueva_imagen, blocksize,
                                                    n_piramide, escala)

    return nueva_imagen, lista_maximos_coordenadas
Example #29
0
def coherence_filter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4):
    h, w, _ = img.shape

    for i in range(iter_n):
        #       print(i)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #       print(gray)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        #       print(eigen)
        eigen = eigen.reshape(h, w, 3, 2)  #[e1,e2],v1,v2
        x, y = eigen[:, :, 1, 0], eigen[:, :, 1, 1]
        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x * x * gxx + 2 * x * y * gxy + y * y * gyy
        m = gvv < 0
        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img * (1.0 - blend) + img1 * blend)  # use the blend parameter (the original hard-coded .5)

    return img
Example #30
0
    def test_texture_flow(self):

        img = self.get_sample('samples/data/chessboard.png')

        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        eigen = cv.cornerEigenValsAndVecs(gray, 5, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        flow = eigen[:,:,2]

        d = 300
        eps = d / 30

        points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)

        textureVectors = []
        for x, y in np.int32(points):
            textureVectors.append(np.int32(flow[y, x]*d))

        for i in range(len(textureVectors)):
            self.assertTrue(cv.norm(textureVectors[i], cv.NORM_L2) < eps
            or abs(cv.norm(textureVectors[i], cv.NORM_L2) - d) < eps)
Example #31
0
    def test_texture_flow(self):

        img = self.get_sample('samples/data/chessboard.png')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = img.shape[:2]

        eigen = cv2.cornerEigenValsAndVecs(gray, 5, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        flow = eigen[:,:,2]

        d = 300
        eps = d / 30

        points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)

        textureVectors = []
        for x, y in np.int32(points):
            textureVectors.append(np.int32(flow[y, x]*d))

        for i in range(len(textureVectors)):
            self.assertTrue(cv2.norm(textureVectors[i], cv2.NORM_L2) < eps
            or abs(cv2.norm(textureVectors[i], cv2.NORM_L2) - d) < eps)
Example #32
0
def flowField(im, fig=None, blocksize=11, ksize=3, resizefactor=1, eigenvec=1):
    """ Calculate flowfield of an image

    Args:
        im (numpy array): input image
        fig (integer or None): number of visualization window
    Returns:
        flow (numpy array): flow field (the selected eigenvector per pixel)
        ll (numpy array): the eigenvalue pair [e1, e2] per pixel
    """
    im8 = scaleImage(im)
    im8 = cv2.resize(im8, None, fx=resizefactor, fy=resizefactor)
    h, w = im8.shape[:2]
    eigen = cv2.cornerEigenValsAndVecs(im8, blocksize, ksize=ksize)
    eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
    flow = eigen[:, :, eigenvec]
    ll = eigen[:, :, 0]

    if fig is not None:
        vis = im8.copy()
        vis[:] = (192 + np.uint32(vis)) / 2
        d = 12
        points = np.dstack(np.mgrid[int(d / 2):w:d,
                                    int(d / 2):h:d]).reshape(-1, 2)
        for x, y in points:
            vx, vy = np.int32(flow[y, x] * d)
            # vx,vy=int(ff*ll[y,x,0]*vx), int(ff*ll[y,x,0]*vy)
            try:
                linetype = cv2.LINE_AA
            except:
                linetype = 16  # older opencv

            cv2.line(vis, (x - vx, y - vy), (x + vx, y + vy), (0, 0, 0), 1,
                     linetype)
        cv2.imshow('input', im8)
        cv2.imshow('flow', vis)
    return flow, ll
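scaleImage is not shown in this excerpt; a minimal stand-in that maps an arbitrary-range image to uint8 (consistent with its use before cv2.resize and cornerEigenValsAndVecs) could be the following sketch, an assumption rather than the original helper:

import numpy as np

def scaleImage(im):
    # Hypothetical helper: linearly map the image onto the 8-bit range.
    im = np.asarray(im, dtype=np.float64)
    rng = im.max() - im.min()
    if rng == 0:
        return np.zeros(im.shape, np.uint8)
    return np.uint8(255 * (im - im.min()) / rng)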
Example #33
0
def coherence_filter(img, sigma, str_sigma, blend, iter_n):
    h, w = img.shape[:2]
    gray=img
    for i in range(iter_n):
        #print(i)

        #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    #print('done')
    return img
Example #34
0
	def __extract_texture__(self,frame,regions,region_props,useGLCM=True,useEigen=True):
		num_regions = len(np.unique(regions));
		gray = GRAY(frame)
		texture_data_glcm = None; texture_data_eig = None
		if useGLCM:
			def texture_prop(region,patch_size = 2):
				_mean_min = region_props[0][region]-patch_size;
				_mean_max = region_props[0][region]+patch_size;
				glcm = greycomatrix(gray[_mean_min[0]:_mean_max[0],_mean_min[1]:_mean_max[1]],
							[3], [0], 256, symmetric=True, normed=True)  # `gray`, not the undefined `gray_frame`
				_dis = greycoprops(glcm, 'dissimilarity')[0, 0];
				_cor = greycoprops(glcm, 'correlation')[0, 0];
				return (_dis,_cor);
			texture_data_glcm = np.array([texture_prop(region) for region in range(num_regions)])
		
		if useEigen:			
			eigen = cv2.cornerEigenValsAndVecs(gray,15,3);
			eigen = eigen.reshape(gray.shape[0], gray.shape[1], 3, 2)
			texture_mag = normalize(np.sqrt(eigen[:,:,0,0]**2 +  eigen[:,:,0,1]**2))
			texture_dir1 = normalize(np.arctan2(eigen[:,:,1,1],eigen[:,:,1,0]))
			texture_dir2 = normalize(np.arctan2(eigen[:,:,2,1],eigen[:,:,2,0]))
			texture_prop = np.dstack((texture_mag, texture_dir1, texture_dir2))  # dir2, not dir1 twice (cf. Example #15)
			texture_data_eig = np.array([np.sum(texture_prop[np.where(regions==region)],0)
												for region in range(num_regions)])
			_inv_freq = 1/(region_props[1]+0.0000001); 	
			texture_data_eig = texture_data_eig * _inv_freq[:,None]
			
		if useGLCM and useEigen:
			texture_data = np.hstack((texture_data_glcm,texture_data_eig));
		elif useGLCM:
			texture_data = texture_data_glcm
		elif useEigen:
			texture_data = texture_data_eig
		else:
			raise ValueError("arguments useGLCM and useEigen cannot both be False")  # ArgumentError is not a Python builtin
		return texture_data
Example #35
0
def coherence_filter(img, sigma=15, str_sigma=15, blend=0.7, iter_n=5):
    h, w = img.shape[:2]

    img = np.float32(img)

    for i in range(iter_n):
        gray = img
        eigen = cv.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:, :, 1, 0], eigen[:, :, 1, 1]

        gxx = cv.Sobel(gray, cv.CV_32F, 2, 0, ksize=sigma)
        gxy = cv.Sobel(gray, cv.CV_32F, 1, 1, ksize=sigma)
        gyy = cv.Sobel(gray, cv.CV_32F, 0, 2, ksize=sigma)
        gvv = x * x * gxx + 2 * x * y * gxy + y * y * gyy
        m = gvv < 0

        ero = cv.erode(img, None)
        dil = cv.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img * (1.0 - blend) + img1 * blend)

    return img
Example #36
0
    def draw(self, img_in, extra_in):
        img = supt.make_gray(img_in)

        values = cv2.cornerEigenValsAndVecs(
            src=img,
            blockSize=self.block_size,
            ksize=self.k_size,
            borderType=self.border_type,
        )

        L1 = values[:, :, 0]
        L2 = values[:, :, 1]
        # Harris response; the vectorized expression replaces the preallocated
        # np.empty buffer, which was immediately overwritten
        Mc = np.multiply(L1, L2) - 0.04 * np.multiply(L1 + L2, L1 + L2)
        values_min, values_max, _, _ = cv2.minMaxLoc(Mc)
        bound = values_min + (values_max - values_min) * self.threshold / 100

        mask = Mc > bound

        shape_y, shape_x = np.shape(Mc)
        [X, Y] = np.meshgrid(np.arange(shape_x), np.arange(shape_y))
        pts_harris = np.stack((X[mask], Y[mask]), axis=1)

        return img_in, pts_harris
Example #37
0
def coherence_filter(img, sigma=11, str_sigma=11, blend=0.5, iter_n=4):
    h, w = img.shape[:2]

    for i in range(iter_n):
        print(i, end=" ")

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:, :, 1, 0], eigen[:, :, 1, 1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x * x * gxx + 2 * x * y * gxy + y * y * gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img * (1.0 - blend) + img1 * blend)
    print("done")
    return img
Example #38
0
                iterations = 1)
        else : 
            img1u8 = raw
        # CannyThreshold()
        cv2.imshow("filtered", img1u8)
        cv2.waitKey(1)
    elif keydown & 0xff == ord('f'):
        pre_filter = not pre_filter
        # if (pre_filter):
            # use_gaussian = False
        if f_count == 0:
            f_count = f_count + 1

            h = vecshape[1][0]
            w = vecshape[1][1]
            eigen = cv2.cornerEigenValsAndVecs(img1u8, 15, 3)
            eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
            flow = eigen[:,:,2]
            vis = img1u8.copy()
            vis[:] = (192 + np.uint32(vis)) / 2
            d = 12
            points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)
            for x, y in np.int32(points):
                vx, vy = np.int32(flow[y, x]*d)
                cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
            cv2.imshow('input', img1u8)
            cv2.imshow('flow', vis)
            cv2.waitKey(0)
            

Example #39
0
gray = np.array(blur[t], dtype='float32')

corners = cv2.goodFeaturesToTrack(gray,5,0.01,5)
corners = corners.astype(np.intp)  # np.int0 was removed in NumPy 2.0

for j,i in enumerate(corners):
    x,y = i.ravel()
    cv2.circle(gray,(x,y),1, 200 + j * 10,-1)

plt.figure(25); plt.clf();
plt.imshow(gray, interpolation = 'none'); plt.show()


#%%
#gray = np.array(blur[t], dtype = 'float32')
evs = cv2.cornerEigenValsAndVecs(gray, blockSize = 3, ksize = 3)

plt.figure(26); plt.clf();
for i in range(6):
  plt.subplot(2,3,i+1);
  plt.imshow(evs[:,:,i], interpolation ='none');
  


for c in corners[:,0]:
  print(evs[c[0], c[1], 0:2])

#%%
evs[corners[:,0,0], corners[:,0,1], 0:2]

#%%  

Example #40
0
def nada3(t):
    print("Filter used as base image:", Filters[t])


filters = [img, imgBlur, imgFiltered, imgGB, imgM, imgL, imgH]
Filters = ["Raw image", "Blur", "Custom filter", "Gaussian blur", "Morph", "Laplacian", "Harris corner"]
cv2.namedWindow("input", cv2.WINDOW_NORMAL)
cv2.namedWindow("flow", cv2.WINDOW_NORMAL)
cv2.createTrackbar("filter", "flow", 0, 6, nada3)
while True:
    base_image = cv2.getTrackbarPos("filter", "flow")
    Img = filters[base_image]
    h, w = Img.shape[:2]
    eigen = cv2.cornerEigenValsAndVecs(Img, 15, 3)
    eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
    flow = eigen[:, :, 2]

    vis = Img.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points = np.dstack(np.mgrid[d // 2 : w : d, d // 2 : h : d]).reshape(-1, 2)
    for x, y in np.int32(points):
        vx, vy = np.int32(flow[y, x] * d)
        cv2.line(vis, (x - vx, y - vy), (x + vx, y + vy), (0, 0, 0), 1, cv2.LINE_AA)  # cv2.CV_AA is the removed OpenCV 2.x name

    cv2.imshow("input", Img)
    cv2.imshow("flow", vis)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break  # body missing in the excerpt; `break` on ESC assumed
Example #41
0
if __name__ == '__main__':
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = '../data/starry_night.jpg'

    img = cv2.imread(fn)
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    h, w = img.shape[:2]

    eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3)
    eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
    flow = eigen[:,:,2]

    vis = img.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points = np.dstack(np.mgrid[d // 2:w:d, d // 2:h:d]).reshape(-1, 2)
    for x, y in np.int32(points):
        vx, vy = np.int32(flow[y, x]*d)
        cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
    cv2.imshow('input', img)
    cv2.imshow('flow', vis)
    cv2.waitKey()
Example #42
0
parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
args = parser.parse_args()

src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)

src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)

# Set some parameters
blockSize = 3
apertureSize = 3

# My Harris matrix -- Using cornerEigenValsAndVecs
myHarris_dst = cv.cornerEigenValsAndVecs(src_gray, blockSize, apertureSize)

# calculate Mc
Mc = np.empty(src_gray.shape, dtype=np.float32)
for i in range(src_gray.shape[0]):
    for j in range(src_gray.shape[1]):
        lambda_1 = myHarris_dst[i,j,0]
        lambda_2 = myHarris_dst[i,j,1]
        Mc[i,j] = lambda_1*lambda_2 - 0.04*pow( ( lambda_1 + lambda_2 ), 2 )

myHarris_minVal, myHarris_maxVal, _, _ = cv.minMaxLoc(Mc)

# Create Window and Trackbar
cv.namedWindow(myHarris_window)
cv.createTrackbar('Quality Level:', myHarris_window, myHarris_qualityLevel, max_qualityLevel, myHarris_function)
myHarris_function(myHarris_qualityLevel)
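The per-pixel loop above can be replaced by an equivalent vectorized computation over the two eigenvalue channels (a behavior-identical sketch, reusing myHarris_dst from the snippet above):

lambda_1 = myHarris_dst[:, :, 0]
lambda_2 = myHarris_dst[:, :, 1]
Mc = lambda_1 * lambda_2 - 0.04 * (lambda_1 + lambda_2) ** 2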
Example #43
0
#              todraw = cv2.drawContours(todraw, contours, 1, (0, 0, 255), -1)
#              cv2.imshow("pressed f", todraw)
#              cv2.moveWindow("pressed f", 20, 20)
        else :
            cv2.destroyWindow("mask")
            curr = np.ma.copy(img_f32c1)
#              CannyThreshold(mmnorm_to_u8(curr))


    elif keydown & 0xff == ord('g'):
        if g_count == 0:
            g_count = g_count + 1

            h = vecshape[1][0]
            w = vecshape[1][1]
            eigen = cv2.cornerEigenValsAndVecs(np.float32(curr), 15, 9)
            eigen = eigen.reshape(h, w, 3, 2)  #  [[e1, e2], v1, v2]

            eigenvalues = eigen[:, :, 0]
            flow1 = eigen[:, :, 1]
            flow2 = eigen[:, :, 2]

#              # regulate direction of the flow field
#              for i in xrange(len(flow2)):
#                  for j in xrange(len(flow2[0])):
#                      if (flow2[i][j][0] < 0 and flow2[i][j][1] < 0):
#                         flow2[i][j][0] = -flow2[i][j][0]
#                         flow2[i][j][1] = -flow2[i][j][1]
#                     elif (flow2[i][j][0] < 0 and flow2[i][j][1] > 0):
#                         flow2[i][j][0] = -flow2[i][j][0]
#                         flow2[i][j][1] = -flow2[i][j][1]