Example No. 1
File: main2.py  Project: Daiver/jff
def takePixelInterpolated(img, pt):
    """Bilinearly interpolate a pixel value at a sub-pixel location (y, x)."""
    rows, cols = img.shape
    y, x = pt

    # Map the four neighbouring integer coordinates back into the image by reflection.
    x0 = cv2.borderInterpolate(int(x),     cols, cv2.BORDER_REFLECT_101)
    x1 = cv2.borderInterpolate(int(x + 1), cols, cv2.BORDER_REFLECT_101)
    y0 = cv2.borderInterpolate(int(y),     rows, cv2.BORDER_REFLECT_101)
    y1 = cv2.borderInterpolate(int(y + 1), rows, cv2.BORDER_REFLECT_101)

    # Fractional offsets within the pixel cell.
    a = x - int(x)
    c = y - int(y)
    # Blend the four neighbours: first along x, then along y.
    b = ((img[y0, x0] * (1.0 - a) + img[y0, x1] * a) * (1.0 - c)
         + (img[y1, x0] * (1.0 - a) + img[y1, x1] * a) * c)
    return b
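# Usage sketch (illustrative, not part of the original main2.py): sample a small
# synthetic grayscale image at the sub-pixel location (y, x) = (1.5, 2.25).
import numpy as np

test_img = np.arange(25, dtype=np.float64).reshape(5, 5)
print(takePixelInterpolated(test_img, (1.5, 2.25)))  # bilinear blend of the 4 neighbours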
Example No. 2
def DoG(scales, orient, size):
    # scale=range(1,scales+1)
    # print(scale)
    orients = np.linspace(0, 360, orient)
    # kernels=[[0 for x in range(1,scales)]for y in range(1,orient)]
    DoG_stack = list()
    for each_scale in scales:
        for each_size in size:
            kernel = gkern(each_size, each_scale)
            # cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT) evaluates to 0,
            # which is cv2.BORDER_CONSTANT itself.
            border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
            sobelx64f = cv2.Sobel(kernel,
                                  cv2.CV_64F,
                                  1,
                                  0,
                                  ksize=5,
                                  borderType=border)
            for index, eachOrient in enumerate(orients):
                # plt.figure(figsize=(16,2))
                image = skimage.transform.rotate(sobelx64f, eachOrient)
                DoG_stack.append(image)

                # plt.subplots_adjust(hspace=0.1,wspace=1.5)
                # plt.subplot(scales,orient,index+1)
                # plt.imshow(image,cmap='binary')
                # plt.show()
    return DoG_stack
Example No. 3
def preprocessingAffine(img, M, scale, hasNoise, hasBlur, hasVflip, hasHflip, hasShaei):
    from keras.preprocessing import image

    img2 = cv2.resize(img, (RESIZE_SIZE, RESIZE_SIZE), interpolation=cv2.INTER_NEAREST)
    # cv2.borderInterpolate(2, 10, cv2.BORDER_REFLECT_101) evaluates to 2, which OpenCV
    # interprets as cv2.BORDER_REFLECT when passed as borderMode.
    img2 = cv2.warpAffine(img2, M, (RESIZE_SIZE, RESIZE_SIZE),
                          borderMode=cv2.borderInterpolate(2, 10, cv2.BORDER_REFLECT_101))

    if hasBlur:
        img2 = cv2.GaussianBlur(img2, (3, 3), 1.0)
    if hasVflip:
        img2 = cv2.flip(img2, 0)
    if hasHflip:
        img2 = cv2.flip(img2, 1)

    if len(hasShaei) != 0:
        base = .3 * RESIZE_SIZE
        pts1 = np.float32([[0, 0], [RESIZE_SIZE, 0], [RESIZE_SIZE, RESIZE_SIZE], [0, RESIZE_SIZE]])
        pts2 = np.float32([[-base, -base], [RESIZE_SIZE + base, -base],
                           [RESIZE_SIZE + base, RESIZE_SIZE + base], [-base, RESIZE_SIZE + base]])
        for i in hasShaei:
            pts2[i] = pts1[i]
        M = cv2.getPerspectiveTransform(pts1, pts2)
        img2 = cv2.warpPerspective(img2, M, (RESIZE_SIZE, RESIZE_SIZE))

    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    img2 = Image.fromarray(img2)
    img2 = image.img_to_array(img2)

    return img2
Example No. 4
def kernel2matrixA(kernel, height, width):
    """Build the N x N matrix A (N = height * width) whose product with the
    flattened image applies `kernel` as a correlation with replicated borders."""
    N = height * width
    k_size = kernel.shape[0]
    c = math.floor(k_size / 2)

    A = np.zeros((N, N))
    for col in range(height):
        for row in range(width):
            tmp = np.zeros((height, width))
            for j in range(-c, c + 1):
                for i in range(-c, c + 1):
                    y = cv2.borderInterpolate(col + j, height,
                                              cv2.BORDER_REPLICATE)
                    x = cv2.borderInterpolate(row + i, width,
                                              cv2.BORDER_REPLICATE)
                    tmp[y, x] += kernel[j + c, i + c]
            A[col * width + row] = np.reshape(tmp, N)
    return A
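# Verification sketch (an assumption, not part of the original file): applying A to the
# flattened image should reproduce cv2.filter2D with replicated borders.
import numpy as np

test_img = np.random.rand(6, 7)
test_kernel = np.random.rand(3, 3)
A = kernel2matrixA(test_kernel, 6, 7)
direct = cv2.filter2D(test_img, cv2.CV_64F, test_kernel,
                      borderType=cv2.BORDER_REPLICATE).reshape(-1)
print(np.allclose(A @ test_img.reshape(-1), direct))  # expected: True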
Example No. 5
def detect_seatbelt_lines(frame, originalFrame):
    if frame is None or not frame.any():
        return []

    # should be calculated from image darkness
    alpha = 1.3

    frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=0)

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # cv2.borderInterpolate(0, 1, cv2.BORDER_DEFAULT) returns 0, so the Sobel calls
    # below effectively use cv2.BORDER_CONSTANT.
    border = cv2.borderInterpolate(0, 1, cv2.BORDER_DEFAULT)
    sobel_x = cv2.Sobel(frame_gray,
                        cv2.CV_16S,
                        1,
                        0,
                        ksize=3,
                        scale=1,
                        delta=0,
                        borderType=border)
    sobel_y = cv2.Sobel(frame_gray,
                        cv2.CV_16S,
                        0,
                        1,
                        ksize=3,
                        scale=1,
                        delta=0,
                        borderType=border)

    abs_x = cv2.convertScaleAbs(sobel_x)
    abs_y = cv2.convertScaleAbs(sobel_y)

    weighted = cv2.addWeighted(abs_x, 0.5, abs_y, 0.5, 0)
    retval, thresholded = cv2.threshold(weighted, 100, 255, cv2.THRESH_BINARY)

    cv2.imshow('frame', frame)

    coef_y, coef_x = originalFrame.shape[:2]
    rho = 1  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = (
        int(coef_y / 55)
    )  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = coef_y / 20  # minimum number of pixels making up a line
    max_line_gap = coef_y / 14  # maximum gap in pixels between connectable line segments

    # run Hough on edge detected image
    lines = cv2.HoughLinesP(thresholded, rho, theta, threshold, np.array([]),
                            min_line_length, max_line_gap)
    if lines is None:
        return []

    seatbelt_lines = filter_seatbelt_lines(lines)
    return seatbelt_lines
Example No. 6
    def get_img_region(self,
                       img: np.ndarray,
                       i: int,
                       j: int) -> np.ndarray:
        """Extract a region of shape `self.size` centred on pixel (i, j).

        :param img: source image
        :param i: row index of the region centre
        :param j: column index of the region centre
        :return: the extracted region, with out-of-range indexes resolved
            according to `self.border_type`
        """
        region = np.zeros(shape=self.size, dtype=np.float32)
        i_offset = self.size[1] // 2
        j_offset = self.size[0] // 2
        for i_region, i_img in enumerate(range(i - i_offset, i + i_offset + 1)):
            for j_region, j_img in enumerate(range(j - j_offset, j + j_offset + 1)):
                # calculate the interpolated indexes given certain border type
                i_img = cv2.borderInterpolate(i_img, img.shape[0], self.border_type)
                j_img = cv2.borderInterpolate(j_img, img.shape[1], self.border_type)
                region[i_region, j_region] = img[i_img, j_img]
        return region
Example No. 7
def dogFilterBank(norient, scale, sz):
    # returns a filter bank of size sz x sz x (norient * len(scale))

    F = np.zeros([sz, sz, norient * len(scale)])
    count = 0
    for s in scale:
        constant = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
        kernel = gauss2D(sz, s)
        dG = cv2.Sobel(kernel, cv2.CV_64F, 1, 0, ksize=3, borderType=constant)
        orient = np.linspace(0, 360, norient + 1)
        for j in range(len(orient) - 1):
            f = skimage.transform.rotate(dG, orient[j])
            F[:, :, count] = f
            count += 1
    return F
Example No. 8
def DoG(scales, orient, size):
    orients = np.linspace(0, 360, orient)
    kernels = []
    kernel = gkern(size, scales)
    border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
    sobelx64f = cv2.Sobel(kernel, cv2.CV_64F, 1, 0, ksize=3, borderType=border)
    plt.subplots(3, 5, figsize=(20, 20))
    for i, eachOrient in enumerate(orients):
        image = skimage.transform.rotate(sobelx64f, eachOrient)
        plt.subplots_adjust(hspace=1.0, wspace=1.5)
        plt.subplot(3, 5, i + 1)
        plt.imshow(image, cmap='binary')
        kernels.append(image)
        image = 0
    plt.savefig('DoG.png')
    plt.close()
    return kernels
Example No. 9
def makeDOGFilters(scales, orient, size):
    kernels = []
    for scale in scales:
        orients = np.linspace(0, 360, orient)
        kernel = gauss2D(size, scale)
        border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
        sobelx64f = cv2.Sobel(kernel,
                              cv2.CV_64F,
                              1,
                              0,
                              ksize=3,
                              borderType=border)
        for i, eachOrient in enumerate(orients):
            #plt.figure(figsize=(16,16))
            image = skimage.transform.rotate(sobelx64f, eachOrient)
            kernels.append(image)
            image = 0
    return kernels
Example No. 10
def main():

    Parser = argparse.ArgumentParser()
    Parser.add_argument('--indexPic',
                        dest='indexPic',
                        type=int,
                        default=1,
                        help='input for the index of the image')
    Parser.add_argument(
        '--imgPath',
        dest='imgPath',
        default=
        '/home/kartikmadhira/CMSC733/YourDirectoryID_hw0/Phase1/BSDS500/Images/',
        help='Path to load images from, Default:BasePath')
    Args = Parser.parse_args()
    indexPic = Args.indexPic
    imgPath = Args.imgPath
    imgPath = imgPath + str(indexPic) + '.jpg'
    """
	Generate Difference of Gaussian Filter Bank: (DoG)
	Display all the filters in this filter bank and save image as DoG.png,
	use command "cv2.imwrite(...)"
	"""
    dog1 = DoG(16, 15, 49)
    """
	Generate Leung-Malik Filter Bank: (LM)
	Display all the filters in this filter bank and save image as LM.png,
	use command "cv2.imwrite(...)"
	"""

    F = makeLMfilters()
    saveLMFilters(F)
    """
	Generate Gabor Filter Bank: (Gabor)
	Display all the filters in this filter bank and save image as Gabor.png,
	use command "cv2.imwrite(...)"
	"""

    angle1 = np.linspace(0, 360, 12)
    gaborKernels = []
    gabor1 = gabor_fn(9, 0.25, 1, 1, 1)
    for eachAngle in angle1:
        gab1 = skimage.transform.rotate(gabor1, eachAngle)
        gaborKernels.append(gab1)
    angle2 = np.linspace(0, 360, 12)
    gabor2 = gabor_fn(16, 0.25, 1, 1, 1)
    for eachAngle in angle2:
        gab2 = skimage.transform.rotate(gabor2, eachAngle)
        gaborKernels.append(gab2)
    angle3 = np.linspace(0, 360, 12)
    gabor3 = gabor_fn(16, 0.25, 1, 1, 1)
    for eachAngle in angle3:
        gab3 = skimage.transform.rotate(gabor3, eachAngle)
        gaborKernels.append(gab3)

    saveGaborFilters(gaborKernels)
    """
	Generate Half-disk masks
	Display all the Half-disk masks and save image as HDMasks.png,
	use command "cv2.imwrite(...)"
	"""
    a1, b1 = half_disk(25)
    orient = np.linspace(0, 360, 15)
    halfDiskBank1 = []
    for eachAngle in orient:
        rotatedMask = skimage.transform.rotate(b1, eachAngle)
        image1 = np.logical_or(a1, rotatedMask).astype(int)
        halfDiskBank1.append(image1)
        image1 = 0
    halfDiskBank2 = []
    for each in halfDiskBank1:
        image = np.flip(each).astype(int)
        halfDiskBank2.append(image)
        image = 0

    saveHalfDisks(halfDiskBank1, halfDiskBank2)
    """
	Generate Texton Map
	Filter image using oriented gaussian filter bank
	"""

    ss = cv2.imread(imgPath, 0)
    w, h = ss.shape
    ss2 = ss
    for i in range(48):
        border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
        image = cv2.filter2D(ss, -1, F[:, :, i], borderType=border)
        ss2 = np.dstack((ss2, image))
        image = 0
    for i in range(15):
        border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
        image = cv2.filter2D(ss, -1, dog1[i], borderType=border)
        ss2 = np.dstack((ss2, image))
        image = 0
    for i in range(12):
        border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
        image = cv2.filter2D(ss, -1, gaborKernels[i], borderType=border)
        ss2 = np.dstack((ss2, image))
        image = 0

    _, _, d = ss2.shape
    ss3 = ss2[:, :, 1:d]
    ss4 = np.reshape(ss3, ((w * h), d - 1))
    """
	Generate texture ID's using K-means clustering
	Display texton map and save image as TextonMap_ImageName.png,
	use command "cv2.imwrite('...)"
	"""
    kmeans = KMeans(n_clusters=64, random_state=2)
    kmeans.fit(ss4)
    labels = kmeans.predict(ss4)
    textonMap = np.reshape(labels, (w, h))
    plt.imshow(textonMap)
    plt.savefig('TextonMap_' + str(indexPic) + '.png')
    plt.close()
    print('saved Texton Map')
    """
	Generate Texton Gradient (Tg)
	Perform Chi-square calculation on Texton Map
	Display Tg and save image as Tg_ImageName.png,
	use command "cv2.imwrite(...)"
	"""

    Tg = chiSquareDist(textonMap, halfDiskBank1, halfDiskBank2, 64)
    Tg = Tg[:, :, 1:16]
    plt.imshow(np.mean(Tg, axis=2))
    plt.savefig('Tg_' + str(indexPic) + '.png')
    plt.close()
    print('saved Tg')
    """
	Generate Brightness Map
	Perform brightness binning 
    """

    newSSshape = np.reshape(ss, ((w * h), 1))
    kmeansBrightness = KMeans(n_clusters=16, random_state=2)
    kmeansBrightness.fit(newSSshape)

    labelsBrightnesss = kmeansBrightness.predict(newSSshape)
    brightMap = np.reshape(labelsBrightnesss, ((w, h)))
    plt.imshow(brightMap)
    plt.savefig('BrightMap_' + str(indexPic) + '.png')
    plt.close()
    """
	Generate Brightness Gradient (Bg)
	Perform Chi-square calculation on Brightness Map
	Display Bg and save image as Bg_ImageName.png,
	use command "cv2.imwrite(...)"
	"""
    Bg = chiSquareDist(brightMap, halfDiskBank1, halfDiskBank2, 16)
    Bg = Bg[:, :, 1:16]
    plt.imshow(np.mean(Bg, axis=2))
    plt.savefig('Bg_' + str(indexPic) + '.png')
    plt.close()
    print('saved Bg')

    #
    #
    #	"""
    #	Generate Color Map
    #	Perform color binning or clustering
    #	"""
    ssColor = cv2.imread(imgPath)
    ssColor = np.reshape(ssColor, ((w * h), 3))
    kmeansColor = KMeans(n_clusters=16, random_state=2)
    kmeansColor.fit(ssColor)
    colorMap = kmeansColor.predict(ssColor)
    colorMap = np.reshape(colorMap, (w, h))
    plt.imshow(colorMap)
    plt.savefig('ColorMap_' + str(indexPic) + '.png')
    plt.close()
    """
	Generate Color Gradient (Cg)
	Perform Chi-square calculation on Color Map
	Display Cg and save image as Cg_ImageName.png,
	use command "cv2.imwrite(...)"
	"""
    Cg = chiSquareDist(colorMap, halfDiskBank1, halfDiskBank2, 16)
    Cg = Cg[:, :, 1:16]
    plt.imshow(np.mean(Cg, axis=2))
    plt.savefig('Cg_' + str(indexPic) + '.png')
    plt.close()
    """
	Read Sobel Baseline
	use command "cv2.imread(...)"
	"""
    sobelImagePath = '/home/kartikmadhira/CMSC733/YourDirectoryID_hw0/Phase1/BSDS500/SobelBaseline/' + str(
        indexPic) + '.png'
    sobelBaseline = cv2.imread(sobelImagePath, 0)
    """
	Read Canny Baseline
	use command "cv2.imread(...)"
	"""
    cannyImagePath = '/home/kartikmadhira/CMSC733/YourDirectoryID_hw0/Phase1/BSDS500/CannyBaseline/' + str(
        indexPic) + '.png'
    cannyBaseline = cv2.imread(cannyImagePath, 0)

    tmp1 = (Tg + Bg + Cg) / 3
    average = np.mean(tmp1, axis=2)
    """
	Combine responses to get pb-lite output
	Display PbLite and save image as PbLite_ImageName.png
	use command "cv2.imwrite(...)"
	"""
    final = np.multiply(average, (0.5 * cannyBaseline + 0.5 * sobelBaseline))
    plt.imshow(final, cmap='binary')
    cv2.imwrite('PbLite_' + str(indexPic) + '.png', final)
    print('saved PbLite')
    plt.close()
Example No. 11
'''
cv2.BORDER_CONSTANT    : pad the border with a specified constant value
cv2.BORDER_WRAP        : pad with pixels copied from the opposite edge (wrap around)
cv2.BORDER_REPLICATE   : pad by replicating the edge pixels
cv2.BORDER_REFLECT     : pad by mirroring the image at the border
cv2.BORDER_REFLECT_101 : pad by mirroring, excluding the border pixel itself
cv2.BORDER_DEFAULT     : alias for BORDER_REFLECT_101
'''
border = cv2.copyMakeBorder(img, 10, 10, 10, 10, cv2.BORDER_DEFAULT)
"""
自定义外推

borderInterpolate(p, len, borderType) -> retval
"""

border = cv2.borderInterpolate(100, img.shape[0], cv2.BORDER_REFLECT_101)
border = cv2.borderInterpolate(-5, img.shape[1], cv2.BORDER_WRAP)
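# Illustrative sketch (not part of the original tutorial): show which in-range index an
# out-of-range coordinate (here p = -2 with len = 5) maps to under each border mode.
for mode_name in ('BORDER_REPLICATE', 'BORDER_WRAP', 'BORDER_REFLECT',
                  'BORDER_REFLECT_101', 'BORDER_CONSTANT'):
    print(mode_name, cv2.borderInterpolate(-2, 5, getattr(cv2, mode_name)))
# REPLICATE -> 0, WRAP -> 3, REFLECT -> 1, REFLECT_101 -> 2, CONSTANT -> -1 (no valid index)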
'''
Thresholding

threshold(src, thresh, maxval, type[, dst]) -> retval, dst
'''
ret, threshold = cv2.threshold(img, 20, 255, cv2.THRESH_BINARY)

# Use Otsu's algorithm to automatically determine the optimal threshold
ret, otsu = cv2.threshold(img, 20, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
"""

adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst
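# A minimal usage sketch (an assumption, not from the original tutorial; `img` is taken
# to be the same single-channel 8-bit image used above): compare each pixel against a
# Gaussian-weighted mean of its 11x11 neighbourhood.
adaptive = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)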
Example No. 12
def main():
    data_path = '/home/pratique/Downloads/cmsc733/Homework0/116353601_hw0/Phase1/BSDS500/Images/'
    img = cv2.imread(
        '/home/pratique/Downloads/cmsc733/Homework0/116353601_hw0/Phase1/BSDS500/Images/1.jpg'
    )
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    N_dim_tensor_dog = texton_tensor(img_gray)
    p, q, r = np.shape(N_dim_tensor_dog)
    inp = np.reshape(N_dim_tensor_dog, ((p * q), r))
    kmeans = sklearn.cluster.KMeans(n_clusters=64, random_state=2)
    kmeans.fit(inp)
    labels = kmeans.predict(inp)
    l = np.reshape(labels, (p, q))
    plt.imshow(l)
    plt.show()

    # brightness kmean
    p, q = np.shape(img_gray)
    inp = np.reshape(img_gray, ((p * q), 1))
    kmeans = sklearn.cluster.KMeans(n_clusters=16, random_state=2)
    kmeans.fit(inp)
    labels = kmeans.predict(inp)
    l = np.reshape(labels, (p, q))
    plt.imshow(l, cmap='binary')
    plt.show()

    # img_lab = cv2.cvtColor(img, CV_BGR2Lab)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    p, q, r = np.shape(img_hsv)
    inp = np.reshape(img_hsv, ((p * q), r))
    kmeans = sklearn.cluster.KMeans(n_clusters=16, random_state=2)
    kmeans.fit(inp)
    labels = kmeans.predict(inp)
    l = np.reshape(labels, (p, q))
    plt.imshow(l)
    plt.show()

    kernel = gkern(21, 7)
    border = cv2.borderInterpolate(0, 1, cv2.BORDER_CONSTANT)
    sobelx64f = cv2.Sobel(kernel, cv2.CV_64F, 1, 0, ksize=5, borderType=border)
    sobely64f = cv2.Sobel(kernel, cv2.CV_64F, 0, 1, ksize=5, borderType=border)
    # theta = np.radians(45)
    # c, s = np.cos(theta), np.sin(theta)
    # R = np.array(((c,-s), (s, c)))
    final = skimage.transform.rotate(sobelx64f, 90)
    plt.imshow(final, cmap='binary')
    plt.show()
    # DoG(4,30,6)

    #LM

    F = makeLMfilters()
    print(F.shape)

    for i in range(0, 18):
        plt.subplot(3, 6, i + 1)
        plt.axis('off')
        plt.imshow(F[:, :, i], cmap='gray')
        plt.show()

    for i in range(0, 18):
        plt.subplot(3, 6, i + 1)
        plt.axis('off')
        plt.imshow(F[:, :, i + 18], cmap='gray')
    plt.show()

    for i in range(0, 12):
        plt.subplot(4, 4, i + 1)
        plt.axis('off')
        plt.imshow(F[:, :, i + 36], cmap='gray')
        plt.show()
Example No. 13
def make_counter(IM, n):
    # Truncated in the source: the call below only illustrates the
    # borderInterpolate(p, len, borderType) signature; p, len and borderType
    # are presumably defined elsewhere in the original project.
    cv2.borderInterpolate(p, len, borderType)
Example No. 14
def locality(i, j):
    # Yield the wrapped (row, col) neighbours of (i, j); w, h, n and
    # locality_pattern come from the enclosing scope in the original code.
    return ((cv2.borderInterpolate(i + a, w, cv2.BORDER_WRAP),
             cv2.borderInterpolate(j + b, h, cv2.BORDER_WRAP))
            for a, b in locality_pattern(n))
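# Usage sketch (illustrative assumptions: a square n x n offset pattern stands in for the
# original locality_pattern, and w, h are the dimensions being wrapped).
import cv2

def locality_pattern(n):
    r = n // 2
    return [(a, b) for a in range(-r, r + 1) for b in range(-r, r + 1)]

w, h, n = 5, 4, 3
print(list(locality(0, 0)))  # negative offsets wrap around: -1 -> w - 1 (or h - 1)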