Example #1
def method8():
    while c.isOpened():
        rd, image = c.read()

        if rd:
            # ---------> Preprocessing <----------
            m1 = image[:, :, 0]  # take the blue channel
            m2 = image[:, :, 2]  # take the red channel
            m3 = cv2.subtract(m1, m2)  # blue channel - red channel
            m3 = cv2.subtract(m3, m2)  # subtract the red channel once more

            # ---------> Minimal bounding box <----------
            x, y, w, h = cv2.boundingRect(m3)

            # ---------> Draw the box on the original image <----------
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 3)

            cv2.imshow("image", image)
            cv2.imshow("m1", m1)
            cv2.imshow("m2", m2)
            cv2.imshow("m3", m3)

        else:
            break

        if cv2.waitKey(10) != -1:
            break
Example #2
def gradient2_demo(image):
    kernel = cv.getStructuringElement(cv.MORPH_RECT,(3,3))
    dm = cv.dilate(image,kernel)
    ed = cv.erode(image,kernel)
    dst1 = cv.subtract(dm,ed)
    dst2 = cv.subtract(ed,dm)
    cv.imshow("dst1",dst1)
    cv.imshow("dst2",dst2)
Example #3
def isDeleted(image_cv):
    deleted = cv2.imread('assets/deleted_img/image404.png')
    deleted_nb = cv2.imread('assets/deleted_img/image404_nb.png')
    try:
        diff = cv2.subtract(image_cv, deleted)
    except Exception:
        diff = True
    try:
        diff_nb = cv2.subtract(image_cv, deleted_nb)
    except Exception:
        diff_nb = True
    return (np.all(diff == 0) | np.all(diff_nb == 0))
Example #4
def isDeleted(image_cv: np.ndarray) -> bool:
    deleted = cv2.imread("assets/deleted_img/image404.png")
    deleted_nb = cv2.imread("assets/deleted_img/image404_nb.png")
    try:
        diff = cv2.subtract(image_cv, deleted)
    except Exception:
        diff = True
    try:
        diff_nb = cv2.subtract(image_cv, deleted_nb)
    except Exception:
        diff_nb = True
    return cast(bool, np.all(diff == 0) | np.all(diff_nb == 0))
Example #5
def laplace_pyramid_demo(image):  # width and height must be equal
    pyramid_image = gauss_pyramid_demo(image)
    level = len(pyramid_image)
    for i in range(level - 1, -1, -1):
        if (i - 1) < 0:
            expand = cv.pyrUp(pyramid_image[i], dstsize=image.shape[:2])
            lpls = cv.subtract(image, expand)
            cv.imshow("laplace" + str(i), lpls)
        else:
            expand = cv.pyrUp(pyramid_image[i],
                              dstsize=pyramid_image[i - 1].shape[:2])
            lpls = cv.subtract(pyramid_image[i - 1], expand)
            cv.imshow("laplace" + str(i), lpls)
Example #6
def check_isDeleted(output_filename):
    deleted = cv2.imread('assets/deleted_img/image404.png')
    deleted_nb = cv2.imread('assets/deleted_img/image404_nb.png')
    image = cv2.imread(output_filename)

    try:
        diff = cv2.subtract(image, deleted)
    except Exception:
        diff = True
    try:
        diff_nb = cv2.subtract(image, deleted_nb)
    except Exception:
        diff_nb = True

    return (np.all(diff == 0) | np.all(diff_nb == 0))
Example #7
def lapalian_demo(image):
    image = cv.resize(image, (512, 512))  # the pyramid source image's width/height must be a power of 2
    print(image.shape)
    pyramid_images = pyramid_demo(image)  # the Laplacian pyramid is built from the Gaussian pyramid results
    level = len(pyramid_images)
    for i in range(level - 1, -1, -1):
        if (i - 1) < 0:
            expand = cv.pyrUp(pyramid_images[i], dstsize=image.shape[:2])
            lpls = cv.subtract(image, expand)
            cv.imshow("lapalian_down_" + str(i), lpls)
        else:
            expand = cv.pyrUp(pyramid_images[i],
                              dstsize=pyramid_images[i - 1].shape[:2])
            lpls = cv.subtract(pyramid_images[i - 1], expand)
            cv.imshow("lapalian_down_" + str(i), lpls)
Example #8
def watershed_demo(image):
    blurred = cv.pyrMeanShiftFiltering(image, 10, 100)
    gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    cv.imshow("binary", binary)

    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    mb = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv.dilate(binary, kernel, iterations=3)
    cv.imshow("mor", sure_bg)

    dist = cv.distanceTransform(mb, cv.DIST_L2, 3)
    dist_output = cv.normalize(dist, None, 0, 1.0, cv.NORM_MINMAX)  # normalize to [0, 1] for display
    cv.imshow("dist", dist_output * 50)

    ret, surface = cv.threshold(dist, dist.max() * 0.6, 255, cv.THRESH_BINARY)
    cv.imshow("interface", surface)

    surface_fg = np.uint8(surface)
    unknow = cv.subtract(sure_bg, surface_fg)
    ret, markers = cv.connectedComponents(surface_fg)
    print(ret)

    markers += 1
    markers[unknow == 255] = 0
    markers = cv.watershed(image, markers=markers)
    image[markers == -1] = [0, 0, 255]
    cv.imshow("result", image)
Example #9
def watershed(image, image_color):
    print('watershed', image.shape)
    cv2.imshow('Image', image)
    gradiente = segmentar_iterativo(image)
    cv2.imshow('Gradiente', gradiente)
    gradiente_inverso = image_not(gradiente)
    cv2.imshow('Gradiente inverso', gradiente_inverso)

    kernel = np.ones((3, 3), np.uint8)
    thresh = gradiente_inverso
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

    gradiente_erode = cv2.erode(opening, kernel, iterations=13)
    cv2.imshow('Gradiente erode', gradiente_erode)

    gradiente_erode = np.uint8(gradiente_erode)
    unknown = cv2.subtract(gradiente_inverso, gradiente_erode)
    cv2.imshow('gradiente_inverso - gradiente_erode', unknown)

    ret, markers = cv2.connectedComponents(gradiente_erode)
    # gradiente_inverso is the image of the dam boundaries
    markers = markers + 1
    markers[unknown == 255] = 0
    # markers[markers >= 1] = 250
    cv2.imshow('markers', markers)

    markers = cv2.watershed(image_color, markers)
    image_color[markers == -1] = [255, 255, 0]
    cv2.imshow('Resultado - Watershed', image_color)

    cv2.waitKey(0)
Example #10
def method7():
    while c.isOpened():
        rd, image = c.read()

        if rd:
            # ---------> Preprocessing <----------
            m1 = image[:, :, 0]  # take the blue channel
            m2 = image[:, :, 2]  # take the red channel
            m3 = cv2.subtract(m1, m2)  # blue channel - red channel

            # ---------> Binarization <----------
            m3 = cv2.adaptiveThreshold(m3, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY, 11, 9)

            # ---------> Canny edge detection <----------
            m4 = cv2.Canny(m3, 100, 30)

            # ---------> Minimal bounding box <----------
            x, y, w, h = cv2.boundingRect(m4)

            # ---------> Draw the box on the original image <----------
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 3)

            cv2.imshow("image", image)
            cv2.imshow("m1", m1)
            cv2.imshow("m2", m2)
            cv2.imshow("m3", m3)
            cv2.imshow("m4", m4)

        else:
            break

        if cv2.waitKey(10) != -1:
            break
Example #11
def method6():
    while c.isOpened():
        rd, image = c.read()

        if rd:
            # ---------> Preprocessing <----------
            m1 = image[:, :, 0]  # take the blue channel
            m2 = image[:, :, 2]  # take the red channel
            m3 = cv2.subtract(m1, m2)  # blue channel - red channel

            # ---------> Binarization <----------
            th, m3 = cv2.threshold(m3, 50, 255, cv2.THRESH_BINARY)
            m4 = cv2.bitwise_not(m3)

            m5 = cv2.Canny(m4, 100, 30)

            # ---------> Minimal bounding box <----------
            x, y, w, h = cv2.boundingRect(m5)

            # ---------> Draw the box on the original image <----------
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 3)

            cv2.imshow("image", image)
            cv2.imshow("m1", m1)
            cv2.imshow("m2", m2)
            cv2.imshow("m3", m3)
            cv2.imshow("m4", m4)
            cv2.imshow("m5", m5)
        else:
            break

        if cv2.waitKey(10) != -1:
            break
Example #12
    def describe(self, image):
        # convert the image to the HSV color space and initialize
        # the features used to quantify the image
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        features = []
        # grab the dimensions and compute the center of the image
        (h, w) = image.shape[:2]
        (cX, cY) = (int(w * 0.5), int(h * 0.5))

        # divide the image into four rectangles/segments (top-left,
        # top-right, bottom-right, bottom-left)
        segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),
                    (0, cX, cY, h)]
        # construct an elliptical mask representing the center of the
        # image
        (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
        ellipMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)
        # loop over the segments
        for (startX, endX, startY, endY) in segments:
            # construct a mask for each corner of the image, subtracting
            # the elliptical center from it
            cornerMask = np.zeros(image.shape[:2], dtype="uint8")
            cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
            cornerMask = cv2.subtract(cornerMask, ellipMask)
            # extract a color histogram from the image, then update the
            # feature vector
            hist = self.histogram(image, cornerMask)
            features.extend(hist)
        # extract a color histogram from the elliptical region and
        # update the feature vector
        hist = self.histogram(image, ellipMask)
        features.extend(hist)
        # return the feature vector
        return features
Example #13
def s2(img):  # watershed segmentation
    img = img
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # Noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    # Find the sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)
    # Find the sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                                 255, 0)
    # Find the unknown region (borders)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add 1 to all labels so that the background is 1 instead of 0
    markers = markers + 1
    # Now mark the unknown region with zeros
    markers[unknown == 255] = 0
    markers = cv2.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]
    return img
Example #14
def thresholding(img_gray):
    _, img_th = cv2.threshold(img_gray,np.average(img_gray)-32,255,cv2.THRESH_BINARY)
    img_th2 = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,21,7)
    img_th3 = np.bitwise_and(img_th, img_th2)
    img_th4 = cv2.subtract(img_th2, img_th3)
    for i in range(5):
        img_th4 = cv2.medianBlur(img_th4, 5)
    return img_th4
Example #15
def ssr_c(img, size):
    img_G = replaceZeroes(cv2.GaussianBlur(img, (size, size), 0))
    img = replaceZeroes(img)
    # img_G = cv2.GaussianBlur(img, (size, size), 0)
    log_S = cv2.log(img / 255.0)
    g_L = cv2.log(img_G / 255.0)
    log_L = cv2.multiply(log_S, g_L)
    log_R = cv2.subtract(log_S, log_L)
    dst_R = cv2.normalize(log_R, None, 0, 255, cv2.NORM_MINMAX)
    R_c = cv2.convertScaleAbs(dst_R)
    return R_c
Example #16
def subtract_images_white(image_path_1, image_path_2, write_path):
    image1 = cv2.imread(image_path_1)
    image2 = cv2.imread(image_path_2)
    difference = cv2.subtract(image2, image1)
    Conv_hsv_Gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(Conv_hsv_Gray, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    difference[mask != 255] = [0, 0, 255]
    image1[mask != 255] = [0, 0, 255]
    image2[mask != 255] = [0, 0, 255]
    # cv2.imshow('imgw',image1)
    cv2.imwrite(write_path, image1)
Example #17
def maximizeContrast(imgGrayscale):
    height, width = imgGrayscale.shape
    imgTopHat = np.zeros((height, width, 1), np.uint8)
    imgBlackHat = np.zeros((height, width, 1), np.uint8)
    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT,
                                 structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT,
                                   structuringElement)
    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat,
                                                       imgBlackHat)
    return imgGrayscalePlusTopHatMinusBlackHat
Example #18
def upload():
    # file=request.files['temp']
    f = request.files['temp']
    tempname = request.form['tempname']
    temp_path = '../templates/'
    name = f.filename.replace(' ', '_')
    # print(tempname)
    f.save(secure_filename(f.filename))

    inputImage = cv2.imread(name)
    inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(inputImageGray, 150, 200, apertureSize=3)

    # print(edges)
    edges = abs(cv2.subtract(255, edges))

    minLineLength = 30
    maxLineGap = 5
    # rho = 1 pixel, theta = 1 degree; minLineLength/maxLineGap must be passed as keywords
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    for x in range(0, len(lines)):
        for x1, y1, x2, y2 in lines[x]:
            pts = np.array([[x1, y1], [x2, y2]], np.int32)
            cv2.polylines(inputImage, [pts], True, (0, 255, 0))

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(inputImage, "Tracks Detected", (500, 250), font, 0.5, 255)

    os.remove(name)

    filename = tempname + '.png'

    #Following converts white pixels to transparent
    imagePIL = Image.fromarray(edges)
    imagePIL = imagePIL.convert("RGBA")
    datas = imagePIL.getdata()

    newData = []
    for item in datas:
        if item[0] == 255 and item[1] == 255 and item[2] == 255:
            newData.append((255, 255, 255, 0))
        else:
            if item[0] > 150:
                newData.append((0, 0, 0, 255))
            else:
                newData.append(item)

    imagePIL.putdata(newData)
    imagePIL.save(temp_path + filename, "PNG")

    return send_file(temp_path + filename, mimetype='image/png')
Example #19
def img_calc(img1, img2, method):
    if method == "add":
        return cv.add(img1, img2)
    elif method == "sub":
        return cv.subtract(img1, img2)
    elif method == "multi":
        return cv.multiply(img1, img2)
    elif method == "divide":
        return cv.divide(img1, img2)
    elif method == "and":
        return cv.bitwise_and(img1, img2)
    elif method == "or":
        return cv.bitwise_or(img1, img2)
    elif method == "not":
        return cv.bitwise_not(img1)  # bitwise_not takes a single source image
    else:
        return False
Example #20
    def watershed(self, _img=None):
        # # convert to grayscale and then to a binary image
        _img = self.img if _img is None else _img
        _gray = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
        _, _binary = cv2.threshold(_gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        # # morphological operations
        # # # kernel used for the morphological operations
        _kernel = np.ones((3, 3), np.uint8)
        # # # opening to remove noise (suppresses the effect of salt-and-pepper noise)
        _opening = cv2.morphologyEx(_binary, cv2.MORPH_OPEN, _kernel, iterations=2)
        # # # if the background and foreground can be marked out, the segmentation works well
        # # # given the amount of data, find them mechanically with code
        # # # find the part that is certainly background; dilation enlarges the object regions
        _sure_bg = cv2.dilate(_opening, _kernel, iterations=3)
        # cv_show(_sure_bg)
        # # distance transform: compute it on the opened image, then threshold it to obtain the foreground
        # # the first argument must be a single-channel binary image, the second is the distance metric
        # # it measures the distance from each 255 pixel to the nearest 0 pixel; DIST_L2 is the Euclidean distance and yields fractional values
        # # DIST_L1 is the Manhattan distance and yields no fractions
        _dist_transform = cv2.distanceTransform(_opening, cv2.DIST_L1, 5)
        # cv_show(_dist_transform)
        # # threshold the distance transform to get the points that are very likely foreground
        _, _sure_fg = cv2.threshold(_dist_transform, 0.5 * _dist_transform.max(), 255, cv2.THRESH_BINARY)
        # # convert the type, otherwise the following steps can misbehave
        _sure_fg = np.uint8(_sure_fg)
        # cv_show(_sure_fg)
        # # compute the unknown region and hand it to the bottom-up flooding (watershed) algorithm
        _unknown = cv2.subtract(_sure_bg, _sure_fg)
        # cv_show(_unknown)
        _, _markers = cv2.connectedComponents(_sure_fg)
        _markers = _markers + 1
        _markers[_unknown == 255] = 0
        _img1 = _img.copy()
        _markers = cv2.watershed(_img1, _markers)

        # # outline the regions; afterwards part of the result could be turned black based on the labels
        def random_color(a: int):
            return np.random.randint(0, 255, (a, 3))

        _markers_label = np.unique(_markers)
        _colors = random_color(_markers_label.size)
        for _mark, _color in zip(_markers_label, _colors):
            _img1[_markers == _mark] = _color
        # # display
        cv_show(_img1)
Example #21
def skeletize(img):
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False

    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()

        zeroes = size - cv2.countNonZero(img)
        if zeroes == size:
            done = True

    return skel
Example #22
def watershed(image, image_color):
    gradiente = segmentar_iterativo(image)
    gradiente_inverso = image_not(gradiente)

    kernel = np.ones((3, 3), np.uint8)
    thresh = gradiente_inverso
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

    gradiente_erode = cv2.erode(opening, kernel, iterations=13)

    gradiente_erode = np.uint8(gradiente_erode)
    unknown = cv2.subtract(gradiente_inverso, gradiente_erode)

    ret, markers = cv2.connectedComponents(gradiente_erode)

    markers = markers + 1
    markers[unknown == 255] = 0

    markers = cv2.watershed(image_color, markers)
    image_color[markers == -1] = [255, 255, 0]
    return image_color
Example #23
def cleanImage(image, stage=0):
    V = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # applying topHat/blackHat operations
    topHat = cv2.morphologyEx(V, cv2.MORPH_TOPHAT, kernel)
    blackHat = cv2.morphologyEx(V, cv2.MORPH_BLACKHAT, kernel)
    # add and subtract between morphological operations
    add = cv2.add(V, topHat)
    subtract = cv2.subtract(add, blackHat)
    if (stage == 1):
        return subtract
    T = threshold_local(subtract,
                        29,
                        offset=35,
                        method="gaussian",
                        mode="mirror")
    thresh = (subtract > T).astype("uint8") * 255
    if (stage == 2):
        return thresh
    # invert image
    thresh = cv2.bitwise_not(thresh)
    return thresh
Example #24
def overlay(bw1, bw2, L1=15, S1=13, L2=15, S2=13, showfft=False, fname=None):
    # Overlay takes in two black and white images

    gauss1 = np.outer(cv2.getGaussianKernel(L1,S1), cv2.getGaussianKernel(L1,S1))
    gauss1 /=  np.sum(gauss1)
    gauss2 = np.outer(cv2.getGaussianKernel(L2,S2), cv2.getGaussianKernel(L2,S2))
    gauss2 /=  np.sum(gauss2)
    lpf_kernel = gauss1
    impulse = np.zeros(gauss2.shape)
    impulse[impulse.shape[0] // 2, impulse.shape[1] // 2] = 1
    hpf_kernel = cv2.subtract(impulse, gauss2)

    im1_filt = convolve2d(bw1, lpf_kernel, mode="same")
    im2_filt = convolve2d(bw2, hpf_kernel, mode="same")

    if showfft and fname:
        postlpf = np.abs(np.fft.fftshift(np.fft.fft2(im1_filt)))
        posthpf = np.log(np.abs(np.fft.fftshift(np.fft.fft2(im2_filt))))
        scipy.misc.imsave("output/" + fname + "hpffft.jpg", posthpf)
        scipy.misc.imsave("output/" + fname + "lpffft.jpg", postlpf)

    final = (im1_filt + im2_filt) / 2
    return final
Example #25
def convolve_3d(im, kernel_type="gaussian", L=15, S=13):
    kernel = None
    gauss = np.outer(cv2.getGaussianKernel(L,S), cv2.getGaussianKernel(L,S))
    if kernel_type == "gaussian":
        kernel = gauss / np.sum(gauss)
    else:
        kernel = gauss / np.sum(gauss)
        impulse = np.zeros(gauss.shape)
        impulse[impulse.shape[0] // 2, impulse.shape[1] // 2] = 1
        kernel = cv2.subtract(impulse, kernel)


    final_color = []
    for i in range(3):
        curr = convolve2d(im[:,:,i], kernel, mode="same")
        final_color.append(curr)

    fin_shape = (final_color[0].shape[0], final_color[0].shape[1], 3)
    final = np.zeros(fin_shape)
    final[..., 0] = final_color[0]
    final[..., 1] = final_color[1]
    final[..., 2] = final_color[2]
    final = np.clip(final, 0, 1)
    return final
Example #26
def upload():
    # file=request.files['temp']
    f = request.files['temp']
    tempname = request.form['tempname']
    temp_path = '../templates/'
    name = f.filename.replace(' ', '_')
    print(tempname)
    f.save(secure_filename(f.filename))

    inputImage = cv2.imread(name)
    inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)

    edges = cv2.Canny(inputImageGray, 150, 200, apertureSize=3)

    print(edges)
    edges = abs(cv2.subtract(255, edges))

    minLineLength = 30
    maxLineGap = 5
    # rho = 1 pixel, theta = 1 degree; minLineLength/maxLineGap must be passed as keywords
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    for x in range(0, len(lines)):
        for x1, y1, x2, y2 in lines[x]:
            pts = np.array([[x1, y1], [x2, y2]], np.int32)
            cv2.polylines(inputImage, [pts], True, (0, 255, 0))

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(inputImage, "Tracks Detected", (500, 250), font, 0.5, 255)

    cv2.imwrite(temp_path + tempname + '.jpeg', edges)
    cv2.waitKey(0)

    os.remove(name)

    filename = tempname + '.jpeg'
    return send_file(temp_path + filename, mimetype='image/jpeg')
Example #27
def watershedAlgorithm(image):

    img = cv.imread(image)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)

    # noise removal
    kernel = np.ones((5, 5), np.uint8)
    opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]



    return img
Example #28
def watershed(img, img_gray):
#     mean = np.average(img_gray)
#     _, thresh1 = cv2.threshold(img_gray,mean,255,cv2.THRESH_BINARY_INV)
#     _, thresh2 = cv2.threshold(img_gray,200,255,cv2.THRESH_BINARY)
#     thresh = np.bitwise_or(thresh1, thresh2)
    _, thresh = cv2.threshold(img_gray,np.average(img_gray)-40,255,cv2.THRESH_BINARY_INV)

    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations=2)

    sure_bg = cv2.dilate(opening,kernel,iterations=2)

    dist_transform = cv2.distanceTransform(sure_bg,cv2.DIST_L2,5)
    _, sure_fg = cv2.threshold(dist_transform,0.5*dist_transform.max(),255,0)
    sure_fg = np.uint8(sure_fg)

    unknown = cv2.subtract(sure_bg, sure_fg)  # border region: sure background minus sure foreground

    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0

    markers = cv2.watershed(img,markers)
    return dist_transform
Example #29
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    cv2.imshow('Image1', thresh)

    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv2.dilate(opening, kernel, iterations=3)
    cv2.imshow('Image2 - sure_bg', sure_bg)
    # cv2.imshow('Image2 - opening', opening)

    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)
    cv2.imshow('Image3 - sure_fg', sure_fg)
    cv2.imshow('Image3 - dist_transform', dist_transform)

    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    cv2.imshow('Image4', unknown)

    ret, markers = cv2.connectedComponents(sure_fg)

    # markers = markers+1

    markers[unknown == 255] = 0
    markers[markers >= 1] = 255
    cv2.imshow('Image5', markers)
    # if len(markers.shape) == 2:
    #     a = 0
    #     for x in range(markers.shape[0]):
    #         for y in range(markers.shape[1]):
    #             if markers[x, y] > 0:
    #                 a += 1
# So we need to extract the area which we are sure they are coins. Erosion removes the boundary pixels. So whatever remaining, we can be sure it is coin. That would work if objects were not touching each other. But since they are touching each other, another good option would be to find the distance transform and apply a proper threshold. Next we need to find the area which we are sure they are not coins. For that, we dilate the result. Dilation increases object boundary to background. This way, we can make sure whatever region in background in result is really a background, since boundary region is removed.

# The remaining regions are those which we don’t have any idea, whether it is coins or background. Watershed algorithm should find it. These areas are normally around the boundaries of coins where foreground and background meet (Or even two different coins meet). We call it border. It can be obtained from subtracting sure_fg area from sure_bg area.

#open to remove noise
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations = 2)
# sure bg
surebg = cv2.dilate(opening, kernel, iterations = 4)
# sure fg
surefg = cv2.erode(opening,kernel,iterations = 3)
blurfg = cv2.GaussianBlur(cv2.erode(opening,kernel,iterations = 10),(21,21),0)
ret,fgthresh = cv2.threshold(blurfg,200,255,cv2.THRESH_BINARY)
fgthresh = np.uint8(fgthresh)
# unknown or boundary
unknown = cv2.subtract(surebg,surefg)
# what is it? it goes wrong
# dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)

cv2.imshow('opening',opening)
cv2.imshow('sure bg',surebg)
cv2.imshow('sure fg',surefg)
cv2.imshow('unknown',unknown)
cv2.imshow('thresh sure fg',fgthresh)
# cv2.imshow('dist_tra',dist_transform)
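
# A minimal sketch of the distance-transform route mentioned in the notes above,
# assuming the same `opening` image; the 0.7 * max threshold follows the OpenCV
# tutorial this commentary comes from.
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, dt_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, cv2.THRESH_BINARY)
dt_fg = np.uint8(dt_fg)
cv2.imshow('dist transform sure fg', dt_fg)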

# See the result. In the thresholded image, we get some regions of coins which we are sure of coins and they are detached now. (In some cases, you may be interested in only foreground segmentation, not in separating the mutually touching objects. In that case, you need not use distance transform, just erosion is sufficient. Erosion is just another method to extract sure foreground area, that’s all.)

# Now we know for sure which are region of coins, which are background and all. So we create marker (it is an array of same size as that of original image, but with int32 datatype) and label the regions inside it. The regions we know for sure (whether foreground or background) are labelled with any positive integers, but different integers, and the area we don’t know for sure are just left as zero. For this we use cv2.connectedComponents(). It labels background of the image with 0, then other objects are labelled with integers starting from 1.

# But we know that if background is marked with 0, watershed will consider it as unknown area. So we want to mark it with different integer. Instead, we will mark unknown region, defined by unknown, with 0.
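
# A minimal sketch of the marker labelling described above, continuing from the
# `surefg` and `unknown` arrays computed earlier; `img` (the original BGR image)
# is an assumption, since this fragment does not show how it was loaded.
ret, markers = cv2.connectedComponents(surefg)
# shift every label by one so the sure background becomes 1 instead of 0
markers = markers + 1
# mark the unknown (border) region with 0 so watershed can decide it
markers[unknown == 255] = 0
markers = cv2.watershed(img, markers)  # img is assumed to be the original 3-channel frame
# pixels labelled -1 are the watershed boundaries
img[markers == -1] = [0, 0, 255]
cv2.imshow('watershed result', img)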
Example #31
# generate gaussian pyramid for orange
orange_copy = orange.copy()
gp_orange = [orange_copy]

for i in range(6):
    orange_copy = cv2.pyrDown(orange_copy)
    gp_orange.append(orange_copy)
    #cv2.imshow(str(i), orange_copy)

# generate laplacian pyramid for apple
apple_copy = gp_apple[5]
lp_apple = [apple_copy]

for i in range(5, 0, -1):
    apple_extended = cv2.pyrUp(gp_apple[i])
    laplacian = cv2.subtract(gp_apple[i - 1], apple_extended)
    lp_apple.append(laplacian)
    #cv2.imshow(str(i), apple_copy)

# generate laplacian pyramid for orange
orange_copy = gp_orange[5]
lp_orange = [orange_copy]

for i in range(5, 0, -1):
    orange_extended = cv2.pyrUp(gp_orange[i])
    laplacian = cv2.subtract(gp_orange[i - 1], orange_extended)
    lp_orange.append(laplacian)
    #cv2.imshow(str(i), orange_copy)

# Now add left and right halves of images in each level
apple_orange_pyramid = []
Example #32
    def imagePyramidImg(self):
        imageFirst = Image.open(self.filename)
        imageLast = imageFirst.resize((450, 450), Image.ANTIALIAS)
        imageLast.save('img/dist/temp1.jpg')
        imageFirst2 = Image.open(self.filename2)
        imageLast2 = imageFirst2.resize((450, 450), Image.ANTIALIAS)
        imageLast2.save('img/dist/temp2.jpg')

        A = cv2.imread('img/dist/temp1.jpg')
        B = cv2.imread('img/dist/temp2.jpg')

        G = A.copy()
        gpA = [G]
        for i in range(6):
            G = cv2.pyrDown(G)
            gpA.append(G)

        G = B.copy()
        gpB = [G]
        for i in range(6):
            G = cv2.pyrDown(G)
            gpB.append(G)

        lpA = [gpA[5]]
        for i in range(6, 0, -1):

            GE = cv2.pyrUp(gpA[i])
            GE = cv2.resize(GE, gpA[i - 1].shape[-2::-1])
            L = cv2.subtract(gpA[i - 1], GE)
            lpA.append(L)

        lpB = [gpB[5]]
        for i in range(6, 0, -1):

            GE = cv2.pyrUp(gpB[i])
            GE = cv2.resize(GE, gpB[i - 1].shape[-2::-1])
            L = cv2.subtract(gpB[i - 1], GE)

            lpB.append(L)

        LS = []
        lpAc = []
        for i in range(len(lpA)):
            b = cv2.resize(lpA[i], lpB[i].shape[-2::-1])
            lpAc.append(b)

        j = 0
        for i in zip(lpAc, lpB):
            la, lb = i
            rows, cols, dpt = la.shape
            ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))
            j = j + 1
            LS.append(ls)

        ls_ = LS[0]
        for i in range(1, 6):
            ls_ = cv2.pyrUp(ls_)
            ls_ = cv2.resize(ls_, LS[i].shape[-2::-1])
            ls_ = cv2.add(ls_, LS[i])

        B = cv2.resize(B, A.shape[-2::-1])
        real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))

        cv2.imwrite('img/dist/pyramid.jpg', ls_)