Code Example #1
import cv2
import numpy


def image_transformation(file):
    # open image and apply filter
    img = cv2.imdecode(numpy.frombuffer(file, numpy.uint8),
                       cv2.IMREAD_UNCHANGED)
    img = cv2.resize(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel = numpy.ones((1, 1), numpy.uint8)
    img = cv2.dilate(img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

    # get coordinates
    coords = numpy.column_stack(numpy.where(img > 0))
    angle = cv2.minAreaRect(coords)[-1]
    # the `cv2.minAreaRect` function returns values in the
    # range [-90, 0); as the rectangle rotates clockwise the
    # returned angle trends to 0 -- in this special case we
    # need to add 90 degrees to the angle
    if angle < -45:
        angle = -(90 + angle)
    # otherwise, just negate the angle to make
    # it positive
    else:
        angle = -angle

    (h, w) = img.shape[:2]
    center = (w // 2, h // 2)
    m = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(img,
                             m, (w, h),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)
    img = rotated

    ret, thresh_binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    value_thresh_binary = cv2.Laplacian(thresh_binary, cv2.CV_64F).var()

    blur = cv2.GaussianBlur(img, (5, 5), 0)
    # blur = gaussian_blur(img, 5)
    # img = otsu(blur)
    ret3, thresh_otsu = cv2.threshold(blur, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    value_otsu = cv2.Laplacian(thresh_otsu, cv2.CV_64F).var()

    # keep whichever binarization yields the higher Laplacian variance
    if value_thresh_binary > value_otsu:
        print(value_thresh_binary)
        img = thresh_binary
    else:
        print(value_otsu)
        img = thresh_otsu
    return img
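A minimal usage sketch for the function above, assuming a hypothetical 'scan.png'; note that the function expects the raw bytes of an encoded image, not a decoded array:

with open('scan.png', 'rb') as f:  # hypothetical example file
    deskewed = image_transformation(f.read())
cv2.imwrite('scan_deskewed.png', deskewed)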
Code Example #2
def edgeDetectionZeroCrossingSimple(I: np.ndarray) -> np.ndarray:

    src_gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    lap = cv2.Laplacian(src_gray, cv2.CV_16S, ksize=7)

    # TODO:
    # Look for patterns like {+, 0, -} or {+, -} (zero crossings); not sure what to do

    return lap
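One possible way to resolve the TODO above, offered as a sketch rather than the author's intended solution: compare each pixel's Laplacian sign against the morphological min/max of its neighborhood (the same idea used in Code Example #7 below), marking pixels where positive and negative responses meet.

import cv2
import numpy as np

def zero_crossings(lap: np.ndarray) -> np.ndarray:
    # a pixel is a zero crossing if its 3x3 neighborhood contains both
    # positive and negative Laplacian responses
    min_lap = cv2.morphologyEx(lap, cv2.MORPH_ERODE, np.ones((3, 3), np.uint8))
    max_lap = cv2.morphologyEx(lap, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))
    edges = np.logical_or(np.logical_and(min_lap < 0, lap > 0),
                          np.logical_and(max_lap > 0, lap < 0))
    return (edges * 255).astype(np.uint8)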
Code Example #3
 def laplaceDetection(self):
     try:
         img = cv2.imread(self.filename)
         laplacian = cv2.Laplacian(img, cv2.CV_64F)
         # take the absolute value and convert to 8-bit before writing,
         # so negative responses are not clipped to black
         laplacian = cv2.convertScaleAbs(laplacian)
         cv2.imwrite('img/dist/laplace.jpg', laplacian,
                     [cv2.IMWRITE_JPEG_QUALITY, 100])
     except cv2.error:
         pass
Code Example #4
def laplacian(m, n, thres, l, sigma, mean, p, img, hist_range):
    lap = cv2.Laplacian(img, cv2.CV_64F)
    lap = np.int64(lap)
    # sum of squared Laplacian responses over the interior pixels
    # (equivalent to np.sum(lap[1:m-1, 1:n-1] ** 2))
    sum1 = 0
    for i in range(1, m - 1):
        for j in range(1, n - 1):
            sum1 = sum1 + lap[i][j]**2

    return sum1
Code Example #5
File: edge.py  Project: ntajxyooj/computer-gv
 def CovertLaplacian(self):
     img = cv2.imread(
         os.path.join(root, '..', 'static', 'photos',
                      session['img_name_org_edge']))
     laplacian = cv2.Laplacian(img, cv2.CV_64F)
     # convert to 8-bit so the signed response survives being saved to a file
     laplacian = cv2.convertScaleAbs(laplacian)
     filename = str(randint(1000000000,
                            9999999999)) + session['img_name_org_edge']
     cv2.imwrite(os.path.join(root, '..', 'static', 'photos', filename),
                 laplacian)
     session['img_name_covert_edge'] = filename
     session['covert_title_edge'] = "Detection By Laplacian"
Code Example #6
def laplace(img: np.ndarray) -> np.ndarray:
    """
    laplace filter

    Args:
        img: image as numpy array

    Returns:
        img
    """
    return cv2.Laplacian(img, cv2.CV_64F)
Code Example #7
File: FiltrosTK.py  Project: HorstGab/Codigos
def zeroCross():
    image = grayScale()  # grayscale version of the global imageInit
    LoG = cv2.Laplacian(image, cv2.CV_16S)

    minLoG = cv2.morphologyEx(LoG, cv2.MORPH_ERODE, np.ones((2, 2)))
    maxLoG = cv2.morphologyEx(LoG, cv2.MORPH_DILATE, np.ones((2, 2)))

    edged = (np.logical_or(np.logical_and(minLoG < 0, LoG > 0),
                           np.logical_and(maxLoG > 0, LoG < 0)) *
             255).astype('uint8')
    show_img(ImageTk.PhotoImage(Img.fromarray(edged)))
Code Example #8
def edgeDetectionZeroCrossingLOG(I: np.ndarray) -> np.ndarray:
    # smooth with gaussian
    I = blurImage2(I, 9)

    # turn to GrayScale and activate laplace filter
    src_gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    lap = cv2.Laplacian(src_gray, cv2.CV_16S, ksize=7)

    # take the absolute value only for display purposes; find the edge
    # points before applying abs, since abs destroys the sign information
    # needed to locate zero crossings

    lap = cv2.convertScaleAbs(lap)

    return lap
Code Example #9
    def __apply(self, grayed, index, p):
        coordinates = self._coordinates(p)
        rect = self.bounds[index]

        roi_gray = grayed[rect[1]:(rect[1] + rect[3]),
                          rect[0]:(rect[0] + rect[2])]
        laplacian = open_cv.Laplacian(roi_gray, open_cv.CV_64F)

        coordinates[:, 0] = coordinates[:, 0] - rect[0]
        coordinates[:, 1] = coordinates[:, 1] - rect[1]

        # the region is treated as unoccupied when the mean absolute Laplacian
        # response inside the mask falls below the threshold
        status = np.mean(np.abs(
            laplacian * self.mask[index])) < MotionDetector.LAPLACIAN

        return status
Code Example #10
def variance_of_laplacian(image):
    """Assessment of bluriness of an image. Bigger value - lower bluriness.
    Idea taken from https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
    and modified. I found it emperically that (max-min)**2/var is working better than simple variance
    as an assessment of bluriness. 

    Args:
        image
        
    Returns:
        assessment of bluriness
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    conv2d = cv2.Laplacian(gray_image, cv2.CV_64F)
    return int((conv2d.max() - conv2d.min())**2 / (2 * conv2d.var()))
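A quick usage sketch under assumed inputs ('photo.jpg' is a hypothetical path); per the docstring above, the blurred copy should yield the lower score:

import cv2

img = cv2.imread('photo.jpg')  # hypothetical example file
blurred = cv2.GaussianBlur(img, (15, 15), 0)
print(variance_of_laplacian(img), variance_of_laplacian(blurred))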
Code Example #11
File: gradation_Test.py  Project: SSKim76/Python_Test
def grad(img):
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)

    plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
    plt.title('original'), plt.xticks([]), plt.yticks([])

    plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
    plt.title('Laplacian'), plt.xticks([]), plt.yticks([])

    plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
    plt.title('sobel X'), plt.xticks([]), plt.yticks([])

    plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
    plt.title('sobel Y'), plt.xticks([]), plt.yticks([])

    plt.show()
Code Example #12
File: main.py  Project: Moooorry/Project
def edge_detection():
    img = cv2.imread('2.jpg', cv2.IMREAD_GRAYSCALE)
    lap = cv2.Laplacian(img, cv2.CV_64F, ksize=3)
    lap = np.uint8(np.absolute(lap))
    sobelX = cv2.Sobel(img, cv2.CV_64F, 1, 0)
    sobelY = cv2.Sobel(img, cv2.CV_64F, 0, 1)

    sobelX = np.uint8(np.absolute(sobelX))
    sobelY = np.uint8(np.absolute(sobelY))

    sobelCombined = cv2.bitwise_or(sobelX, sobelY)

    titles = ['image', 'Laplacian', 'sobelX', 'sobelY', 'sobelCombined']
    images = [img, lap, sobelX, sobelY, sobelCombined]
    for i in range(5):
        plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
        plt.title(titles[i])
        plt.xticks([]), plt.yticks([])

    plt.show()
Code Example #13
File: gradient.py  Project: SSKim76/Python_Test
def grad():
    img = cv2.imread(selImg.select_img(6), cv2.IMREAD_GRAYSCALE)
    #img = cv2.imread(select_img('2'), cv2.IMREAD_GRAYSCALE)

    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)

    plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
    plt.title('original'), plt.xticks([]), plt.yticks([])

    plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
    plt.title('Laplacian'), plt.xticks([]), plt.yticks([])

    plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
    plt.title('sobel X'), plt.xticks([]), plt.yticks([])

    plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
    plt.title('sobel Y'), plt.xticks([]), plt.yticks([])

    plt.show()
Code Example #14
File: hand_fD.py  Project: watson1101/AI-project
    def fourierDesciptor(self, res):
        """计算傅里叶描述子

        :param res: 输入图片
        :return: 图像,描述子点
        """
        # Laplacian算子进行八邻域检测
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        dst = cv2.Laplacian(gray, cv2.CV_16S, ksize=3)
        Laplacian = cv2.convertScaleAbs(dst)
        contour = self.find_contours(Laplacian)  # 提取轮廓点坐标
        contour_array = contour[0][:, 0, :]  # 注意这里只保留区域面积最大的轮廓点坐标
        contours_complex = np.empty(contour_array.shape[:-1], dtype=complex)
        contours_complex.real = contour_array[:, 0]  # 横坐标作为实数部分
        contours_complex.imag = contour_array[:, 1]  # 纵坐标作为虚数部分
        fourier_result = np.fft.fft(contours_complex)  # 进行傅里叶变换
        #fourier_result = np.fft.fftshift(fourier_result)
        descirptor_in_use = self.truncate_descriptor(
            fourier_result)  # 截短傅里叶描述子
        img1 = res.copy()
        self.reconstruct(res, descirptor_in_use)  # 绘图显示描述子点
        self.draw_circle(img1, descirptor_in_use)  # 相关关定位框架
        return res, descirptor_in_use
Code Example #15
File: FlyShoot.py  Project: hama1185/tello
def main():
    drone = tellopy.Tello()
    os.makedirs('raw_data', exist_ok=True)  # directory for saving raw frames
    os.makedirs('take_picture', exist_ok=True)  # directory for captured pictures
    os.makedirs('process_picture', exist_ok=True)  # directory for processed pictures
    SCREEN_WIDTH = 640
    SCREEN_HEIGHT = 480

    pygame.joystick.init()
    try:
        joy = pygame.joystick.Joystick(0)  # create a joystick instance
        joy.init()  # init instance
        print('Joystick name: ' + joy.get_name())
        print('Number of buttons: ' + str(joy.get_numbuttons()))
        pygame.init()
        screen = pygame.display.set_mode(
            (SCREEN_WIDTH, SCREEN_HEIGHT))  # create the window
        pygame.display.set_caption('Joystick')  # window title
        pygame.display.flip()  # update the display
    except pygame.error:
        print('No joystick was found.')

    try:
        drone.connect()
        drone.wait_for_connection(20.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        fly_sw = False  # toggles between takeoff and land
        scale = 4  # adjust as needed
        # skip first 300 frames
        frame_skip = 300

        raw_count = 0  # raw file number
        picture_count = 0  # picture file number

        while True:

            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                cv2.imshow('Original', image)
                cv2.waitKey(1)
                if frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)
                dir_write('raw_data', 'frame_{:04d}.png'.format(raw_count),
                          image)
                raw_count += 1

                for e in pygame.event.get():  # event check
                    if e.type == QUIT:  # quit requested?
                        drone.quit()
                        return
                    if e.type == KEYDOWN and e.key == K_ESCAPE:  # ESC pressed?
                        drone.quit()
                        return

                    # joystick-related event checks
                    if e.type == pygame.locals.JOYAXISMOTION:
                        x1, y1 = joy.get_axis(0), joy.get_axis(
                            1)  # left stick x, y
                        x2, y2 = joy.get_axis(4), joy.get_axis(
                            3)  # right stick x, y
                        #print('x and y : ' + str(x) +' , '+ str(y))

                        drone.left_x = -x1
                        drone.left_y = -y1

                        drone.right_x = x2 / scale
                        drone.right_y = -y2 / scale
                    elif e.type == pygame.locals.JOYBALLMOTION:
                        print('ball motion')
                    elif e.type == pygame.locals.JOYHATMOTION:
                        print('hat motion')
                    elif e.type == pygame.locals.JOYBUTTONDOWN:
                        print('button ' + str(e.button) + ' pressed')
                        if int(e.button) == 7 and fly_sw == False:  #start
                            drone.takeoff()
                            fly_sw = True

                        elif int(e.button) == 7 and fly_sw == True:  #start
                            drone.land()
                            drone.quit()
                            cv2.destroyAllWindows()
                            filepath = os.path.join('raw_data')

                            files = os.listdir(filepath)
                            raw_count = 0

                            for file in files:
                                index = re.search('.png', file)
                                if index:
                                    raw_count += 1

                            print(raw_count)
                            # join the frames into a video
                            fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
                            video = cv2.VideoWriter('replay.mp4', fourcc, 20.0,
                                                    (640, 480))

                            for i in range(0, raw_count):
                                filepath = os.path.join(
                                    'raw_data', 'frame_{:04d}.png'.format(i))
                                img = cv2.imread(filepath)
                                img = cv2.resize(img, (640, 480))
                                video.write(img)

                            video.release()

                            for i in range(0, picture_count):
                                filepath = os.path.join(
                                    'take_picture',
                                    'picture_{:04d}.png'.format(i))
                                img = cv2.imread(filepath)
                                print(cv2.Laplacian(
                                    img, cv2.CV_64F).var())  # Laplacian variance (sharpness metric)
                                pixel = pic.pixelArt(img)
                                dir_write('process_picture',
                                          'dot_{:04d}.png'.format(i), pixel)
                                water = pic.waterColor(img)
                                dir_write('process_picture',
                                          'water_{:04d}.png'.format(i), water)
                                oil = pic.oilPaint(img)
                                dir_write('process_picture',
                                          'oil_{:04d}.png'.format(i), oil)

                            fly_sw = False

                        if int(e.button) == 3:  #Y
                            dir_write(
                                'take_picture',
                                'picture_{:04d}.png'.format(picture_count),
                                image)
                            picture_count += 1

                    elif e.type == pygame.locals.JOYBUTTONUP:
                        print('button ' + str(e.button) + ' released')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Code Example #16
    def accumulate_contours(self, labels, scale=4):  # noqa: C901

        label_list = {}
        for label_obj in labels:
            files = eval(label_obj['files'])[0]  # NOTE: ast.literal_eval would be safer here
            path = label_obj['path']
            dcm = pydicom.dcmread(os.path.join(path, files),
                                  stop_before_pixels=True)
            label = label_obj['data']

            lv_label = remove_small_3d(label, 2)

            series_uid = label_obj['series']
            study_uid = label_obj['study']
            print(f"load dcm:{os.path.join(path, files)}")

            label_list[series_uid] = {
                "SeriesDescription": label_obj['description'],
                "ImageOrientationPatient": dcm.ImageOrientationPatient,
                "ImagePositionPatient": dcm.ImagePositionPatient,
                "PixelSpacing": dcm.PixelSpacing,
                "label": lv_label
            }
            if "SpacingBetweenSlices" in dcm:
                SpacingDistance = dcm.SpacingBetweenSlices
            print(
                f"desc:{dcm.SeriesDescription}, position:{dcm.ImagePositionPatient}, orientation:{dcm.ImageOrientationPatient}, study uid:{study_uid}, series uid:{series_uid}"
            )

            contours_list = []
            for i in range(lv_label.shape[-1]):
                label_edge = lv_label[:, :, i]
                if len(np.unique(label_edge)) > 1:
                    label_edge = (label_edge * 255).astype(np.uint8)

                    # cv2.imwrite(f"/dresden/users/qc58/work/ATMI/output/2/demo/{item}-{i}-orig.jpg", label_edge)

                    label_edge = cv2.Laplacian(label_edge, cv2.CV_8U)
                    # cv2.imwrite(f"/dresden/users/qc58/work/ATMI/output/2/demo/{item}-{i}-edge.jpg", label_edge)
                    # label_edge = cv2.Canny(label_edge, 30, 200)
                    contours, hierarchy = cv2.findContours(
                        label_edge, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
                    contours = sorted(contours, key=len)  # longest contour last
                    if scale > 1:
                        contours = [c_i / scale for c_i in contours]

                    if len(contours) > 0:
                        if contours[-1].shape[0] > 30:
                            append_list = [{
                                "data": contours[-1],
                                "desc": dcm.SeriesDescription
                            }]
                            if "SAX" in dcm.SeriesDescription and len(
                                    contours
                            ) > 1 and contours[-2].shape[0] > 30:
                                # print(f"SAX, and include the contour.")
                                if len(contours) > 2:
                                    append_list.append({
                                        "data":
                                        contours[-3],
                                        "desc":
                                        "INNER_" + dcm.SeriesDescription
                                    })
                                # append_list.append(contours[-2])

                            contours_list.append(append_list)
                else:
                    contours_list.append([])
            label_list[series_uid]['contours'] = contours_list

        for serie in label_list:
            label_list[serie]['SliceDistance'] = SpacingDistance

        return label_list
Code Example #17
from cv2 import cv2 as cv
import numpy as np

img = cv.imread('Photos/cat.jpg')
cv.imshow('Cat', img)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

# Laplacian
lap = cv.Laplacian(gray, cv.CV_64F)
lap = np.uint8(np.absolute(lap))
cv.imshow('Laplacian', lap)

# Sobel
sobelx = cv.Sobel(gray, cv.CV_64F, 1, 0)
sobely = cv.Sobel(gray, cv.CV_64F, 0, 1)
combined_sobel = cv.bitwise_or(sobelx, sobely)

cv.imshow('Sobel X', sobelx)
cv.imshow('Sobel Y', sobely)
cv.imshow('Combined Sobel', combined_sobel)

canny = cv.Canny(gray, 150, 175)
cv.imshow('Canny', canny)

cv.waitKey(0)
Code Example #18
File: Sobel.py  Project: roc-n/Opencv_learning
from cv2 import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('star.jpg', 0)
# cv2.CV_64F is the depth (data type) of the output image;
# you can also pass -1 to keep the same depth as the source (np.uint8)
laplacian = cv2.Laplacian(img, cv2.CV_64F)
# the arguments 1, 0 take the first derivative in the x direction only
# (up to second-order derivatives are possible)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
# the arguments 0, 1 take the first derivative in the y direction only
# (up to second-order derivatives are possible)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)


plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
plt.show()
Code Example #19
The Sobel operator is a joint Gaussian smoothing plus differentiation operation, so it is more resistant to noise. You can specify the direction of the derivatives to be taken, vertical or horizontal (by the arguments yorder and xorder respectively). You can also specify the size of the kernel with the argument ksize. If ksize = -1, a 3x3 Scharr filter is used, which gives better results than a 3x3 Sobel filter. Please see the docs for the kernels used.

2. Laplacian Derivatives

It calculates the Laplacian of the image given by the relation \Delta src = \frac{\partial^2 src}{\partial x^2} + \frac{\partial^2 src}{\partial y^2}, where each derivative is found using Sobel derivatives.
'''

# The code below shows all operators in a single diagram. Both Sobel kernels are of 5x5 size. The depth of the output image is set to cv2.CV_8U (np.uint8); note that an unsigned depth clips negative gradients to zero.

from cv2 import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('resource/sudo.png', 0)
whatdatatype = cv2.CV_8U
laplacian = cv2.Laplacian(img, whatdatatype)
sobelx = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=5)
sobely = cv2.Sobel(img, whatdatatype, 0, 1, ksize=5)

plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])

plt.show()
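As a follow-up to the ksize = -1 note in the quoted docs above, a minimal sketch (reusing the grayscale `img` loaded in this snippet) of the Scharr variant:

# with ksize=-1, cv2.Sobel applies the 3x3 Scharr kernel, which is more
# accurate than the 3x3 Sobel kernel
scharrx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
scharry = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)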
Code Example #20
from cv2 import cv2 as cv

img = cv.imread("predio.JPG", 0)

sobelx = cv.Sobel(img, cv.CV_8U, 1, 0, ksize=7)
sobely = cv.Sobel(img, cv.CV_8U, 0, 1, ksize=7)
laplacian = cv.Laplacian(img, cv.CV_8U)

cv.imshow("sobelx", sobelx)
cv.imshow("sobely", sobely)
cv.imshow("laplacian", laplacian)

cv.waitKey(0)
cv.destroyAllWindows()
Code Example #21
def laplace_sharp(image_in):
    depth = cv2.CV_16S
    kernel_size = 3  # aperture size (a single odd integer, not a tuple)
    scale = 1  # scale factor
    # ksize must be passed by keyword: the third positional argument of
    # cv2.Laplacian is dst, not the kernel size
    image_out = cv2.Laplacian(image_in, depth, ksize=kernel_size, scale=scale)
    return image_out
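The function above returns the raw Laplacian response rather than a sharpened image. If actual sharpening is the goal, one common approach (a sketch based on the standard formula g = f - laplacian(f), not taken from the original project) is:

import cv2
import numpy as np

def sharpen(image_in: np.ndarray) -> np.ndarray:
    # subtract the Laplacian response from the source to boost edges
    lap = cv2.Laplacian(image_in.astype(np.float32), cv2.CV_32F, ksize=3)
    return np.clip(image_in.astype(np.float32) - lap, 0, 255).astype(np.uint8)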
Code Example #22
# Extract Sobel Edges
sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)  # dx=1: gradient in x
sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)  # dy=1: gradient in y

cv2.imshow("Original", image)
cv2.waitKey()
cv2.imshow("Sobel X", sobel_x)
cv2.waitKey()
cv2.imshow("Sobel Y", sobel_y)
cv2.waitKey()

sobel_OR = cv2.bitwise_or(sobel_x, sobel_y)
cv2.imshow("sobel_OR", sobel_OR)
cv2.waitKey()

laplacian = cv2.Laplacian(image, cv2.CV_64F)
cv2.imshow("Laplacian", laplacian)
cv2.waitKey()

# Then, we need to provide two values: threshold1 and threshold2. Any gradient value larger than threshold2
# is considered to be an edge. Any value below threshold1 is considered not to be an edge.
# Values in between threshold1 and threshold2 are classified as edges or non-edges based on how their
# intensities are "connected". In this case, any gradient values below 20 are considered non-edges
# whereas any values above 170 are considered edges.

# Canny edge detection uses gradient values as thresholds
canny = cv2.Canny(image, 20, 170)
cv2.imshow("Canny", canny)
cv2.waitKey()
Code Example #23
# Sobel(src, dst, ddepth, dx, dy)

# src − An object of the class Mat representing the source (input) image.
# dst − An object of the class Mat representing the destination (output) image.
# ddepth − An integer variable representing the depth of the image.
# NOTE: When ddepth = -1, the destination image will have the same depth as the source.
# dx − An integer variable representing the x-derivative. (0 or 1). 1 when we want gradient in x-direction
# dy − An integer variable representing the y-derivative. (0 or 1). 1 when we want gradient in y-direction

from cv2 import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread('ronaldo.jpg', 0)

lap_img = cv2.Laplacian(img, cv2.CV_64F, ksize=3)
# NOTE: The Laplacian response can be negative and is of floating type, so first take the
# absolute value to remove the signs, then convert from floating point to uint8, which is required for display.
lap_img = np.uint8(np.absolute(lap_img))

sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0)  # from left to right
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1)  # from top to bottom

sobelx = np.uint8(np.absolute(sobelx))
sobely = np.uint8(np.absolute(sobely))

title = ['Original Image', 'Laplacian Image', 'Sobelx Image', 'Sobely Image']
images = [img, lap_img, sobelx, sobely]

for i in range(len(images)):
    plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')
    plt.title(title[i]), plt.xticks([]), plt.yticks([])

plt.show()
Code Example #24
def laplace_gradient(image):
    dst = cv.Laplacian(image, cv.CV_32F)
    lpls = cv.convertScaleAbs(dst)
    cv.imshow("laplace", lpls)
Code Example #25
File: makeImSet.py  Project: lichnovskad/imRegPy
import os

import cv2
import numpy as np

N = 8

inputPath = os.path.abspath('../dataset/video/vid'+str(N)+'.mov')
outputPath = os.path.abspath('output/vidSet'+str(N))
height = 500

#%%
cap = cv2.VideoCapture(inputPath)

w, h = cap.get(3), cap.get(4)  # CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT
numFrame = int(cap.get(7))  # CAP_PROP_FRAME_COUNT
variance = np.zeros(numFrame)
for n in range(numFrame):
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    variance[n] = cv2.Laplacian(frame, cv2.CV_64F).var()

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
#%%
minDist = 5
window = 15
modulo = numFrame % window
K = int((numFrame-modulo)/window)
frameList = np.zeros(K+1)
#plt.plot(variance)
#plt.show()

for i in range(K+1):
    
Code Example #26
import math

import numpy as np
from adaptive_histo_equilization import adapt_hist_equilization
from cv2 import cv2

img = cv2.imread("test_001.jpg")
#To get the input 1 from Simple color balance
image_1 = simplest_cb(img, 50)
#CLAHE
lab1 = cv2.cvtColor(image_1, cv2.COLOR_BGR2LAB)
lab_temp = lab1.copy()
lab2 = adapt_hist_equilization(lab_temp)
image_2 = cv2.cvtColor(lab2, cv2.COLOR_LAB2BGR)
#For input 1
R1 = np.double(lab1[:, :, 0]) / 255
# calculate laplacian contrast weight
WL1 = cv2.Laplacian(R1, cv2.CV_64F)
h = np.array([1, 4, 6, 4, 1]) / 16
filt = np.outer(h, h)  # 5x5 Gaussian kernel from the separable binomial filter
WC1 = cv2.filter2D(R1, cv2.CV_64F, filt)
# clip at pi / 2.75 before applying the laplacian contrast weight formula
WC1 = np.minimum(WC1, math.pi / 2.75)
WC1 = (R1 - WC1) * (R1 - WC1)
# calculate the saliency weight for input 1
WS1 = saliencyDetection(image_1)
sigma = 0.25
aver = 0.5
# calculate the exposedness weight for input 1
##Formula for the exposedness weight from the book 'Progress in Pattern Recognition,
#Image Analysis, Computer Vision and Applications'
WE1 = np.exp(-(R1 - aver)**2 / (2 * np.square(sigma)))
Code Example #27
def estimate_blur(image: np.ndarray, threshold: int = 100):
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur_map = cv2.Laplacian(image, cv2.CV_64F)
    score = np.var(blur_map)
    return blur_map, score, bool(score < threshold)
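A small usage sketch under assumed inputs ('frame.png' is a hypothetical path); the returned boolean flags images whose score falls below the threshold:

import cv2

img = cv2.imread('frame.png')  # hypothetical example file
blur_map, score, is_blurry = estimate_blur(img, threshold=100)
print(score, is_blurry)  # is_blurry is True when the score is below the threshold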
Code Example #28
File: lab5_1.py  Project: DanielMikh/labsTechVision
# img = cv2.imread('Putin.png', cv2.IMREAD_REDUCED_GRAYSCALE_4)
# img = cv2.imread('forest.png', cv2.IMREAD_REDUCED_COLOR_4)
# img = cv2.imread('chameleon.png', cv2.IMREAD_REDUCED_COLOR_2)
img = cv2.imread('open-logo.png', cv2.IMREAD_GRAYSCALE)
"""
Чем порядок производной больше, тем границы на изображении более размыты, появляется шум на изображении; 
Порядок производной должен быть СТРОГО меньше размера ядра Собеля(ksize). По умолчанию ksize = 3;
Оператор Собеля 'ksize' вычисляет градиент яркости изображения в каждой точке, 
участки с большой величиной градиента (в основном, грани) будут видны как белые линии;
Параметр ddepth - глубина выходного изображения. Возьмем 'cv2.CV_8U', что означает создание 8bit unsigned numpy array
"""
sobel_x = cv2.Sobel(src=img, ddepth=cv2.CV_8U, dx=1, dy=0, ksize=3)
sobel_y = cv2.Sobel(img, cv2.CV_8U, 0, 1)
sobel_xy = cv2.Sobel(img, cv2.CV_8U, 1, 1)
'''Оператор Лапласа идеально подходит для изображений с четкими границами'''
laplacian = cv2.Laplacian(img, cv2.CV_8U, cv2.BORDER_DEFAULT)
'''
Blurring is recommended before using the Canny edge detector;
the Gaussian kernel size along the X and Y axes is (x, y), where x and y are odd integers
'''
# NOTE: a (1, 1) kernel performs no smoothing; also, the third positional
# argument of GaussianBlur is sigmaX, not the border mode
blur = cv2.GaussianBlur(img, (1, 1), 0)
'''
threshold1 is the lower threshold, threshold2 is the upper threshold;
apertureSize is the aperture size for the Sobel operator (3 by default);
if L2gradient is True the gradient magnitude is computed exactly; if False, a simplified approximation is used
'''
canny = cv2.Canny(image=blur,
                  threshold1=200,
                  threshold2=225,
                  apertureSize=3,
                  L2gradient=True)
Code Example #29
File: kenar.py  Project: gtopcular/OPENCVPRACTISE
from cv2 import cv2
import numpy as np

img = cv2.imread("./sources/contour.png")
h, w = img.shape[:2]  # img.shape is (rows, cols) = (height, width)

print(w)
print(h)

# 8-neighbor Laplacian kernel (usable with cv2.filter2D); a signed dtype is
# needed, since -8 would wrap around in uint8
kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], np.int8)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# the third positional argument of cv2.Laplacian is dst; pass ksize by keyword
deneme = cv2.Laplacian(gray, cv2.CV_8U, ksize=3)

cv2.imshow("orginal", img)

cv2.imshow("deneme", deneme)

cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #30
 def la_place():
     dst = cv2.Laplacian(img, cv2.CV_16S, ksize=3)
     return cv2.convertScaleAbs(dst)