Example #1
        row, col = source_gray.shape[:2]
        bottom = source_gray[row-2:row, 0:col]
        mean = cv2.mean(bottom)[0]
        bordersize = 3
        border = cv2.copyMakeBorder(source_gray, top=bordersize, bottom=bordersize, left=0, right=0, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])
        bordersize = 2
        borderWhite = cv2.copyMakeBorder(border, top=bordersize, bottom=bordersize, left=0, right=0, borderType=cv2.BORDER_CONSTANT, value=[255, 255, 255])
        ret, source_thresh = cv2.threshold(borderWhite, 230, 255, 0)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2), (1, 1))
        source_dilated = cv2.dilate(source_thresh, kernel, iterations=1)

        kernel_size = 3
        scale = 1
        delta = 0
        ddepth = cv2.CV_16S
        gray_lap = cv2.Laplacian(source_dilated,ddepth,ksize = kernel_size,scale = scale,delta = delta)
        dst = cv2.convertScaleAbs(gray_lap)
        im2, contours, hierarchy = cv2.findContours(dst,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        #cv2.drawContours(img, contours, -1, (0,255,0), 3)
        # Find the index of the largest contour
        pageFacesWidths = []
        faces = faceCascade.detectMultiScale(
            borderWhite,
            scaleFactor=1.1,
            minNeighbors=2,
            minSize=(100, 100)
            )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            faceFeatures = []
            crop_face_img = borderWhite[y:y+h, x:x+w]
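A portability note on the findContours call above: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but only (contours, hierarchy) in OpenCV 4, so the three-way unpacking breaks on newer versions. A version-agnostic sketch:

contours, hierarchy = cv2.findContours(dst, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]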
Example #2
# -*- coding: utf-8 -*

import cv2

try:
    img = cv2.imread('c:/temp/Lenna.jpg')

    if img is None:
        print('Cannot read the file.')
        import sys
        sys.exit()

    dst = cv2.Laplacian(img, -1)
    cv2.imwrite('c:/temp/laplacian.jpg', dst)

    cv2.imshow('dst', dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
except Exception:  # a bare except would also swallow the SystemExit raised by sys.exit()
    import sys
    print("Error:", sys.exc_info()[0])
    print(sys.exc_info()[1])
    import traceback
    print(traceback.format_tb(sys.exc_info()[2]))
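One caveat: with ddepth=-1 the Laplacian output keeps the 8-bit depth of the input, so negative filter responses are saturated to zero and half of the edge signal is lost. A common variant, sketched here, computes in 16-bit signed and converts back:

lap = cv2.Laplacian(img, cv2.CV_16S, ksize=3)
dst = cv2.convertScaleAbs(lap)  # absolute value, scaled back to 8-bit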
Example #3
def variance_of_laplacian(image):
    return cv2.Laplacian(image, cv2.CV_64F).var()
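A minimal usage sketch for blur detection; the file name and the threshold of 100 are arbitrary, image-dependent assumptions:

import cv2

image = cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical file
if variance_of_laplacian(image) < 100:  # low variance = few sharp edges = likely blurry
    print('Image looks blurry')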
Example #4
import cv2
import glob
import imutils
import os

user = os.path.expanduser('~')
path = os.getcwd()
path = os.path.join(user, path)
imagePath = r'task3\*.jpg'

template_path = r'task3\templates\*.jpg'
print(os.path.join(path, template_path))
templates = []
for temp in glob.glob(os.path.join(path, template_path)):
    template = cv2.imread(temp)
    #cv2.imshow('orig-temp',template)
    laplacian_template = cv2.Laplacian(template, cv2.CV_32F)
    templates.append(laplacian_template)
    #cv2.imshow('laplacian_template',laplacian_template)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

for imagePath in glob.glob(os.path.join(path, imagePath)):
    image = cv2.imread(imagePath)  # glob already returns the joined path

    blur = cv2.GaussianBlur(image, (3, 3), 0)
    laplacian_output = cv2.Laplacian(blur, cv2.CV_32F)
    for temp in templates:
        gray_temp = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
        w, h = gray_temp.shape[::-1]

        res = cv2.matchTemplate(laplacian_output, temp, cv2.TM_CCOEFF_NORMED)
Example #5
import cv2
import numpy as np

cap = cv2.VideoCapture(-1)

while True:
    _, frame = cap.read()

    laplacian = cv2.Laplacian(frame, cv2.CV_64F)
    #cv2.CV_64F is the datatype
    sobelx = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=5)

    # Canny edge detector with low and high thresholds both set to 80
    edges = cv2.Canny(frame, 80, 80)

    cv2.imshow('original', frame)
    cv2.imshow('laplacian', laplacian)
    cv2.imshow('edges', edges)
    cv2.imshow('sobely', sobely)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
#closes the camera
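Note that cap.read() returns a success flag that the loop above ignores; if the camera is unavailable or the stream ends, frame is None and cv2.Laplacian raises an error. A defensive variant of the read, as a sketch:

ok, frame = cap.read()
if not ok:
    break  # stop cleanly instead of processing a None frame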
Example #6
def find_color_card(rgb_img,
                    threshold_type='adaptgauss',
                    threshvalue=125,
                    blurry=False,
                    background='dark',
                    record_chip_size="median"):
    """Automatically detects a color card and output info to use in create_color_card_mask function

    Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl.

        Inputs:
    rgb_img          = Input RGB image data containing a color card.
    threshold_type   = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
    threshvalue      = Thresholding value, optional (default 125)
    blurry           = Bool (default False) if True then image sharpening applied
    background       = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
                        expansion applied to better detect edges, but histogram expansion will be hindered if there
                        is a dark background
    record_chip_size = Optional str for choosing chip size measurement to be recorded, either "median",
                        "mean", or None

    Returns:
    df             = Dataframe containing information about the filtered contours
    start_coord    = Two element tuple of starting coordinates, location of the top left pixel detected
    spacing        = Two element tuple of spacing between centers of chips

    :param rgb_img: numpy.ndarray
    :param threshold_type: str
    :param threshvalue: int
    :param blurry: bool
    :param background: str
    :param record_chip_size: str
    :return df: pandas.core.frame.DataFrame
    :return start_coord: tuple
    :return spacing: tuple
    """
    # Imports
    import skimage
    import pandas as pd
    from scipy.spatial.distance import squareform, pdist

    # Get image attributes
    height, width, channels = rgb_img.shape
    total_pix = float(height * width)

    # Minimum and maximum square size based upon 12 MP image
    min_area = 1000. / 12000000. * total_pix
    max_area = 8000000. / 12000000. * total_pix

    # Create gray image for further processing
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)

    # Variance-of-Laplacian detection of blurriness
    blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()

    # If image is blurry then try to deblur using kernel
    if blurry:
        # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
        kernel = np.array([[-1, -1, -1, -1, -1], [-1, 2, 2, 2, -1],
                           [-1, 2, 8, 2, -1], [-1, 2, 2, 2, -1],
                           [-1, -1, -1, -1, -1]]) / 8.0
        # Store result back out for further processing
        gray_img = cv2.filter2D(gray_img, -1, kernel)

    # In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
    # thresholding. If the image has a bright background, apply CLAHE histogram expansion instead
    if background == 'light':
        clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
        # apply CLAHE histogram expansion to find squares better with canny edge detection
        gray_img = clahe.apply(gray_img)
    elif background != 'dark':
        fatal_error('Background parameter ' + str(background) +
                    ' is not "light" or "dark"!')

    # Thresholding
    if threshold_type.upper() == "OTSU":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
        ret, threshold = cv2.threshold(gaussian, 0, 255,
                                       cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    elif threshold_type.upper() == "NORMAL":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
        ret, threshold = cv2.threshold(gaussian, threshvalue, 255,
                                       cv2.THRESH_BINARY)
    elif threshold_type.upper() == "ADAPTGAUSS":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
        threshold = cv2.adaptiveThreshold(gaussian, 255,
                                          cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                          cv2.THRESH_BINARY_INV, 51, 2)
    else:
        fatal_error('Input threshold_type=' + str(threshold_type) +
                    ' but should be "otsu", "normal", or "adaptgauss"!')

    # Canny edge detection (scikit-image) on the thresholded image
    canny_edges = skimage.feature.canny(threshold)
    canny_edges.dtype = 'uint8'  # reinterpret the boolean mask as uint8 for cv2.findContours

    # Compute contours to find the squares of the card
    contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # Variable of which contour is which
    mindex = []
    # Variable to store moments
    mu = []
    # Variable to x,y coordinates in tuples
    mc = []
    # Variable to x coordinate as integer
    mx = []
    # Variable to y coordinate as integer
    my = []
    # Variable to store area
    marea = []
    # Variable to store whether something is a square (1) or not (0)
    msquare = []
    # Variable to store square approximation coordinates
    msquarecoords = []
    # Variable to store child hierarchy element
    mchild = []
    # Fitted rectangle height
    mheight = []
    # Fitted rectangle width
    mwidth = []
    # Ratio of height/width
    mwhratio = []

    # Extract moments from contour image
    for x in range(0, len(contours)):
        mu.append(cv2.moments(contours[x]))
        marea.append(cv2.contourArea(contours[x]))
        mchild.append(int(hierarchy[0][x][2]))
        mindex.append(x)

    # Cycle through moment data and compute location for each moment
    for m in mu:
        if m['m00'] != 0:  # This is the area term for a moment
            mc.append((int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])))
            mx.append(int(m['m10'] / m['m00']))
            my.append(int(m['m01'] / m['m00']))
        else:
            mc.append((0, 0))
            mx.append(0)
            my.append(0)

    # Loop over our contours and extract data about them
    for index, c in enumerate(contours):
        # Area isn't 0, but greater than min-area and less than max-area
        if marea[index] != 0 and min_area < marea[index] < max_area:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.1 * peri, True)
            center, wh, angle = cv2.minAreaRect(c)  # Rotated rectangle
            mwidth.append(wh[0])
            mheight.append(wh[1])
            # In different versions of OpenCV, width and height can be listed in a different order
            # To normalize the ratio we sort them and take the ratio of the longest / shortest
            wh_sorted = list(wh)
            wh_sorted.sort()
            mwhratio.append(wh_sorted[1] / wh_sorted[0])
            msquare.append(len(approx))
            # If the approximated contour has 4 or 5 points, assume a 4-sided object (allowing one spurious vertex)
            if len(approx) == 4 or len(approx) == 5:
                msquarecoords.append(approx)
            else:  # It's not square
                # msquare.append(0)
                msquarecoords.append(0)
        else:  # Contour has area of 0, not interesting
            msquare.append(0)
            msquarecoords.append(0)
            mwidth.append(0)
            mheight.append(0)
            mwhratio.append(0)

    # Make a pandas df from data for filtering out junk
    all_contours = {
        'index': mindex,
        'x': mx,
        'y': my,
        'width': mwidth,
        'height': mheight,
        'res_ratio': mwhratio,
        'area': marea,
        'square': msquare,
        'child': mchild
    }
    df = pd.DataFrame(all_contours)

    # Add calculated blur factor to output
    df['blurriness'] = blurfactor

    # Filter df for attributes that would isolate squares of reasonable size
    df = df[(df['area'] > min_area) & (df['area'] < max_area) &
            (df['child'] != -1) & (df['square'].isin([4, 5])) &
            (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)]

    # Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
    df = df[~(df['index'].isin(df['index'] + 1))]

    # Count up squares that are within a given radius; more squares = more likelihood of them being the card
    # Median square width times 6 gives the proximity radius for searching for similar squares
    median_sq_width_px = df["width"].median()

    # Squares that are within 6 widths of the current square
    pixeldist = median_sq_width_px * 6
    # Computes euclidean distance matrix for the x and y contour centroids
    distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
    # For each square, count how many other centroids lie within pixeldist pixels
    distmatrixflat = distmatrix.apply(
        lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)

    # Append distprox summary to dataframe
    df = df.assign(distprox=distmatrixflat.values)

    # Compute how similar in area the squares are; many similar values indicate the card
    # Isolate area measurements
    filtered_area = df['area']
    # Create empty matrix for storing comparisons
    sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
    # Double loop through all areas to compare to each other
    for p in range(0, len(filtered_area)):
        for o in range(0, len(filtered_area)):
            big = max(filtered_area.iloc[p], filtered_area.iloc[o])
            small = min(filtered_area.iloc[p], filtered_area.iloc[o])
            pct = 100. * (small / big)
            sizecomp[p][o] = pct

    # How many comparisons given 90% square similarity
    sizematrix = pd.DataFrame(sizecomp).apply(
        lambda sim: sim[sim >= 90].count() - 1, axis=1)

    # Append sizeprox summary to dataframe
    df = df.assign(sizeprox=sizematrix.values)

    # Reorder dataframe for better printing
    df = df[[
        'index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square',
        'child', 'blurriness', 'distprox', 'sizeprox'
    ]]

    # Loosely filter for size and distance (relative size to median)
    minsqwidth = median_sq_width_px * 0.80
    maxsqwidth = median_sq_width_px * 1.2
    df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) &
            (df['width'] > minsqwidth) & (df['width'] < maxsqwidth)]

    # Filter for proximity again to root out stragglers. Find and count up squares that are within a given radius;
    # more squares = more likelihood of them being the card. Median square width times 5 gives the proximity
    # radius for searching for similar squares
    median_sq_width_px = df["width"].median()

    # Squares that are within 5 widths of the current square
    pixeldist = median_sq_width_px * 5
    # Computes euclidean distance matrix for the x and y contour centroids
    distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
    # For each square, count how many other centroids lie within pixeldist pixels
    distmatrixflat = distmatrix.apply(
        lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)

    # Append distprox summary to dataframe
    df = df.assign(distprox=distmatrixflat.values)

    # Filter results for distance proximity to other squares
    df = df[(df['distprox'] >= 4)]
    # Coerce non-numeric values to NaN using pd.to_numeric with errors='coerce':
    df['x'] = pd.to_numeric(df['x'], errors='coerce')
    df['y'] = pd.to_numeric(df['y'], errors='coerce')

    # Remove NaN
    df = df.dropna()

    if df.empty or np.isnan(df['x'].min()) or np.isnan(df['y'].min()):
        fatal_error('No color card found under current parameters')
    else:
        # Extract the starting coordinate
        start_coord = (df['x'].min(), df['y'].min())

        # start_coord = (int(df['X'].min()), int(df['Y'].min()))
        # Calculate the range
        spacingx_short = (df['x'].max() - df['x'].min()) / 3
        spacingy_short = (df['y'].max() - df['y'].min()) / 3
        spacingx_long = (df['x'].max() - df['x'].min()) / 5
        spacingy_long = (df['y'].max() - df['y'].min()) / 5
        # Chip spacing since 4x6 card assumed
        spacing_short = min(spacingx_short, spacingy_short)
        spacing_long = max(spacingx_long, spacingy_long)
        # Smaller spacing measurement might have a chip missing
        spacing = int(max(spacing_short, spacing_long))
        spacing = (spacing, spacing)

    if record_chip_size is not None:
        if record_chip_size.upper() == "MEDIAN":
            chip_size = df.loc[:, "area"].median()
            chip_height = df.loc[:, "height"].median()
            chip_width = df.loc[:, "width"].median()
        elif record_chip_size.upper() == "MEAN":
            chip_size = df.loc[:, "area"].mean()
            chip_height = df.loc[:, "height"].mean()
            chip_width = df.loc[:, "width"].mean()
        else:
            print(
                str(record_chip_size) +
                " Is not a valid entry for record_chip_size." +
                " Must be either 'mean', 'median', or None.")
            chip_size = None
            chip_height = None
            chip_width = None
        # Store into global measurements
        outputs.add_observation(
            variable='color_chip_size',
            trait='size of color card chips identified',
            method='plantcv.plantcv.transform.find_color_card',
            scale='none',
            datatype=float,
            value=chip_size,
            label=str(record_chip_size))
        method = record_chip_size.lower()
        outputs.add_observation(
            variable=f'{method}_color_chip_height',
            trait=f'{method} height of color card chips identified',
            method='plantcv.plantcv.transform.find_color_card',
            scale='none',
            datatype=float,
            value=chip_height,
            label=str(record_chip_size))
        outputs.add_observation(
            variable=f'{method}_color_chip_width',
            trait=f'{method} width of color card chips identified',
            method='plantcv.plantcv.transform.find_color_card',
            scale='none',
            datatype=float,
            value=chip_width,
            label=str(record_chip_size))

    return df, start_coord, spacing
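A minimal call sketch, assuming a BGR image that actually contains a color card (the file name is hypothetical) and that the surrounding PlantCV helpers (fatal_error, outputs) are importable; record_chip_size=None skips the outputs bookkeeping:

import cv2

rgb_img = cv2.imread('card_photo.jpg')  # hypothetical input with a 4x6 color card
df, start_coord, spacing = find_color_card(rgb_img, background='light',
                                           record_chip_size=None)
print(start_coord, spacing)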
Example #7
# Assumes lena_gray (a grayscale image) and clahe (a cv2.createCLAHE instance) are defined earlier
I_CLAHE = clahe.apply(lena_gray)
cv2.imshow("clahe", I_CLAHE)
cv2.waitKey(0)
cv2.destroyAllWindows()

gauss = cv2.GaussianBlur(lena_gray, (5, 5), 3)
cv2.imshow('gauss', gauss)
cv2.waitKey(0)
cv2.destroyAllWindows()

sobel = cv2.Sobel(lena_gray, cv2.CV_64F, 1, 1)
cv2.imshow('sobel', sobel)
cv2.waitKey(0)
cv2.destroyAllWindows()

laplacian = cv2.Laplacian(lena_gray, cv2.CV_64F)
cv2.imshow('laplacian', laplacian)
cv2.waitKey(0)
cv2.destroyAllWindows()

median = cv2.medianBlur(lena_gray, 5)
cv2.imshow('median', median)
cv2.waitKey(0)
cv2.destroyAllWindows()
# #Matplotlib
#
# image_1 = plt.imread('img/mandril.jpg')
# fig, ax = plt.subplots(1)
# # plt.figure(1)
# rect = Rectangle((50,50), 50, 100, fill=False, ec='r')
# ax.add_patch(rect)
Example #8
sobel_x_abs = cv2.convertScaleAbs(sobel_x, alpha=2, beta=1)  # enhance the image; larger values overexpose it
sobel_y_abs = cv2.convertScaleAbs(sobel_y, alpha=2, beta=1)  # enhance the image; larger values overexpose it

sobel = cv2.addWeighted(sobel_x_abs, 0.5, sobel_y_abs, 0.5,
                        gamma=0)  # approximation |G| = |Gx| + |Gy|

# 2. Scharr operator: dx and dy are the derivative orders; 0 means no derivative in that direction (usually 0, 1, or 2)
scharr_x = cv2.Scharr(gray, ddepth=-1, dx=1, dy=0)  # first-order derivative along the x axis
scharr_y = cv2.Scharr(gray, ddepth=-1, dx=0, dy=1)  # first-order derivative along the y axis
scharr_x_abs = cv2.convertScaleAbs(scharr_x)  # image enhancement
scharr_y_abs = cv2.convertScaleAbs(scharr_y)  # gradient enhancement
scharr = cv2.addWeighted(scharr_x_abs, 0.5, scharr_y_abs, 0.5,
                         0)  # approximation |G| = |Gx| + |Gy|

# 3. Laplacian operator
laplacian = cv2.Laplacian(gray, ddepth=-1)

# cv2.imshow("gray", gray)
# cv2.imshow("sobel_x", sobel_x)
# cv2.imshow("sobel_y", sobel_y)
# cv2.imshow("sobel_x_abs", sobel_x_abs)
# cv2.imshow("sobel_y_abs", sobel_y_abs)
# cv2.imshow("sobel", sobel)
# cv2.imshow("scharr_x", scharr_x)
# cv2.imshow("scharr_x_abs", scharr_x_abs)
# cv2.imshow("laplacian", laplacian)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

# Display multiple images with matplotlib
titles = [
Example #9
from PIL import Image
import pytesseract
import cv2
import os

blur_threshold = 100

image = cv2.imread('./image_02.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Compute the focus measure of the image (edge-based)
focus_measure = cv2.Laplacian(gray, cv2.CV_64F).var()

# Threshold: separate black and white
gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)

result = pytesseract.image_to_string(Image.open(filename), lang='eng')
print(result)

os.remove(filename)

if focus_measure < blur_threshold:
    text = "Blurry pix"
    print("\n---" + text + "---")
    cv2.putText(gray, "{} - FM = {:.2f}".format(text, focus_measure), (30, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
else:
    text = "Fine pix"
Example #10
import numpy as np
import argparse
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Original", image)
lap = cv2.Laplacian(image, cv2.CV_64F)
# input image (single channel), output image data type (here 64-bit float)
lap = np.uint8(np.abs(lap))
cv2.imshow("Laplacian", lap)
cv2.waitKey(0)
sobelX = cv2.Sobel(image, cv2.CV_64F, 1, 0)  # vertical
sobelY = cv2.Sobel(image, cv2.CV_64F, 0, 1)  # horizontal
sobelX = np.uint8(np.abs(sobelX))
sobelY = np.uint8(np.abs(sobelY))
sobelCombined = cv2.bitwise_or(sobelX, sobelY)
cv2.imshow("Sobel X", sobelX)
cv2.imshow("Sobel Y", sobelY)
cv2.imshow("Sobel Combined", sobelCombined)
cv2.waitKey(0)
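The bitwise_or of the two absolute gradients is a cheap combination; the true gradient magnitude can instead be computed on the float results before the uint8 conversion, as a sketch:

gx = cv2.Sobel(image, cv2.CV_64F, 1, 0)
gy = cv2.Sobel(image, cv2.CV_64F, 0, 1)
magnitude = cv2.convertScaleAbs(cv2.magnitude(gx, gy))
cv2.imshow("Sobel Magnitude", magnitude)
cv2.waitKey(0)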
Example #11
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('../images/opencv.png', 0)
laplacian = cv.Laplacian(img, cv.CV_64F)
sobelx = cv.Sobel(img, cv.CV_64F, 1, 0, ksize=5)
sobely = cv.Sobel(img, cv.CV_64F, 0, 1, ksize=5)
plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])

plt.show()
Example #12
kernel = get_kernel(sigma)
dst = cv2.GaussianBlur(img, (kernel, kernel), sigmaX=sigma, sigmaY=sigma)

plt.subplot(211),plt.imshow(img),plt.title('Input Image')
plt.subplot(212),plt.imshow(dst),plt.title('Blurred Image')
plt.show()
########################################################

sigma1 = 3
sigma2 = 4
sigma3 = 5

kernel1 = get_kernel(sigma1)
dst1 = cv2.GaussianBlur(dst, (kernel1, kernel1), sigmaX=sigma1, sigmaY=sigma1)
#cv2.Laplacian(src,ddepth,ksize = kernel_size,scale = scale,delta = delta)
dst_1 = cv2.Laplacian(dst1, ddepth = -1, ksize = kernel1, scale = 1, delta = 0)

kernel2 = get_kernel(sigma2)
dst2 = cv2.GaussianBlur(dst, (kernel2, kernel2), sigmaX=sigma2, sigmaY=sigma2)
dst_2 = cv2.Laplacian(dst2, ddepth = -1, ksize = kernel2, scale = 1, delta = 0)

kernel3 = get_kernel(sigma3)
dst3 = cv2.GaussianBlur(dst, (kernel3, kernel3), sigmaX=sigma3, sigmaY=sigma3)
dst_3 = cv2.Laplacian(dst3, ddepth = -1, ksize = kernel3, scale = 1, delta = 0)

# merge the channels in volume
volume[:,:,0]=dst_1
volume[:,:,1]=dst_2
volume[:,:,2]=dst_3

plt.subplot(322),plt.imshow(dst_1),plt.title('Level 1')
Example #13
import cv2
import numpy as np

#Demonstration on video
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred_frame = cv2.GaussianBlur(frame, (5, 5), 0)

    laplacian = cv2.Laplacian(blurred_frame, cv2.CV_64F)
    canny = cv2.Canny(blurred_frame, 50, 150)

    cv2.imshow("Default", frame)
    cv2.imshow("Laplacian", laplacian)
    cv2.imshow("Canny", canny)

    key = cv2.waitKey(1)
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows()

#Demonstration on image
#img = cv2.imread("white_panda.jpg", cv2.IMREAD_GRAYSCALE)

#Sobel operator using gradient, apply gaussian to remove noise
#img = cv2.GaussianBlur(img, (5,5), 0)
#sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
#sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1)
Example #14
    def camera_run(self, path):
        if path is None:
            cap = cv2.VideoCapture(0)
        else:
            cap = cv2.VideoCapture(path)

        while True:
            ret, frame = cap.read()
            frame = cv2.bilateralFilter(frame, 5, 50, 100)

            # change the window size
            src = cv2.resize(frame, (800, 600), interpolation=cv2.INTER_CUBIC)  # window size
            # draw the rectangle that frames the region used to identify the hand pose
            cv2.rectangle(src, (200, 150), (600, 450), (0, 0, 255))
            cv2.imshow("the original image", src)

            roi = src[150:450, 200:600]  # the area used to identify the hand pose
            resb = self.remove_background(roi)
            res = self.skin_color(resb)  # detect the skin
            cv2.imshow("skin detection", res)

            gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
            self.dst = cv2.Laplacian(gray, cv2.CV_16S, ksize=3)
            Laplacian = cv2.convertScaleAbs(self.dst)

            contour, array = self.get_contour(Laplacian)  # Get contour and draw it
            cv2.imshow("draw finger contour", contour)

            largecont = max(array, key=lambda contour: cv2.contourArea(contour))
            frame_res, self.finger_num = self.get_convex(contour, largecont)
            print('The number of finger: {}'.format(self.finger_num))
            cv2.imshow("draw the point of fingertips", frame_res)
            if self.count < 100:
                self.finger_num_lst.append(self.finger_num)
                self.count = self.count + 1
            elif self.count == 100:
                # Decide which servo to move
                if not self.choose_arm_sig:
                    self.choose_arm_sig = self.choose_arm(self.finger_num_lst)
                    if self.choose_arm_sig:
                        self.mot.setBuzzer(1)
                        print('You have chosen servo {} to control\n'.format(self.servo_num))
                    else:
                        print('You should continue choosing a servo\n')
                else:
                    # choose whether to control the palm or the gripper
                    if self.servo_num == 5 and not self.choose_palm_sig:
                        self.choose_palm_sig = self.choose_palm_gripper(self.finger_num_lst)
                        if self.choose_palm_sig:
                            self.mot.setBuzzer(1)
                            print('You have chosen servo {} to control'.format(self.servo_num))
                        else:
                            print('You should continue choosing a servo\n')
                    else:
                        # once the servo to control is chosen,
                        # move the servo
                        self.move_arm(self.finger_num_lst)
            else:
                self.finger_num_lst = []
                self.count = 0
            # time.sleep(0.1)
            key = cv2.waitKey(50) & 0xFF
            if key == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
Example #15
import cv2
import numpy as np

img = cv2.imread(r'Images\drew_selfie.jpg')  # raw string so the backslash is not treated as an escape
img = cv2.GaussianBlur(img, (3, 3), 0)
grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
LapImg = cv2.Laplacian(grayImg, cv2.CV_8U, ksize=3, scale=1, delta=0)

cv2.imshow('Laplacian', LapImg)
cv2.waitKey()
Example #16
def focusmeasure(image):
    return cv2.Laplacian(cv2.GaussianBlur(image, (5, 5), 0), cv2.CV_64F).var()
Example #17
def laplacianSketch(img):  # Laplacian edge detection
    gray_lap = cv2.Laplacian(img, cv2.CV_16S)  # ksize is left at its default, not exposed as a parameter for now
    return cv2.convertScaleAbs(gray_lap)
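A usage sketch (the file names are hypothetical):

import cv2

img = cv2.imread('portrait.jpg')  # hypothetical input
sketch = laplacianSketch(img)
cv2.imwrite('sketch.png', sketch)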
Example #18
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
Example #19
        ret, frame = cap.read()

        # Display the resulting frame
        cv2.imshow('frame',frame)

        file_name = "temp/" + str(num) + "capture.jpg"

        # Store frame by frames
        cv2.imwrite(file_name,frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()

# Detect the blur, find the most relevant frames
def ret_index():
    val_list = []

    for img in glob.iglob("temp/*.jpg"):

        image = cv2.imread(img)
        val = cv2.Laplacian(image, cv2.CV_64F).var()
        val_list.append(val)

    # find the index of the max value
    index = max(range(len(val_list)), key=val_list.__getitem__)
    return index
Example #20
        img = np.zeros([subset.shape[0], subset.shape[1], 3], dtype=np.int8)
        img[:, :, 0] = np.multiply(subset, 255)
        img[:, :, 1] = np.multiply(subset, 255)
        img[:, :, 2] = np.multiply(subset, 255)
        img = img.astype(np.uint8)

        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow('Binary', img)
        kernel = np.ones((math.floor(
            subset.shape[0] * 0.05), math.floor(subset.shape[0] * 0.05)),
                         np.uint8)  # structuring element also called kernel
        opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        cv2.imshow('Opening', opening)
        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
        cv2.imshow('Closing', closing)
        laplacian = cv2.Laplacian(closing, cv2.CV_8U)
        borders_index = np.argwhere(laplacian == np.max(laplacian))

        # cv2.waitKey()
        borders = {}
        for i in range(len(borders_index)):
            borders[borders_index[i, 0]] = {'min': 0, 'max': 0, 'array': []}

        for i in range(len(borders_index)):
            borders[borders_index[i, 0]]['array'].append(borders_index[i, 1])
            borders[borders_index[i, 0]]['min'] = np.min(
                borders[borders_index[i, 0]]['array'])
            borders[borders_index[i, 0]]['max'] = np.max(
                borders[borders_index[i, 0]]['array'])

        for key, values in borders.items():
Example #21
def laplacian(img: np.ndarray, type: int):
    """ Method that calculates the Laplacian of the input image.

    This method takes two parameters: the image to calculate the Laplacian of, img, and an int
    selecting the type of Laplacian kernel to use, since the Laplacian kernel comes in several
    variants, all 3x3 in this example: 4-connected with a positive or negative peak, and
    8-connected with a positive or negative peak. The selected kernel is convolved with the input
    image and the results are displayed in three imshow windows: the original input image, the
    output of this Laplacian, and the OpenCV version of the Laplacian for comparison. The method
    waits for any key to be pressed before closing the windows.

    Parameters
    ----------

    img : np.ndarray
        Image to calculate the Laplacian of.

    type : int
        Type of Laplacian to calculate.
        
            0 (default) = [ 0, 1, 0]
                          [ 1,-4, 1]
                          [ 0, 1, 0]

            1 =           [ 0,-1, 0]
                          [-1, 4,-1]
                          [ 0,-1, 0]

            2 =           [ 1, 1, 1]
                          [ 1,-8, 1]
                          [ 1, 1, 1]

            3 =           [-1,-1,-1]
                          [-1, 8,-1]
                          [-1,-1,-1]
    """

    print("Applying Laplacian kernel number {}".format(type))
    
    # Construct the Laplacian kernels. There are multiple types, as referenced from:
    # R. Fisher, S. Perkins, A. Walker and E. Wolfart. (2003),
    # Laplacian/Laplacian of Gaussian, URL: https://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm, Accessed 29/05/2020
    laplacian_type = {
        0: np.array(([0, 1, 0],[1, -4, 1],[0, 1, 0]), dtype="int"),
        1: np.array(([0, -1, 0],[-1, 4, -1],[0, -1, 0]), dtype="int"),
        2: np.array(([1, 1, 1],[1, -8, 1],[1, 1, 1]), dtype="int"),
        3: np.array(([-1, -1, -1],[-1, 8, -1],[-1, -1, -1]), dtype="int")
    }

    # Convolve our input image with the laplacian filter
    convolve_output = conv(img, laplacian_type.get(type))

    # Show the original image
    cv2.imshow("Original", img)

    # Show the image after applying the laplacian filter
    cv2.imshow("Custom Laplacian Filter", convolve_output)

    # Show the OpenCV version of the Laplacian too, for comparison.
    opencv_laplacian = cv2.convertScaleAbs(cv2.Laplacian(img, cv2.CV_32F, ksize=3))
    cv2.imshow("OpenCV Laplacian", opencv_laplacian)

    # Press any key to exit
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #22
# -*- coding:utf-8 -*-
# @author :adolf
import cv2
import numpy as np

img = cv2.imread("test_data/gaoda/gao_complete/imgs/IMG_20191119_152848.JPEG")
cv2.imwrite('opencv/ori_img.png', img)

img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the image to grayscale

lap = cv2.Laplacian(img, cv2.CV_64F)  # Laplacian edge detection
lap = np.uint8(np.absolute(lap))  # take the absolute value of lap

contours, hierarchy = cv2.findContours(lap, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

newImg = np.zeros_like(img)
newImg.fill(255)

# Draw the contours
cv2.drawContours(newImg, contours, -1, (0, 0, 0), 3)
cv2.imwrite("opencv/test.png", newImg)
Example #23
def laplace_segmentation_old(image, threshold=0.52):
    lp = -cv2.Laplacian(image, cv2.CV_64F, ksize=150)
    lp = lp - np.min(lp)
    lp = lp / np.max(lp)
    return lp > threshold
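Note that cv2.Laplacian rejects this aperture: the kernel size must be odd and no larger than 31, so ksize=150 raises an error. A working variant of the same idea, using the largest legal aperture:

import cv2
import numpy as np

def laplace_segmentation(image, threshold=0.52):
    # the aperture must be odd and <= 31
    lp = -cv2.Laplacian(image, cv2.CV_64F, ksize=31)
    lp = lp - np.min(lp)
    lp = lp / np.max(lp)
    return lp > threshold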
Example #24
"""
LoGフィルタ(sigma=3、カーネルサイズ=5)を実装し、
imori_noise.jpgのエッジを検出せよ。
LoGフィルタとはLaplacian of Gaussianであり、
ガウシアンフィルタで画像を平滑化した後にラプラシアンフィルタで輪郭を取り出すフィルタである。
Laplcianフィルタは二次微分をとるのでノイズが強調されるのを防ぐために、
予めGaussianフィルタでノイズを抑える。
LoGフィルタは次式で定義される。
"""

import cv2
import numpy as np

FILE_PATH = "HashimotoKanna.jpg"
src_img = cv2.imread(FILE_PATH)

gray = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
kernel_size = (3, 3)
# Gaussian filter
Gaussian_img = cv2.GaussianBlur(src_img, ksize=kernel_size, sigmaX=1.3)
LoG_img = cv2.Laplacian(Gaussian_img, -1, ksize=3)
LoG_img = LoG_img.astype(np.float32)
cv2.imshow("LoG Filter", LoG_img)

cv2.waitKey(0)
cv2.destroyAllWindows()
"""
How to use Image Gradients and Edge Detection with OpenCV.
OpenCV provides three types of gradient methods or High-pass filters, Sobel, Scharr and Laplacian.
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread("sudoku.png", cv2.IMREAD_GRAYSCALE)

lap = cv2.Laplacian(img, cv2.CV_64F, ksize=3)
lap = np.uint8(np.absolute(lap))
# convert from float to integer
sobelX = cv2.Sobel(img, cv2.CV_64F, 1, 0)
# dx=1, dy=0: first derivative along x; responds to vertical edges (intensity changes in the x direction)
sobelY = cv2.Sobel(img, cv2.CV_64F, 0, 1)
# dx=0, dy=1: first derivative along y; responds to horizontal edges (intensity changes in the y direction)

sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))

sobelCombined = cv2.bitwise_or(sobelX, sobelY)

titles = ['image', 'Laplacian', 'sobelX', 'sobelY', 'sobelCombined']
images = [img, lap, sobelX, sobelY, sobelCombined]
for i in range(5):
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
Example #26
import cv2 as cv
import sys
import numpy as np

if len(sys.argv) != 2:
    exit(f"Usage: {sys.argv[0]} FILENAME")

filename = sys.argv[1]

original = cv.imread(filename)
print(original.shape)

cv.imshow('Original', original)

gray = cv.cvtColor(original, code=cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

lap = cv.Laplacian(gray, ddepth=cv.CV_64F)
cv.imshow('Laplacian', lap)

lap2 = np.uint8(np.absolute(lap))
cv.imshow('Laplacian 2', lap2)

cv.waitKey(0)
Example #27
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('Capture.PNG', 0)
laplacian = cv2.Laplacian(img, cv2.CV_64F)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
plt.subplot(2, 2, 1), plt.imshow(img, cmap='gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 2), plt.imshow(laplacian, cmap='gray')
plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 3), plt.imshow(sobelx, cmap='gray')
plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 4), plt.imshow(sobely, cmap='gray')
plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])

plt.show()
Example #28
def infer_plot_and_save_3D_pcl(input_files, output_folder, model_wrappers, image_shape, half, save, stop):
    """
    Process the input files to produce and save visualizations

    Parameters
    ----------
    input_files : list (number of cameras) of lists (number of files) of str
        Image files
    output_folder : str
        Folder where the output will be saved
    model_wrappers : list of nn.Module
        Model wrappers used for inference, one per camera
    image_shape : Image shape
        Input image shape
    half : bool
        use half precision (fp16)
    save : str
        Save format (npz or png)
    stop : bool
        If True, run the blocking visualizer and write out the merged point cloud
    """
    N_cams = len(input_files)
    N_files = len(input_files[0])

    camera_names = []
    for i_cam in range(N_cams):
        camera_names.append(get_camera_name(input_files[i_cam][0]))

    cams = []
    not_masked = []

    cams_x = []
    cams_y = []
    cams_z = []

    alpha_mask = 0.5

    # change to half precision for evaluation if requested
    dtype = torch.float16 if half else None

    bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(-1000, -1000, -1), max_bound=(1000, 1000, 5))

    # let's assume all images are from the same sequence (thus same cameras)
    for i_cam in range(N_cams):
        base_folder_str = get_base_folder(input_files[i_cam][0])
        split_type_str  = get_split_type(input_files[i_cam][0])
        seq_name_str    = get_sequence_name(input_files[i_cam][0])
        camera_str      = get_camera_name(input_files[i_cam][0])

        calib_data = {}
        calib_data[camera_str] = read_raw_calib_files_camera_valeo(base_folder_str, split_type_str, seq_name_str, camera_str)

        cams_x.append(float(calib_data[camera_str]['extrinsics']['pos_x_m']))
        cams_y.append(float(calib_data[camera_str]['extrinsics']['pos_y_m']))
        cams_z.append(float(calib_data[camera_str]['extrinsics']['pos_z_m']))

        path_to_theta_lut = get_path_to_theta_lut(input_files[i_cam][0])
        path_to_ego_mask = get_path_to_ego_mask(input_files[i_cam][0])
        poly_coeffs, principal_point, scale_factors = get_intrinsics(input_files[i_cam][0], calib_data)

        poly_coeffs = torch.from_numpy(poly_coeffs).unsqueeze(0)
        principal_point = torch.from_numpy(principal_point).unsqueeze(0)
        scale_factors = torch.from_numpy(scale_factors).unsqueeze(0)
        pose_matrix = torch.from_numpy(get_extrinsics_pose_matrix(input_files[i_cam][0], calib_data)).unsqueeze(0)
        pose_tensor = Pose(pose_matrix)

        cams.append(CameraFisheye(path_to_theta_lut=[path_to_theta_lut],
                             path_to_ego_mask=[path_to_ego_mask],
                             poly_coeffs=poly_coeffs.float(),
                             principal_point=principal_point.float(),
                             scale_factors=scale_factors.float(),
                             Tcw=pose_tensor))
        if torch.cuda.is_available():
            cams[i_cam] = cams[i_cam].to('cuda:{}'.format(rank()), dtype=dtype)

        ego_mask = np.load(path_to_ego_mask)
        not_masked.append(ego_mask.astype(bool).reshape(-1))

    cams_middle = np.zeros(3)
    cams_middle[0] = (cams_x[0] + cams_x[1] + cams_x[2] + cams_x[3]) / 4
    cams_middle[1] = (cams_y[0] + cams_y[1] + cams_y[2] + cams_y[3]) / 4
    cams_middle[2] = (cams_z[0] + cams_z[1] + cams_z[2] + cams_z[3]) / 4

    # create output dirs for each cam
    seq_name = get_sequence_name(input_files[0][0])
    for i_cam in range(N_cams):
        os.makedirs(os.path.join(output_folder, seq_name, 'depth', camera_names[i_cam]), exist_ok=True)
        os.makedirs(os.path.join(output_folder, seq_name, 'rgb', camera_names[i_cam]), exist_ok=True)



    for i_file in range(0, N_files, 25):

        base_0, ext_0 = os.path.splitext(os.path.basename(input_files[0][i_file]))
        print(base_0)

        images = []
        images_numpy = []
        pred_inv_depths = []
        pred_depths = []
        world_points = []
        input_depth_files = []
        has_gt_depth = []
        input_full_masks = []
        has_full_mask = []
        gt_depth = []
        gt_depth_3d = []
        pcl_full = []
        pcl_only_inliers = []
        pcl_only_outliers = []
        pcl_gt = []
        rgb = []
        viz_pred_inv_depths = []
        great_lap = []
        for i_cam in range(N_cams):
            images.append(load_image(input_files[i_cam][i_file]).convert('RGB'))
            images[i_cam] = resize_image(images[i_cam], image_shape)
            images[i_cam] = to_tensor(images[i_cam]).unsqueeze(0)
            if torch.cuda.is_available():
                images[i_cam] = images[i_cam].to('cuda:{}'.format(rank()), dtype=dtype)

            pred_inv_depths.append(model_wrappers[i_cam].depth(images[i_cam]))
            pred_depths.append(inv2depth(pred_inv_depths[i_cam]))
            pred_depth_copy = pred_depths[i_cam].squeeze(0).squeeze(0).cpu().numpy()
            pred_depth_copy = np.uint8(pred_depth_copy)
            lap = np.uint8(np.absolute(cv2.Laplacian(pred_depth_copy,cv2.CV_64F,ksize=3)))
            great_lap.append(lap < 4)
            great_lap[i_cam] = great_lap[i_cam].reshape(-1)
            images_numpy.append(images[i_cam][0].cpu().numpy())
            images_numpy[i_cam] = images_numpy[i_cam].reshape((3, -1)).transpose()
            images_numpy[i_cam] = images_numpy[i_cam][not_masked[i_cam]*great_lap[i_cam]]

        for i_cam in range(1):
            print(i_cam)
            mix_depths = True
            if mix_depths:
                depths = 200 * torch.ones(1, 3, 800, 1280).cuda()
                depths[0, 1, :, :] = torch.clone(pred_depths[i_cam])[0, 0, :, :]
                not_masked1s = torch.zeros(3, 800, 1280).to(dtype=bool)
                not_masked1 = torch.ones(1, 3, 800, 1280).to(dtype=bool)
                for relative in [0]:
                    print((i_cam + relative) % 4)
                    dd = torch.clone(pred_depths[(i_cam + relative) % 4])
                    #path_to_ego_mask = get_path_to_ego_mask(input_files[(i_cam + relative) % 4][0])
                    #ego_mask = np.load(path_to_ego_mask)
                    #m = torch.from_numpy(ego_mask.astype(bool))
                    #dd[0,0,:,:][~m] = 200

                    #pred_depth_copy = pred_depths[(i_cam + relative) % 4].squeeze(0).squeeze(0).cpu().numpy()


                    relative_points_3d = cams[(i_cam + relative) % 4].reconstruct(dd, frame='w')
                    a = np.zeros((3, 800, 1280))
                    a[0, :, :] = cams_x[i_cam]
                    a[1, :, :] = cams_y[i_cam]
                    a[2, :, :] = cams_z[i_cam]
                    #dists = np.linalg.norm(relative_points_3d[0, :, :, :].cpu().numpy() - a, axis=0)



                    #distances_3d = torch.from_numpy(dists).unsqueeze(0).cuda().float()
                    #distances_3d[0,:500:,:700] = 0
                    #distances_3d = torch.sqrt(torch.sum(torch.pow(relative_points_3d - torch.from_numpy(np.array([cams_x[i_cam], cams_y[i_cam], cams_z[i_cam]])).cuda().unsqueeze(0).unsqueeze(-1).unsqueeze(-1), 2), dim =1)).float()
                    print(relative_points_3d[0,:,20:25,20:25])
                    projected_points_2d = cams[i_cam].project(relative_points_3d, frame='w')
                    print(projected_points_2d.shape)
                    print(projected_points_2d[0, 20:25, 20:25, :])
                    interpolated_3d_points = funct.grid_sample(relative_points_3d, projected_points_2d, mode='bilinear',
                                                               padding_mode='zeros', align_corners=True)
                    print(interpolated_3d_points[0, :, 20:25, 20:25])
                    dists2 = np.linalg.norm(interpolated_3d_points[0, :, :, :].cpu().numpy() - a, axis=0)
                    #print(dists2)
                    # projected_points_2d_small = torch.ones(800, 1280).to(dtype=bool).cuda()
                    # print(projected_points_2d)
                    #
                    #
                    # projected_points_2d_small = projected_points_2d_small * projected_points_2d[0, :, : ,0] < 1
                    # projected_points_2d_small = projected_points_2d_small * projected_points_2d[0, :, :, 1] < 1
                    # projected_points_2d_small = projected_points_2d_small * projected_points_2d[0, :, :, 0] > -1
                    # projected_points_2d_small = projected_points_2d_small * projected_points_2d[0, :, :, 1] > -1

                    #interpolated_distances = funct.grid_sample(distances_3d.unsqueeze(1), projected_points_2d, mode='nearest', padding_mode='border', align_corners=True)
                    depths[0, 1 + relative, :, :] = torch.from_numpy(dists2[:, :]).cuda().float()


                    #not_masked1s[1+relative] = torch.from_numpy(ego_mask.astype(bool))
                    #not_masked1[0,:,:,:] = not_masked1[0,:,:,:]  * not_masked1s[1+relative]
                    #depths[0, 1 + relative, :, :][~not_masked1s[1+relative]] = 500
                    #depths[0, 1 + relative, :, :][~projected_points_2d_small] = 500

                depths[depths == 0] = 500
                reconstructed = cams[i_cam].reconstruct(depths.min(dim=1, keepdim=True)[0], frame='w')
                #reconstructed[~not_masked1] = 0
                world_points.append(reconstructed)
            else:
                world_points.append(cams[i_cam].reconstruct(pred_depths[i_cam], frame='w'))

        for i_cam in range(1):
            world_points[i_cam] = world_points[i_cam][0].cpu().numpy()
            world_points[i_cam] = world_points[i_cam].reshape((3, -1)).transpose()
            world_points[i_cam] = world_points[i_cam][not_masked[i_cam]*great_lap[i_cam]]
            cam_name = camera_names[i_cam]
            cam_int = cam_name.split('_')[-1]
            input_depth_files.append(get_depth_file(input_files[i_cam][i_file]))
            has_gt_depth.append(os.path.exists(input_depth_files[i_cam]))
            if has_gt_depth[i_cam]:
                gt_depth.append(np.load(input_depth_files[i_cam])['velodyne_depth'].astype(np.float32))
                gt_depth[i_cam] = torch.from_numpy(gt_depth[i_cam]).unsqueeze(0).unsqueeze(0)
                if torch.cuda.is_available():
                    gt_depth[i_cam] = gt_depth[i_cam].to('cuda:{}'.format(rank()), dtype=dtype)
                gt_depth_3d.append(cams[i_cam].reconstruct(gt_depth[i_cam], frame='w'))
                gt_depth_3d[i_cam] = gt_depth_3d[i_cam][0].cpu().numpy()
                gt_depth_3d[i_cam] = gt_depth_3d[i_cam].reshape((3, -1)).transpose()
                #gt_depth_3d[i_cam] = gt_depth_3d[i_cam][not_masked[i_cam]]
            else:
                gt_depth.append(0)
                gt_depth_3d.append(0)
            input_full_masks.append(get_full_mask_file(input_files[i_cam][i_file]))
            has_full_mask.append(os.path.exists(input_full_masks[i_cam]))

            pcl_full.append(o3d.geometry.PointCloud())
            pcl_full[i_cam].points = o3d.utility.Vector3dVector(world_points[i_cam])
            if has_full_mask[i_cam]:
                full_mask = np.load(input_full_masks[i_cam])
                mask_colors = label_colors[correspondence[full_mask]].reshape((-1, 3))#.transpose()
                mask_colors = mask_colors[not_masked[i_cam]*great_lap[i_cam]]
                pcl_full[i_cam].colors = o3d.utility.Vector3dVector(alpha_mask * mask_colors + (1-alpha_mask) * images_numpy[i_cam])
            else:
                pcl_full[i_cam].colors = o3d.utility.Vector3dVector(images_numpy[i_cam])

            pcl = pcl_full[i_cam]#.select_by_index(ind)
            points_tmp = np.asarray(pcl.points)
            colors_tmp = images_numpy[i_cam]#np.asarray(pcl.colors)
            # remove points that are above
            mask_height = points_tmp[:, 2] > 1.5# * (abs(points_tmp[:, 0]) < 10) * (abs(points_tmp[:, 1]) < 3)
            mask_colors_blue = np.sum(np.abs(colors_tmp - np.array([0.6, 0.8, 1])), axis=1) < 0.6  # bleu ciel
            mask_colors_green = np.sum(np.abs(colors_tmp - np.array([0.2, 1, 0.4])), axis=1) < 0.8
            mask_colors_green2 = np.sum(np.abs(colors_tmp - np.array([0, 0.5, 0.15])), axis=1) < 0.2
            mask = 1-mask_height*mask_colors_blue
            mask2 = 1-mask_height*mask_colors_green
            mask3 = 1- mask_height*mask_colors_green2
            mask = mask*mask2*mask3
            pcl = pcl.select_by_index(np.where(mask)[0])
            cl, ind = pcl.remove_statistical_outlier(nb_neighbors=7, std_ratio=1.2)
            pcl = pcl.select_by_index(ind)
            pcl = pcl.voxel_down_sample(voxel_size=0.02)
            #if has_full_mask[i_cam]:
            #    pcl.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.2, max_nn=15))
            pcl_only_inliers.append(pcl)#pcl_full[i_cam].select_by_index(ind)[mask])
            if has_gt_depth[i_cam]:
                pcl_gt.append(o3d.geometry.PointCloud())
                pcl_gt[i_cam].points = o3d.utility.Vector3dVector(gt_depth_3d[i_cam])
                gt_inv_depth = 1 / (np.linalg.norm(gt_depth_3d[i_cam] - cams_middle, axis=1) + 1e-6)
                cm = get_cmap('plasma')
                normalizer = .35#np.percentile(gt_inv_depth, 95)
                gt_inv_depth /= (normalizer + 1e-6)
                pcl_gt[i_cam].colors = o3d.utility.Vector3dVector(cm(np.clip(gt_inv_depth, 0., 1.0))[:, :3])
            else:
                pcl_gt.append(0)

        remove_close_points_lidar_semantic = False
        threshold = 0.5
        threshold2 = 0.1
        if remove_close_points_lidar_semantic:
            for i_cam in range(4):
                if has_full_mask[i_cam]:
                    for relative in [-1, 1]:
                        if not has_full_mask[(i_cam + relative) % 4]:
                            dists = pcl_only_inliers[(i_cam + relative) % 4].compute_point_cloud_distance(pcl_only_inliers[i_cam])
                            p1 = pcl_only_inliers[(i_cam + relative) % 4].select_by_index(np.where(np.asarray(dists) > threshold)[0])
                            p2 = pcl_only_inliers[(i_cam + relative) % 4].select_by_index(np.where(np.asarray(dists) > threshold)[0], invert=True).uniform_down_sample(15)#.voxel_down_sample(voxel_size=0.5)
                            pcl_only_inliers[(i_cam + relative) % 4] = p1 + p2
                if has_gt_depth[i_cam]:
                    if has_full_mask[i_cam]:
                        down = 15
                    else:
                        down = 30
                    dists = pcl_only_inliers[i_cam].compute_point_cloud_distance(pcl_gt[i_cam])
                    p1 = pcl_only_inliers[i_cam].select_by_index(np.where(np.asarray(dists) > threshold2)[0])
                    p2 = pcl_only_inliers[i_cam].select_by_index(np.where(np.asarray(dists) > threshold2)[0], invert=True).uniform_down_sample(down)#.voxel_down_sample(voxel_size=0.5)
                    pcl_only_inliers[i_cam] = p1 + p2

        for i_cam2 in range(1):
            for suff in ['', 'bis', 'ter']:
                vis_only_inliers = o3d.visualization.Visualizer()
                vis_only_inliers.create_window(visible = True, window_name = 'inliers'+str(i_file))
                for i_cam in range(1):
                    vis_only_inliers.add_geometry(pcl_only_inliers[i_cam])
                for i, e in enumerate(pcl_gt):
                    if e != 0:
                        vis_only_inliers.add_geometry(e)
                ctr = vis_only_inliers.get_view_control()
                ctr.set_lookat(lookat_vector)
                ctr.set_front(front_vector)
                ctr.set_up(up_vector)
                ctr.set_zoom(zoom_float)
                param = o3d.io.read_pinhole_camera_parameters('/home/vbelissen/Downloads/test/cameras_jsons/test'+str(i_cam2+1)+suff+'.json')
                ctr.convert_from_pinhole_camera_parameters(param)
                opt = vis_only_inliers.get_render_option()
                opt.background_color = np.asarray([0, 0, 0])
                opt.point_size = 4.0
                #opt.light_on = False
                #vis_only_inliers.update_geometry('inliers0')
                vis_only_inliers.poll_events()
                vis_only_inliers.update_renderer()
                if stop:
                    vis_only_inliers.run()
                    pcd1 = pcl_only_inliers[0]+pcl_only_inliers[1]+pcl_only_inliers[2]+pcl_only_inliers[3]
                    for i_cam3 in range(1):
                        if has_gt_depth[i_cam3]:
                            pcd1 += pcl_gt[i_cam3]
                    if i_cam2==0 and suff=='':
                        o3d.io.write_point_cloud(os.path.join(output_folder, seq_name, 'open3d', base_0 + '.pcd'), pcd1)
                #param = vis_only_inliers.get_view_control().convert_to_pinhole_camera_parameters()
                #o3d.io.write_pinhole_camera_parameters('/home/vbelissen/Downloads/test.json', param)
                image = vis_only_inliers.capture_screen_float_buffer(False)
                plt.imsave(os.path.join(output_folder, seq_name, 'pcl',  'normal',  str(i_cam2) + suff, base_0 + '_normal_' + str(i_cam2) + suff + '.png'),
                           np.asarray(image), dpi=1)
                vis_only_inliers.destroy_window()
                del ctr
                del vis_only_inliers
                del opt

        # vis_inliers_outliers = o3d.visualization.Visualizer()
        # vis_inliers_outliers.create_window(visible = True, window_name = 'inout'+str(i_file))
        # for i_cam in range(N_cams):
        #     vis_inliers_outliers.add_geometry(pcl_only_inliers[i_cam])
        #     vis_inliers_outliers.add_geometry(pcl_only_outliers[i_cam])
        # for i, e in enumerate(pcl_gt):
        #     if e != 0:
        #         vis_inliers_outliers.add_geometry(e)
        # ctr = vis_inliers_outliers.get_view_control()
        # ctr.set_lookat(lookat_vector)
        # ctr.set_front(front_vector)
        # ctr.set_up(up_vector)
        # ctr.set_zoom(zoom_float)
        # #vis_inliers_outliers.run()
        # vis_inliers_outliers.destroy_window()
        # for i_cam2 in range(4):
        #     for suff in ['', 'bis', 'ter']:
        #         vis_inliers_cropped = o3d.visualization.Visualizer()
        #         vis_inliers_cropped.create_window(visible = True, window_name = 'incrop'+str(i_file))
        #         for i_cam in range(N_cams):
        #             vis_inliers_cropped.add_geometry(pcl_only_inliers[i_cam].crop(bbox))
        #         for i, e in enumerate(pcl_gt):
        #             if e != 0:
        #                 vis_inliers_cropped.add_geometry(e)
        #         ctr = vis_inliers_cropped.get_view_control()
        #         ctr.set_lookat(lookat_vector)
        #         ctr.set_front(front_vector)
        #         ctr.set_up(up_vector)
        #         ctr.set_zoom(zoom_float)
        #         param = o3d.io.read_pinhole_camera_parameters(
        #             '/home/vbelissen/Downloads/test/cameras_jsons/test' + str(i_cam2 + 1) + suff + '.json')
        #         ctr.convert_from_pinhole_camera_parameters(param)
        #         opt = vis_inliers_cropped.get_render_option()
        #         opt.background_color = np.asarray([0, 0, 0])
        #         vis_inliers_cropped.poll_events()
        #         vis_inliers_cropped.update_renderer()
        #         #vis_inliers_cropped.run()
        #         image = vis_inliers_cropped.capture_screen_float_buffer(False)
        #         plt.imsave(os.path.join(output_folder, seq_name, 'pcl', 'cropped',  str(i_cam2) + suff, base_0 + '_cropped_' + str(i_cam2) + suff + '.png'),
        #                    np.asarray(image), dpi=1)
        #         vis_inliers_cropped.destroy_window()
        #         del ctr
        #         del opt
        #         del vis_inliers_cropped

        #del ctr
        #del vis_full
        #del vis_only_inliers
        #del vis_inliers_outliers
        #del vis_inliers_cropped

        for i_cam in range(N_cams):
            rgb.append(images[i_cam][0].permute(1, 2, 0).detach().cpu().numpy() * 255)
            viz_pred_inv_depths.append(viz_inv_depth(pred_inv_depths[i_cam][0], normalizer=0.8) * 255)
            viz_pred_inv_depths[i_cam][not_masked[i_cam].reshape(image_shape) == 0] = 0
            concat = np.concatenate([rgb[i_cam], viz_pred_inv_depths[i_cam]], 0)
            # Save visualization
            output_file1 = os.path.join(output_folder, seq_name, 'depth', camera_names[i_cam], os.path.basename(input_files[i_cam][i_file]))
            output_file2 = os.path.join(output_folder, seq_name, 'rgb', camera_names[i_cam], os.path.basename(input_files[i_cam][i_file]))
            imwrite(output_file1, viz_pred_inv_depths[i_cam][:, :, ::-1])
            if has_full_mask[i_cam]:
                full_mask = np.load(input_full_masks[i_cam])
                mask_colors = label_colors[correspondence[full_mask]]
                imwrite(output_file2, (1-alpha_mask) * rgb[i_cam][:, :, ::-1] + alpha_mask * mask_colors[:, :, ::-1]*255)
            else:
                imwrite(output_file2, rgb[i_cam][:, :, ::-1])
Example #29
import cv2
import numpy as np

filename = 'image002.jpg'
img = cv2.imread(filename, 0)
equ = cv2.equalizeHist(img)
laplacian = cv2.Laplacian(equ, cv2.CV_64F)
sharp = img - laplacian  # float64 result; values can fall outside [0, 255]
blur = cv2.GaussianBlur(sharp, (5, 5), 0)
blur = np.clip(blur, 0, 255).astype(np.uint8)  # clip before converting to avoid wrap-around
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

circles = cv2.HoughCircles(blur,
                           cv2.HOUGH_GRADIENT,
                           1,
                           80,
                           param1=128,
                           param2=30,
                           minRadius=10,
                           maxRadius=80)

circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)

cv2.imshow('equ', equ)
cv2.waitKey(0)
cv2.imshow('detected circles', cimg)
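cv2.HoughCircles returns None when no circles are detected, which would make the np.uint16 conversion above raise an error; a defensive sketch of the drawing step:

if circles is not None:
    for i in np.uint16(np.around(circles))[0, :]:
        cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)  # outer circle
        cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)  # center
else:
    print('no circles detected')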
Example #30
def laplacian_demo(image):
    dst = cv.Laplacian(image, cv.CV_32F)
    lpls = cv.convertScaleAbs(dst)
    cv.imshow("laplacian_demo", lpls)