row_apple, col_apple, channel_apple = apple_db.shape
row_orange, col_orange, channel_orange = orange_db.shape

mask = np.zeros((row_apple, col_apple, channel_apple))
mask[:, :int(col_apple/2)] = 1
cv2.imshow('Mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()

########################################################## Direct blending ######################
direct_blend = mask*apple_db + (1 - mask)*orange_db
plt.figure()
plt.imshow(direct_blend)
plt.title("Direct Blending")
plt.show()

########################################################## Alpha blending ######################
# Note: cv2.GaussianBlur's 4th positional argument is dst, not borderType, and
# OpenCV's filtering functions do not support cv2.BORDER_WRAP, so pass a
# supported border mode by keyword
mask_blur = cv2.GaussianBlur(mask, (15, 15), 15, borderType=cv2.BORDER_REFLECT)
alpha_blend = mask_blur*apple_db + (1 - mask_blur)*orange_db
plt.figure()
plt.imshow(alpha_blend)
plt.title("Alpha Blending")
plt.show()

########################################################## Multiresolution blending ######################
reconstructed_image= multiresolution(apple_db,orange_db)
plt.figure()
plt.imshow(reconstructed_image)
plt.title("Multiresolution Blending")
plt.show()
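# `multiresolution` is not defined in this snippet. A minimal Laplacian-pyramid
# blending sketch, assuming float images in [0, 1] and a left/right seam like
# the mask above (the pyramid depth is illustrative):
def multiresolution(img1, img2, depth=5):
    mask = np.zeros(img1.shape)
    mask[:, :img1.shape[1] // 2] = 1
    # Gaussian pyramids for both images and the mask
    gp1, gp2, gpm = [img1.astype(np.float64)], [img2.astype(np.float64)], [mask]
    for _ in range(depth):
        gp1.append(cv2.pyrDown(gp1[-1]))
        gp2.append(cv2.pyrDown(gp2[-1]))
        gpm.append(cv2.pyrDown(gpm[-1]))
    # Laplacian pyramids, coarsest level first
    lp1, lp2 = [gp1[-1]], [gp2[-1]]
    for i in range(depth, 0, -1):
        size = (gp1[i - 1].shape[1], gp1[i - 1].shape[0])
        lp1.append(gp1[i - 1] - cv2.pyrUp(gp1[i], dstsize=size))
        lp2.append(gp2[i - 1] - cv2.pyrUp(gp2[i], dstsize=size))
    # Blend each level with the matching mask level, then collapse the pyramid
    blended = [m * a + (1 - m) * b for a, b, m in zip(lp1, lp2, reversed(gpm))]
    out = blended[0]
    for i in range(1, depth + 1):
        size = (blended[i].shape[1], blended[i].shape[0])
        out = cv2.pyrUp(out, dstsize=size) + blended[i]
    return np.clip(out, 0, 1)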
Example #2
def get_contours_dict(channel_image, blur_size):

    # Blur the image to make the ranges smoother
    smoothed_image = cv2.GaussianBlur(channel_image,
                                      ksize=(blur_size, blur_size),
                                      sigmaX=0)

    # Find contours on the smoothed image
    contours_list = []
    for color_num in range(len(ParamsConfig.ColorRanges) - 1):
        channel_min_value = ParamsConfig.ColorRanges[color_num]
        channel_max_value = ParamsConfig.ColorRanges[color_num + 1]

        # Build a mask of the pixels whose values fall within this color range
        image_mask = cv2.inRange(smoothed_image, channel_min_value,
                                 channel_max_value)

        # hierarchy entries are [Next, Previous, First_Child, Parent]
        contours, hierarchies = cv2.findContours(image_mask.copy(),
                                                 cv2.RETR_TREE,
                                                 cv2.CHAIN_APPROX_SIMPLE)

        if len(contours) > 0:
            _contours_dict = {}
            for idx, (contour,
                      hierarchy) in enumerate(zip(contours, hierarchies[0])):
                contour_area = cv2.contourArea(contour)

                # Walk up the hierarchy: a contour at an odd nesting depth is a hole
                is_hole = False
                _hierarchy = hierarchy
                while _hierarchy[3] != -1:
                    is_hole = not is_hole
                    parent_idx = _hierarchy[3]
                    _hierarchy = hierarchies[0][parent_idx]

                # If this contour is a hole, subtract its area from its parent's area
                if is_hole:
                    parent_idx = hierarchy[3]
                    if parent_idx != -1:
                        _contours_dict[parent_idx][
                            ContourKeys.AreaFinal] -= contour_area

                _contours_dict[idx] = {
                    ContourKeys.Contour: contour,
                    ContourKeys.ColorNum: color_num,
                    ContourKeys.AreaFinal: contour_area,
                    ContourKeys.AreaFilled: contour_area,
                    ContourKeys.IsHole: is_hole,
                }

            # Keep only top-level (non-hole) contours
            for idx, obj in _contours_dict.items():
                is_hole = obj[ContourKeys.IsHole]

                if is_hole is False:
                    contours_list.append(obj)

    # Sort the kept contours by final area, largest first
    sorted_contours = sorted(contours_list,
                             key=lambda x: x[ContourKeys.AreaFinal],
                             reverse=True)
    return sorted_contours
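# ContourKeys and ParamsConfig are not defined in this snippet; a minimal
# sketch of what they presumably look like (the range values are illustrative):
class ContourKeys:
    Contour = "contour"
    ColorNum = "color_num"
    AreaFinal = "area_final"
    AreaFilled = "area_filled"
    IsHole = "is_hole"

class ParamsConfig:
    # Bin edges that partition the channel's value range into color bands
    ColorRanges = (0, 64, 128, 192, 256)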
"""
#--------------------------------------------------------------------#


import json
import cv2

conf = json.load(open("conf.json"))
print(conf["input"])
cap = cv2.VideoCapture(conf["input"])
print(cap)
avg = None

while True:
	ret, frame = cap.read()
	#print(ret)
	frame = cv2.resize(frame, (640,480))
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (21, 21), 0)
	text = "Normal"

	# if the average frame is None, initialize it
	if avg is None:
		print("[INFO] starting background model...")
		avg = gray.copy().astype("float")
		continue
	cv2.accumulateWeighted(gray, avg, 0.7)
	frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

	thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255, cv2.THRESH_BINARY)[1]
	thresh = cv2.dilate(thresh, None, iterations=2)
	# OpenCV 4.x findContours returns (contours, hierarchy); 3.x builds returned three values
	contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
Example #4
flann = cv2.FlannBasedMatcher(indexParams, searchParams)

ratio_l=[]
vis_l=[]
applno_l=[]
result = []


outputPath = './Output/'  # path of the output folder
tmpfiles = os.listdir(outputPath)
for f in tmpfiles:
    os.remove(outputPath + str(f))

sampleImage = cv2.imread(samplePath,0)
sampleImage1 = imutils.resize(sampleImage, width = 300)
sampleImage1 = cv2.GaussianBlur(sampleImage1, (5, 5), 0)
kp1_1, des1_1 = sift.detectAndCompute(sampleImage1, None) #detect the features of sample

sampleImage = cv2.imread(samplePath,0)
sampleImage2 = imutils.resize(sampleImage, width = 300)
sampleImage2 = cv2.GaussianBlur(sampleImage2, (1, 1), 0)  # note: a (1, 1) kernel is effectively a no-op
ret,sampleImage2 = cv2.threshold(sampleImage2,220,255,cv2.THRESH_BINARY)
#sampleImage = cv2.Canny(sampleImage, 30, 150)
kp1_2, des1_2 = sift.detectAndCompute(sampleImage2, None) #detect the features of sample
index = 0
for t in tmark_l:
    index = index + 1
    print(index)
    f = t['file']
    queryImage=cv2.imread(f,0)
    try:
def main():
    imgTrainingNumbers = cv2.imread("training_chars.png")            # read in training numbers image

    if imgTrainingNumbers is None:                          # if image was not read successfully
        print("error: image not read from file \n\n")        # print error message to std out
        os.system("pause")                                  # pause so user can see error message
        return                                              # and exit function (which exits program)
    # end if

    imgGray = cv2.cvtColor(imgTrainingNumbers, cv2.COLOR_BGR2GRAY)          # get grayscale image
    imgBlurred = cv2.GaussianBlur(imgGray, (5,5), 0)                        # blur

                                                        # filter image from grayscale to black and white
    imgThresh = cv2.adaptiveThreshold(imgBlurred,                           # input image
                                      255,                                  # make pixels that pass the threshold full white
                                      cv2.ADAPTIVE_THRESH_GAUSSIAN_C,       # use gaussian rather than mean, seems to give better results
                                      cv2.THRESH_BINARY_INV,                # invert so foreground will be white, background will be black
                                      11,                                   # size of a pixel neighborhood used to calculate threshold value
                                      2)                                    # constant subtracted from the mean or weighted mean

    cv2.imshow("imgThresh", imgThresh)      # show threshold image for reference

    imgThreshCopy = imgThresh.copy()        # make a copy of the thresh image; this is necessary because findContours modifies the image

    npaContours, npaHierarchy = cv2.findContours(imgThreshCopy,             # input image; OpenCV 4.x returns (contours, hierarchy)
                                                 cv2.RETR_EXTERNAL,                 # retrieve the outermost contours only
                                                 cv2.CHAIN_APPROX_SIMPLE)           # compress horizontal, vertical, and diagonal segments and leave only their end points

                                # declare empty numpy array, we will use this to write to file later
                                # zero rows, enough cols to hold all image data
    npaFlattenedImages =  np.empty((0, RESIZED_IMAGE_WIDTH * RESIZED_IMAGE_HEIGHT))

    intClassifications = []         # declare empty classifications list, this will be our list of how we are classifying our chars from user input, we will write to file at the end

                                    # possible chars we are interested in are digits 0 through 9, put these in list intValidChars
    intValidChars = [ord('0'), ord('1'), ord('2'), ord('3'), ord('4'), ord('5'), ord('6'), ord('7'), ord('8'), ord('9'),
                     ord('A'), ord('B'), ord('C'), ord('D'), ord('E'), ord('F'), ord('G'), ord('H'), ord('I'), ord('J'),
                     ord('K'), ord('L'), ord('M'), ord('N'), ord('O'), ord('P'), ord('Q'), ord('R'), ord('S'), ord('T'),
                     ord('U'), ord('V'), ord('W'), ord('X'), ord('Y'), ord('Z')]

    for npaContour in npaContours:                          # for each contour
        if cv2.contourArea(npaContour) > MIN_CONTOUR_AREA:          # if contour is big enough to consider
            [intX, intY, intW, intH] = cv2.boundingRect(npaContour)         # get and break out bounding rect

                                                # draw rectangle around each contour as we ask user for input
            cv2.rectangle(imgTrainingNumbers,           # draw rectangle on original training image
                          (intX, intY),                 # upper left corner
                          (intX+intW,intY+intH),        # lower right corner
                          (0, 0, 255),                  # red
                          2)                            # thickness

            imgROI = imgThresh[intY:intY+intH, intX:intX+intW]                                  # crop char out of threshold image
            imgROIResized = cv2.resize(imgROI, (RESIZED_IMAGE_WIDTH, RESIZED_IMAGE_HEIGHT))     # resize image, this will be more consistent for recognition and storage

            cv2.imshow("imgROI", imgROI)                    # show cropped out char for reference
            cv2.imshow("imgROIResized", imgROIResized)      # show resized image for reference
            cv2.imshow("training_numbers.png", imgTrainingNumbers)      # show training numbers image, this will now have red rectangles drawn on it

            intChar = cv2.waitKey(0)                     # get key press

            if intChar == 27:                   # if esc key was pressed
                sys.exit()                      # exit program
            elif intChar in intValidChars:      # else if the char is in the list of chars we are looking for . . .

                intClassifications.append(intChar)                                                # append classification char to integer list of chars (we will convert to float later before writing to file)

                npaFlattenedImage = imgROIResized.reshape((1, RESIZED_IMAGE_WIDTH * RESIZED_IMAGE_HEIGHT))  # flatten image to 1d numpy array so we can write to file later
                npaFlattenedImages = np.append(npaFlattenedImages, npaFlattenedImage, 0)                    # add current flattened image numpy array to list of flattened image numpy arrays
            # end if
        # end if
    # end for

    fltClassifications = np.array(intClassifications, np.float32)                   # convert classifications list of ints to numpy array of floats

    npaClassifications = fltClassifications.reshape((fltClassifications.size, 1))   # reshape to a single column so each label lines up with its flattened image row

    print("\n\ntraining complete!!\n")

    np.savetxt("classifications.txt", npaClassifications)           # write classifications to file
    np.savetxt("flattened_images.txt", npaFlattenedImages)          # write flattened images to file

    cv2.destroyAllWindows()             # remove windows from memory

    return
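# A hedged sketch of how the two saved text files would typically be consumed,
# using OpenCV's built-in k-nearest-neighbours model (the file names match the
# training script above; the flattened sample must use the same RESIZED_IMAGE_*
# dimensions):
import cv2
import numpy as np

npaClassifications = np.loadtxt("classifications.txt", np.float32).reshape(-1, 1)
npaFlattenedImages = np.loadtxt("flattened_images.txt", np.float32)

kNearest = cv2.ml.KNearest_create()
kNearest.train(npaFlattenedImages, cv2.ml.ROW_SAMPLE, npaClassifications)

# classify one flattened, resized character image (float32, shape (1, W*H)):
# retval, results, neighbours, dists = kNearest.findNearest(sampleFlattened, k=1)
# predicted_char = chr(int(results[0][0]))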
Example #6
def sketch(image):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray_image, ksize=(3, 3), sigmaX=0, sigmaY=0)
    edge = cv2.Canny(blur, 5, 70)
    ret, th = cv2.threshold(edge, 100, 255, cv2.THRESH_BINARY_INV)
    return th
Example #7
##### first: connected-component statistics
mask = cv2.threshold(gray, 255, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]  # the threshold value is ignored when THRESH_OTSU is set
stats = cv2.connectedComponentsWithStats(mask, 8)[2]
label_area = stats[1:, cv2.CC_STAT_AREA]

min_area, max_area = 150, 200 # min/max for a single circle
singular_mask = (min_area < label_area) & (label_area <= max_area)
circle_area = np.mean(label_area[singular_mask])

# this should be the number of circles retrieved by the threshold, but I am not sure about it
n_circles = int(np.sum(np.round(label_area / circle_area)))

print('Total circles:', n_circles)

##### second findContours
blur = cv2.GaussianBlur(gray, (7, 7), 2)
ret, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY_INV)  # threshold the blurred image (the original thresholded `gray`, leaving `blur` unused)
contours, h = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

print('Length of contours:', len(contours))
#imCopy = imgArray.copy()
#ret,thresh = cv2.threshold(imgArray,127,255,0)
#from skimage import morphology

##image, contours, hierarchy =  cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

#cv2.drawContours(imCopy,contours,-1,(0,255,0), 3)
#imCopy = cv2.resize(imCopy, (2000, 1500))
#cv2.imshow('draw contours',imCopy)
#cv2.waitKey(0)
index = 1
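# A hedged alternative for counting overlapping circles: the Hough circle
# transform (the parameter values here are illustrative and need tuning):
blur = cv2.GaussianBlur(gray, (7, 7), 2)
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1, minDist=10,
                           param1=100, param2=30, minRadius=5, maxRadius=15)
if circles is not None:
    print('Hough circles:', circles.shape[1])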
Example #8
                    os.mkdir(log1Path + "/" + file_name + "/" + folder1 + "/" +
                             folder2)
                    os.mkdir(log05Path + "/" + file_name + "/" + folder1 +
                             "/" + folder2)
                    os.mkdir(dogPath + "/" + file_name + "/" + folder1 + "/" +
                             folder2)

                img = cv2.imread(imagePath, 0)

                # HE
                HE = cv2.equalizeHist(img)
                # LoG
                LoG05 = ndimage.gaussian_laplace(img, sigma=0.5)
                LoG1 = ndimage.gaussian_laplace(img, sigma=1)
                # DoG
                blur5 = cv2.GaussianBlur(img, (5, 5), 0)
                blur3 = cv2.GaussianBlur(img, (3, 3), 0)
                DoG = cv2.subtract(blur5, blur3)  # cv2.subtract saturates; plain uint8 subtraction would wrap around
                # SIFT
                ''' descs= None
                    sift = cv2.xfeatures2d.SIFT_create()
                    (kps, descs) = sift.detectAndCompute(img, None)'''

                # Paths
                siftImage = siftPath + "/" + file_name + "/" + folder1 + "/" + folder2 + imagePath[
                    -8:-4] + "SIFT.txt"
                heImage = hePath + "/" + file_name + "/" + folder1 + "/" + folder2 + imagePath[
                    -8:-4] + "HE.png"
                log1Image = log1Path + "/" + file_name + "/" + folder1 + "/" + folder2 + imagePath[
                    -8:-4] + "LoG1.png"
                log05Image = log05Path + "/" + file_name + "/" + folder1 + "/" + folder2 + imagePath[
Example #9
from skimage.color import rgb2gray, gray2rgb
from skimage.filters import difference_of_gaussians
import cv2
from sklearn.utils import shuffle
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']


train_imagesClear = np.copy(train_images)
for i in range(len(train_images)):
    image = train_imagesClear[i]
    image = cv2.GaussianBlur(image, (3, 3), 0)
    image = rgb2gray(image)
    image = gray2rgb(image)
    train_imagesClear[i] = image

train_imagesNoisy=np.copy(train_images)
# for i in range(len(train_images)):
#     gauss = np.random.normal(0,.1,(32,32,3))
#     gauss = gauss.reshape(32,32,3)
#     image=(train_imagesNoisy[i]+gauss)
#     image=np.clip(image, 0, 1)
#     train_imagesNoisy[i]=image

train_imagesGray=np.copy(train_images)
for i in range(len(train_images)):
    image=train_imagesGray[i]
Example #10
#!/usr/bin/python3

import cv2
import matplotlib.pyplot as plt

img = cv2.imread("images/rose.png")
img_blur = cv2.GaussianBlur(img, (5, 5), 0)

# cv2 loads images as BGR; convert to RGB so matplotlib shows the true colors
plt.imshow(cv2.cvtColor(img_blur, cv2.COLOR_BGR2RGB))
plt.show()
Example #11
                     50: 0.2,
                     70: 0.4,
                     90: 0.2,
                     100: 0.1
                 },
                 input_noise_std=0.03,
                 plot_every=0,
                 device='cuda'),
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

gaussian_blur_transform = transforms.Compose([
    transforms.RandomResizedCrop(32),
    transforms.Lambda(lambda img: pil_to_np(img)),
    transforms.Lambda(lambda img: cv2.GaussianBlur(img, (5, 5), 0)),
    transforms.Lambda(lambda img: np_to_pil(img)),
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

random_blur_transform = transforms.Compose([
    transforms.RandomResizedCrop(32),
    transforms.Lambda(lambda img: pil_to_np(img)),
    transforms.RandomChoice([
        transforms.Lambda(lambda img: cv2.blur(img, (5, 5))),
        transforms.Lambda(lambda img: cv2.GaussianBlur(img, (5, 5), 0)),
        transforms.Lambda(lambda img: cv2.medianBlur(img, 5)),
    ]),
    transforms.Lambda(lambda img: np_to_pil(img)),
    transforms.ToTensor(),
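# pil_to_np and np_to_pil are not shown in this excerpt; presumably they are
# thin wrappers along these lines, so the cv2 filters can run inside the
# torchvision pipeline:
import numpy as np
from PIL import Image

def pil_to_np(img):
    return np.asarray(img)  # PIL image -> HWC uint8 ndarray

def np_to_pil(arr):
    return Image.fromarray(arr)  # HWC uint8 ndarray -> PIL image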
Example #12
def find_banana(image, ruta='resultados'):
    # Invert the RGB scheme to BGR, since the functions are more compatible
    # with blue given the most weight
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # cv2.imwrite('resultados/bgr.jpg', image)

    # A fixed size based on the largest dimension
    max_dimension = max(image.shape)
    # The output image is scaled to at most 700 px
    scale = 700 / max_dimension
    # Rescale the image proportionally
    image = cv2.resize(image, None, fx=scale, fy=scale)

    # Reduce image noise with a Gaussian filter
    # image_blur = cv2.bilateralFilter(image,9,75,75)
    image_blur = cv2.GaussianBlur(image, (7, 7), 0)
    cv2.imwrite(ruta + '/blur.jpg', image_blur)

    # Focus on color by moving to HSV, which highlights hue and keeps
    # only saturation and value alongside it
    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_RGB2HSV)
    cv2.imwrite(ruta + '/hsv.jpg', image_blur_hsv)

    #kernel = np.ones((5,5),np.uint8)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    erosion = cv2.erode(image_blur_hsv, kernel, iterations=1)
    cv2.imwrite(ruta + '/erosionado.jpg', erosion)
    dilation = cv2.dilate(image_blur_hsv, kernel, iterations=1)
    cv2.imwrite(ruta + '/dilatado.jpg', dilation)
    dilation_blur = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)
    cv2.imwrite(ruta + '/dilatado_blur.jpg', dilation_blur)

    # Filter by color
    # 20-30 hue
    """Here we hit a problem: we picked a range of yellows, but it is not
    recognized in the image, and the range is hard to pin down since it
    sometimes picks up colors it should not. By hue, yellows run from
    50 to 70"""
    #hsv(15, 80, 50)
    #hsv(105, 120, 255)
    min_yellow = np.array([15, 100, 80])
    max_yellow = np.array([105, 255, 255])
    # min_yellow = np.array([20, 100, 80])
    # max_yellow = np.array([30, 255, 255])
    #layer
    mask1 = cv2.inRange(dilation_blur, min_yellow, max_yellow)

    #hsv(230, 0, 0)
    #hsv(270, 255, 255)
    black_min = np.array([130, 0, 0])
    black_max = np.array([170, 255, 255])
    black_mask = cv2.inRange(dilation_blur, black_min, black_max)
    cv2.imwrite(ruta + '/mascara_negro.jpg', black_mask)

    # Filter by brightness
    # 170-180 hue
    # Try to bring out the brightness to get better color recognition
    #hsv(170,100,80)
    #hsv(180,255,255)
    min_yellow2 = np.array([170, 100, 80])
    max_yellow2 = np.array([180, 255, 255])
    mask2 = cv2.inRange(dilation_blur, min_yellow2, max_yellow2)
    cv2.imwrite(ruta + '/mascara1.jpg', mask1)
    cv2.imwrite(ruta + '/mascara2.jpg', mask2)

    # Combine the color masks
    mask = mask1 + mask2 + black_mask
    cv2.imwrite(ruta + '/mask.jpg', mask)
    # opening = cv2.morphologyEx(dilation, cv2.MORPH_OPEN, kernel)
    # cv2.imwrite('resultados/opening.jpg', opening)

    # Clean up the image and build the ellipse

    # Erode the image to close gaps without color, then dilate,
    # within the region we want to enclose
    mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask_closed = cv2.dilate(mask_closed, kernel, iterations=3)
    # mask_closed = cv2.dilate(mask_closed, kernel, iterations = 1)
    # mask_closed = cv2.morphologyEx(mask_closed, cv2.MORPH_CLOSE, kernel)
    cv2.imwrite(ruta + '/closed.jpg', mask_closed)
    # Dilate to reduce noise outside the detected region, then erode
    mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)
    cv2.imwrite(ruta + '/open.jpg', mask_clean)

    # Find the best-matching pattern and get its contour
    big_banana_contour, mask_bananas = find_biggest_contour(mask_clean)

    # Highlight the clean mask and blend it over the image
    overlay = overlay_mask(mask_clean, image)
    cv2.imwrite(ruta + '/overlay.jpg', overlay)

    # Circle the best-matching pattern
    circled, cropped = circle_contour(image, big_banana_contour, ruta)

    # And convert back to the original color scheme
    cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)

    return circled, cropped
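# find_biggest_contour, overlay_mask and circle_contour are not defined in
# this snippet; a minimal sketch of the first, assuming it returns the largest
# contour plus a filled mask of it:
def find_biggest_contour(mask):
    contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None, np.zeros_like(mask)
    biggest = max(contours, key=cv2.contourArea)
    filled = np.zeros_like(mask)
    cv2.drawContours(filled, [biggest], -1, 255, -1)
    return biggest, filled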
Example #13
        raise NotImplementedError("noise type {} not supported!".format(noise_type))


# Image Blurring (Image Smoothing)

noise_type_list = ["", "gauss", "s&p", "poisson", "speckle"]
for i, noise_type in enumerate(noise_type_list, start=1):
    if noise_type == "":
        src_title = "Original"
        src_img = img
    else:
        src_title = noise_type
        src_img = add_noise(img, noise_type=noise_type)

    averaging_blur = cv.blur(src_img, (5, 5))
    gaussian_blur = cv.GaussianBlur(src_img, (5, 5), 0)
    median_blur = cv.medianBlur(src_img, 5)
    bilateral_filtering = cv.bilateralFilter(src_img, 9, 75, 75)

    plt.subplot(1, 5, 1), plt.imshow(cv.cvtColor(src_img, cv.COLOR_BGR2RGB)), plt.title(src_title)
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 5, 2), plt.imshow(cv.cvtColor(averaging_blur, cv.COLOR_BGR2RGB)), plt.title("Averaging Blurred")
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 5, 3), plt.imshow(cv.cvtColor(gaussian_blur, cv.COLOR_BGR2RGB)), plt.title("Gaussian Blurred")
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 5, 4), plt.imshow(cv.cvtColor(median_blur, cv.COLOR_BGR2RGB)), plt.title("Median Blurred")
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 5, 5), plt.imshow(cv.cvtColor(bilateral_filtering, cv.COLOR_BGR2RGB)), plt.title(
        "Bilateral Filtering"
    )
    plt.xticks([]), plt.yticks([])
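# add_noise is only partially shown above (its final `raise` survives in the
# excerpt); a minimal sketch covering the four noise types the loop requests
# (the noise magnitudes are illustrative):
import numpy as np

def add_noise(img, noise_type="gauss"):
    if noise_type == "gauss":
        noise = np.random.normal(0, 25, img.shape)
        return np.clip(img.astype(np.float64) + noise, 0, 255).astype(np.uint8)
    if noise_type == "s&p":
        out = img.copy()
        n = int(0.02 * img.shape[0] * img.shape[1])
        ys, xs = np.random.randint(0, img.shape[0], n), np.random.randint(0, img.shape[1], n)
        out[ys, xs] = 255  # salt
        ys, xs = np.random.randint(0, img.shape[0], n), np.random.randint(0, img.shape[1], n)
        out[ys, xs] = 0    # pepper
        return out
    if noise_type == "poisson":
        return np.random.poisson(img.astype(np.float64)).clip(0, 255).astype(np.uint8)
    if noise_type == "speckle":
        noise = np.random.randn(*img.shape)
        return np.clip(img + img * noise * 0.1, 0, 255).astype(np.uint8)
    raise NotImplementedError("noise type {} not supported!".format(noise_type))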
Example #14
found_images = np.empty((len(file_class_mapping), 32, 32, 3), dtype=np.uint8)

num_cols = 5
num_rows = math.ceil(len(file_class_mapping)/num_cols)
subplot_setup(num_cols, num_rows)

for index, (filename, class_number) in enumerate(file_class_mapping.items()):
    print(index, filename, class_number)
    found_images[index] = cv2.imread(file_dir + "/" + filename, cv2.IMREAD_COLOR)
    found_images[index] = cv2.cvtColor(found_images[index], cv2.COLOR_BGR2RGB)
    subplot_next(found_images[index])

plt.show()

cv2.GaussianBlur(found_images[0], (5, 5), 0)  # note: the blurred result is discarded; GaussianBlur does not modify its input in place

found_images_gray = convert_to_gray(found_images)

# Poor-man's normalization.
found_images_gray = (found_images_gray-128)/128

subplot_setup(num_cols, num_rows)

for image in found_images_gray:
    subplot_next(image)

plt.show()

print('done')
Example #15
cap = cv2.VideoCapture(
    "forest5.avi")  # If you want to use webcam use Index like 0,1.
print("video read successssssss", "\n")
fps = int(cap.get(cv2.CAP_PROP_FPS))
print("FPS is ", fps)
length = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # property 7 is the frame count
print("Frame count is ", length)

while True:
    (grabbed, frame) = cap.read()
    if not grabbed:
        break

    frame = cv2.resize(frame, (320, 240))

    blur = cv2.GaussianBlur(frame, (21, 21), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    #Image.fromarray(hsv).save('hsv_frame_testing.jpg')

    lower = np.array([0, 10, 165], dtype="uint8")
    upper = np.array([30, 255, 255], dtype="uint8")

    mask = cv2.inRange(hsv, lower, upper)

    output = cv2.bitwise_and(frame, hsv, mask=mask)

    no_of_red = cv2.countNonZero(mask)

    if int(no_of_red) > 1000:
Example #16
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os, sys

OriginalImage = cv2.imread('hand.jpg', 0)  # read the image as grayscale
plt.figure(1)
plt.subplot(221), plt.imshow(OriginalImage, 'gray')

GaussianImage = cv2.GaussianBlur(OriginalImage, (3, 3), 0)
plt.subplot(222), plt.imshow(GaussianImage, 'gray')

ret, thrsh_GaussianImage = cv2.threshold(GaussianImage, 225, 255,
                                         cv2.THRESH_BINARY)
plt.subplot(223), plt.imshow(thrsh_GaussianImage, 'gray')
plt.show()

contours, hierarchy = cv2.findContours(thrsh_GaussianImage, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x returns two values
cv2.drawContours(thrsh_GaussianImage, contours, -1, (0, 255, 0), 3)
cv2.imshow("j", thrsh_GaussianImage)
cv2.waitKey()
Example #17
		img = img[i1_start:i1_stop, i2_start:i2_stop,:] #Retain the central 1280x1280 sub-region to create 25 central patches later
		cv2.imwrite('img_CV2_70.jpg', img[:,:,1], [int(cv2.IMWRITE_JPEG_QUALITY), 70]) #JPEG QF=70
		tmprs = misc.imresize(img[:,:,1], 1.5) #resampling with scaling 150% of the green channel
		i1_start = (tmprs.shape[0]-n)//2 #Find the new coordinates of the central 1280x1280 sub-region
		i1_stop = i1_start + n
		i2_start = (tmprs.shape[1]-m)//2
		i2_stop = i2_start + m
		tmprs = tmprs[i1_start:i1_stop, i2_start:i2_stop] #Retain the central 1280x1280 image patch
		tmpj= cv2.imread('img_CV2_70.jpg')
		os.remove('img_CV2_70.jpg') #Delete the saved JPEG image from dresden images folder
		tmp = img[:(img.shape[0]//256)*256,:(img.shape[1]//256)*256,1] #Retain the green channel
		tmprs = tmprs[:(tmprs.shape[0]//256)*256,:(tmprs.shape[1]//256)*256]
		tmpj = tmpj[:(tmpj.shape[0]//256)*256,:(tmpj.shape[1]//256)*256,1] #Retain the green channel
		del(img)
		tmpm = cv2.medianBlur(tmp,5) #Median filtering with 5x5 kernel
		tmpg = cv2.GaussianBlur(tmp,(5,5),0) #Gaussian blur with default sigma=1.1 and kernel size 5x5
		awgn = 2.0*np.random.randn(tmp.shape[0],tmp.shape[1]) #Additive White Gaussian Noise with sigma=2
		tmpw = (tmp+awgn)
		tmpw = np.clip(tmpw,0,255) #Keep image pixel values within [0,255] range
		tmpw = tmpw.astype(np.uint8)
		vblocks = np.vsplit(tmp, tmp.shape[0]//256) #split the image patch into vertical blocks
		shuffle(vblocks)
		vblocks = vblocks[:len(vblocks)]
		imcount = 0
		for v in vblocks:
			hblocks = np.hsplit(v, v.shape[1]//256) #split each vertical block into horizontal blocks
			shuffle(hblocks)
			hblocks = hblocks[:len(hblocks)]
			for h in hblocks:
				X[count-1] = h.reshape((1,1,256,256))
				y[count-1] = 0
Example #18
    cv2.putText(frame_rev, str(park['id']), centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)


while (cap.isOpened()):
    current_count = 0
    video_cur_pos = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0  # current position in the video file, in seconds
    video_cur_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)  # position in frames
    ret, frame_initial = cap.read()
    if ret:
        frame = cv2.resize(frame_initial, None, fx=0.6, fy=0.6)
    else:
        print("Video ended")
        break

    # Background Subtraction
    frame_blur = cv2.GaussianBlur(frame.copy(), (5, 5), 3)
    frame_gray = cv2.cvtColor(frame_blur, cv2.COLOR_BGR2GRAY)
    frame_out = frame.copy()

    # Display the frame number in the top-left corner of the video
    if dict['text_overlay']:
        str_on_frame = "%d/%d" % (video_cur_frame, video_info['num_of_frames'])
        cv2.putText(frame_out, str_on_frame, (5, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (0, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame_out, global_str + str(round(change_pos, 2)) + 'sec', (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (255, 0, 0), 2, cv2.LINE_AA)

    # Motion detection for all objects
    if dict['motion_detection']:
        fgmask = fgbg.apply(frame_blur)
        bw = np.uint8(fgmask == 255) * 255
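# fgbg is created outside this excerpt; presumably an OpenCV MOG2 background
# subtractor along these lines (the parameters are illustrative):
# fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16, detectShadows=True)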
Example #19
def main():

    # initialize frame dimensions, resized frame dimensions, and the ratio between them
    (W, H) = (None, None)
    (newW, newH) = (320, 320)  # needs to be multiples of 32
    (rW, rH) = (None, None)

    # layer names that will be used, one for output probabilities and the other for box coordinates
    layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]

    # load the text detector
    net = cv2.dnn.readNet('frozen_east_text_detection.pb')

    # start the video stream
    vs = VideoStream(src=0).start()
    #vs = cv2.VideoCapture('test_video.mp4')

    # start fps calculator
    fps = FPS().start()

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    out = cv2.VideoWriter('video_demo.avi', fourcc, 10, (1000, 562))

    # video processing loop
    while True:
        frame = vs.read()  # get a frame (VideoStream.read() returns the frame directly)
        # frame = frame[1]  # only needed with cv2.VideoCapture, whose read() returns (ret, frame)
        if frame is None:
            break

        # resize the frame
        frame = imutils.resize(frame, width=1000)
        frame_original = frame.copy()

        if W is None or H is None:
            (H, W) = frame.shape[:2]
            print(H, W)
            rW = W / float(newW)
            rH = H / float(newH)

        frame = cv2.resize(frame, (newW, newH))

        # create a blob and use it for text detection
        blob = cv2.dnn.blobFromImage(frame,
                                     1.0, (newW, newH),
                                     (123.68, 116.78, 103.94),
                                     swapRB=True,
                                     crop=False)
        net.setInput(blob)
        (scores, geometry) = net.forward(layerNames)

        # get bounding boxes for predictions that pass the minimum confidence requirement
        (rects, confidences) = decode_predictions(scores, geometry)
        # eliminate unnecessary overlapping boxes
        boxes = non_max_suppression(np.array(rects), probs=confidences)

        # loop over each box
        for (startX, startY, endX, endY) in boxes:
            # 2 px buffer added to each side of each box
            startX = int(startX * rW - 2)
            startY = int(startY * rH - 2)
            endX = int(endX * rW + 2)
            endY = int(endY * rH + 2)

            cv2.rectangle(frame_original, (startX, startY), (endX, endY),
                          (0, 255, 0), 2)
            # crop the image to just the bounding box
            image_cropped = frame_original[startY:endY, startX:endX]
            # uncomment the line below to detect black text instead of white text
            #image_cropped = cv2.bitwise_not(image_cropped)
            # attempt preprocessing on the image
            try:
                image_gray = cv2.cvtColor(image_cropped, cv2.COLOR_BGR2GRAY)
            except cv2.error:
                continue
            image_blur = cv2.GaussianBlur(image_gray, (1, 1), 0)  # note: a (1, 1) kernel is effectively a no-op
            # thresh, image_thresh = cv2.threshold(image_blur, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
            thresh, image_thresh = cv2.threshold(image_blur, 150, 255,
                                                 cv2.THRESH_BINARY)
            # print(pytesseract.image_to_string(image_thresh, config='-c tessedit_char_whitelist=0123456789 -psm 7'))
            # detect text in the cropped image after preprocessing is done
            text = pytesseract.image_to_string(image_thresh, config='-psm 7')
            cv2.putText(frame_original,
                        text, (endX, endY),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (0, 255, 0),
                        lineType=cv2.LINE_AA)
        # show the frame with bounding boxes drawn
        cv2.imshow("Text Detection", frame_original)
        out.write(frame_original)

        # if q is pressed, quit
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break

    fps.stop()
    print("Elapsed Time: ", fps.elapsed())
    print("FPS: ", fps.fps())

    # stop the webcam and destroy all windows
    #vs.stop()
    vs.release()
    out.release()
    cv2.destroyAllWindows()
    # commencing subtraction
    while True:
        try:
            # fetching each frame
            flag, frame = capture.read()

            if frame is None:
                break
            # Display camera input
            image = frame
            #define the processing reagion
            crop_img = frame[450:700, 450:780]

            # convert to grayscale, gaussian blur, and threshold
            gray = cv.cvtColor(crop_img, cv.COLOR_BGR2GRAY)
            blur = cv.GaussianBlur(gray, (5, 5), 0)
            ret, th = cv.threshold(blur, 60, 255, cv.THRESH_BINARY_INV)
            edged = cv.Canny(blur, 85, 85)

            # Erode to eliminate noise, Dilate to restore eroded parts of image
            mask1 = cv.erode(th, None, iterations=2)
            mask = cv.dilate(mask1, None, iterations=2)

            # Find all contours in frame
            contours, hierarchy = cv.findContours(th.copy(), cv.RETR_LIST,
                                                  cv.CHAIN_APPROX_NONE)
            # Find x-axis centroid of largest contour and cut power to appropriate motor
            # to recenter camera on centroid.
            if len(contours) > 0:
                c = max(contours, key=cv.contourArea)
                M = cv.moments(c)
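# decode_predictions is not defined in the EAST example above; a standard
# decoding sketch for the EAST score/geometry maps (min_confidence is an
# assumed parameter, and the feature maps are 4x smaller than the resized input):
import numpy as np

def decode_predictions(scores, geometry, min_confidence=0.5):
    (num_rows, num_cols) = scores.shape[2:4]
    rects, confidences = [], []
    for y in range(num_rows):
        scores_data = scores[0, 0, y]
        d_top, d_right, d_bottom, d_left = (geometry[0, i, y] for i in range(4))
        angles = geometry[0, 4, y]
        for x in range(num_cols):
            if scores_data[x] < min_confidence:
                continue
            offset_x, offset_y = x * 4.0, y * 4.0
            cos, sin = np.cos(angles[x]), np.sin(angles[x])
            h = d_top[x] + d_bottom[x]
            w = d_right[x] + d_left[x]
            end_x = int(offset_x + cos * d_right[x] + sin * d_bottom[x])
            end_y = int(offset_y - sin * d_right[x] + cos * d_bottom[x])
            rects.append((end_x - int(w), end_y - int(h), end_x, end_y))
            confidences.append(float(scores_data[x]))
    return rects, confidences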
Example #21
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@TIME  :2020/5/19 22:24
#@Author:Michael.ma
# Chapter-2
# Grayscale, Gaussian blur, edges, dilation, erosion

import cv2
import numpy as np

img = cv2.imread("Resources/lena.png")
kernel = np.ones((5, 5), np.uint8)

# Grayscale
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Gaussian blur
imgBlur = cv2.GaussianBlur(imgGray, (7, 7), sigmaX=0)
# Edges
imgCanny = cv2.Canny(img, 150, 150)
# Dilation
imgDilation = cv2.dilate(imgCanny, kernel, iterations=1)
# Erosion
imgEroded = cv2.erode(imgDilation, kernel, iterations=1)

cv2.imshow("Gray", imgGray)
cv2.imshow("Blur", imgBlur)
cv2.imshow('Canny', imgCanny)
cv2.imshow('Dilation', imgDilation)
cv2.imshow('Eroded', imgEroded)

cv2.waitKey(0)
Example #22
def canny(img):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    kernel = 5
    blur = cv2.GaussianBlur(gray, (kernel, kernel), 0)
    canny = cv2.Canny(blur, 50, 150)
    return canny
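# Hypothetical usage: feed the edge map into a probabilistic Hough transform
# to pull out lane-line segments (the file name and parameters are
# illustrative, not from the original):
import numpy as np

img = cv2.imread('road.jpg')
edges = canny(img)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50,
                        minLineLength=40, maxLineGap=5)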
Example #23
def __blur(img, sigma):
    # Blur each channel separately; img is assumed to be channel-first (CHW)
    if sigma > 0.01:
        img[0] = cv2.GaussianBlur(img[0], (7, 7), sigma)
        img[1] = cv2.GaussianBlur(img[1], (7, 7), sigma)
        img[2] = cv2.GaussianBlur(img[2], (7, 7), sigma)
    return img
Example #24
            #print ("Straight")
            valServo = int(degree)
            #print (valServo)
            outServo = valServo
        return outServo
    else:
        #reverse
        print ("REVERSE")

cap = cv2.VideoCapture(0)
#print (cap)
while True:
    ret, frame = cap.read()
    #print(frame.shape)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    blur = cv2.GaussianBlur(hsv,(15,15),0)

    low_yellow = np.array([20, 110, 180 ])
    high_yellow = np.array ([50, 200, 230])
    yellow_mask = cv2.inRange(blur, low_yellow, high_yellow)
    yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask)

    edge = cv2.Canny(yellow, 100, 200)

    pix = np.asarray(edge)
    #print(edge.shape)
    get = np.where(pix[lineCY-1] == 255) # find edge pixels (value 255) in row lineCY-1
    print(get[0])
    center = show_center()
    val_from_center = drawText()
    #val_from_line = int(lineC - center)
Example #25
import numpy as np
import cv2

img = cv2.imread('flower.jpg')
kernel_size = (7, 7)
blur = cv2.GaussianBlur(img, kernel_size, 0)
cv2.imshow('blur', blur)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #26

# Maximize contrast
structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)
imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, structuringElement)

imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

plt.figure(figsize=(12, 10))
plt.imshow(gray, cmap='gray')

# Adaptive thresholding
img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

img_thresh = cv2.adaptiveThreshold(
    img_blurred, 
    maxValue=255.0, 
    adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 
    thresholdType=cv2.THRESH_BINARY_INV, 
    blockSize=19, 
    C=9
)

plt.figure(figsize=(12, 10))
plt.imshow(img_thresh, cmap='gray')

# Find contours
contours, hierarchy = cv2.findContours(
Example #27
    def process_image(self, line, data_dir, mode):
        """ process_image """
        img, grt, img_name, grt_name = self.load_image(
            line, data_dir, mode=mode)
        if mode == ModelPhase.TRAIN:
            img, grt = aug.resize(img, grt, mode)
            # Apply the augmentation variant selected in the config
            if cfg.AUG.MY_AUG:
                aug_funcs = {
                    'aug_baseline': aug_baseline,
                    'aug_baseline_randRotate90': aug_baseline_randRotate90,
                    'aug_baseline_randCrop': aug_baseline_randCrop,
                    'aug_baseline_CLAHE_Sharpen': aug_baseline_CLAHE_Sharpen,
                    'aug_baseline_simple': aug_baseline_simple,
                    'aug_baseline_simple_motionBlur': aug_baseline_simple_motionBlur,
                    'aug_baseline_simple_gaussNoise': aug_baseline_simple_gaussNoise,
                    'aug_baseline_simple_maskDropout': aug_baseline_simple_maskDropout,
                    'aug_baseline_simple_gridDropout': aug_baseline_simple_gridDropout,
                    'aug_baseline_simple_randBC': aug_baseline_simple_randBC,
                    'aug_baseline_simple_randCrop': aug_baseline_simple_randCrop,
                    'aug_baseline_simple_randRotate90': aug_baseline_simple_randRotate90,
                }
                try:
                    aug_func = aug_funcs.get(cfg.AUG.MY_AUG_TYPE)
                    if aug_func is None:
                        raise NotImplementedError(cfg.AUG.MY_AUG_TYPE)
                    img, grt = aug_func(image=img, mask=grt)
                except Exception as e:
                    print(str(e))
            else:
                if cfg.AUG.RICH_CROP.ENABLE:
                    if cfg.AUG.RICH_CROP.BLUR:
                        if cfg.AUG.RICH_CROP.BLUR_RATIO <= 0:
                            n = 0
                        elif cfg.AUG.RICH_CROP.BLUR_RATIO >= 1:
                            n = 1
                        else:
                            n = int(1.0 / cfg.AUG.RICH_CROP.BLUR_RATIO)
                        if n > 0:
                            if np.random.randint(0, n) == 0:
                                radius = np.random.randint(3, 10)
                                if radius % 2 != 1:
                                    radius = radius + 1
                                if radius > 9:
                                    radius = 9
                                img = cv2.GaussianBlur(img, (radius, radius), 0, 0)
                                
                    img, grt = aug_gaussNoise(image=img, mask=grt)
                    img, grt = aug_RandomRotate90(image=img, mask=grt)

                    img, grt = aug.random_rotation(
                        img,
                        grt,
                        rich_crop_max_rotation=cfg.AUG.RICH_CROP.MAX_ROTATION,
                        mean_value=cfg.DATASET.PADDING_VALUE)

                    img, grt = aug.rand_scale_aspect(
                        img,
                        grt,
                        rich_crop_min_scale=cfg.AUG.RICH_CROP.MIN_AREA_RATIO,
                        rich_crop_aspect_ratio=cfg.AUG.RICH_CROP.ASPECT_RATIO)
                    img = aug.hsv_color_jitter(
                        img,
                        brightness_jitter_ratio=cfg.AUG.RICH_CROP.
                        BRIGHTNESS_JITTER_RATIO,
                        saturation_jitter_ratio=cfg.AUG.RICH_CROP.
                        SATURATION_JITTER_RATIO,
                        contrast_jitter_ratio=cfg.AUG.RICH_CROP.
                        CONTRAST_JITTER_RATIO)

                if cfg.AUG.FLIP:
                    if cfg.AUG.FLIP_RATIO <= 0:
                        n = 0
                    elif cfg.AUG.FLIP_RATIO >= 1:
                        n = 1
                    else:
                        n = int(1.0 / cfg.AUG.FLIP_RATIO)
                    if n > 0:
                        if np.random.randint(0, n) == 0:
                            img = img[::-1, :, :]
                            grt = grt[::-1, :]

                if cfg.AUG.MIRROR:
                    if np.random.randint(0, 2) == 1:
                        img = img[:, ::-1, :]
                        grt = grt[:, ::-1]

            img, grt = aug.rand_crop(img, grt, mode=mode)
        elif ModelPhase.is_eval(mode):
            img, grt = aug.resize(img, grt, mode=mode)
            img, grt = aug.rand_crop(img, grt, mode=mode)
        elif ModelPhase.is_visual(mode):
            org_shape = [img.shape[0], img.shape[1]]
            img, grt = aug.resize(img, grt, mode=mode)
            valid_shape = [img.shape[0], img.shape[1]]
            img, grt = aug.rand_crop(img, grt, mode=mode)
        else:
            raise ValueError("Dataset mode={} Error!".format(mode))

        # Normalize image
        if cfg.AUG.TO_RGB:
            img = img[..., ::-1]
        img = self.normalize_image(img)

        if ModelPhase.is_train(mode) or ModelPhase.is_eval(mode):
            grt = np.expand_dims(np.array(grt).astype('int32'), axis=0)
            ignore = (grt != cfg.DATASET.IGNORE_INDEX).astype('int32')

        if ModelPhase.is_train(mode):
            return (img, grt, ignore)
        elif ModelPhase.is_eval(mode):
            return (img, grt, ignore)
        elif ModelPhase.is_visual(mode):
            return (img, grt, img_name, valid_shape, org_shape)
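# normalize_image is defined elsewhere in this reader class; presumably a
# mean/std normalization plus an HWC -> CHW transpose, along these lines
# (cfg.MEAN and cfg.STD are assumed config fields):
#
#     def normalize_image(self, img):
#         img = img.transpose((2, 0, 1)).astype('float32') / 255.0
#         img -= np.array(cfg.MEAN).reshape((len(cfg.MEAN), 1, 1))
#         img /= np.array(cfg.STD).reshape((len(cfg.STD), 1, 1))
#         return img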
#Start video capture
video_capture = cv2.VideoCapture(0)

#Sleep for 2 seconds to let camera initialize properly.
time.sleep(2)

#Loop until OpenCV window is not closed
while True:
    #Store the frame that was read; ret is the success flag
    ret, frame = video_capture.read()
    #Flip the frame to avoid mirroring effect
    frame = cv2.flip(frame,1)
    #Resize the given frame to a 600*600 window
    frame = imutils.resize(frame, width = 600)
    #Blur the frame using a Gaussian filter of kernel size 5, to remove excessive noise
    blurred_frame = cv2.GaussianBlur(frame, (5,5), 0)
    #Convert the frame to HSV, as HSV allow better segmentation.
    hsv_converted_frame = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)

    #Create a mask for the frame, showing green values
    mask = cv2.inRange(hsv_converted_frame, greenLower, greenUpper)
    #Erode the masked output to delete small white dots present in the masked image
    mask = cv2.erode(mask, None, iterations = 2)
    #Dilate the resultant image to restore our target
    mask = cv2.dilate(mask, None, iterations = 2)

    #Find all contours in the masked image
    cnts,_ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    #Define center of the ball to be detected as None
    center = None
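# greenLower and greenUpper are defined before this excerpt; typical HSV
# bounds for tracking a green ball look like this (values are illustrative):
# greenLower = (29, 86, 6)
# greenUpper = (64, 255, 255)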
def GetFiveFeatures(frame):
    # Null check (use `is None`: `== None` on a numpy array compares elementwise)
    if frame is None:
        return [False, 0.0, 0.0, 0.0, 0.0, 0.0, frame]
    origin = frame
    grayimage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Gaussian filtering
    blur = cv2.GaussianBlur(grayimage, (5, 5), 0)

    # Binarize with Otsu's method; THRESH_BINARY_INV assumes samples on a white background
    ret, otsu = cv2.threshold(blur, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # Find contours (OpenCV 4.x returns (contours, hierarchy))
    contours, hierarchy = cv2.findContours(otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Pick the contour with the largest area
    largest_area = 0
    largest_contour_index = 0
    num = len(contours)
    for i in range(num):
        area = cv2.contourArea(contours[i])
        if area > largest_area:
            largest_area = area
            largest_contour_index = i

    print("Largest area: " + str(largest_area))
    maxContour = contours[largest_contour_index]

    # Draw the contour
    cv2.drawContours(origin, maxContour, -1, (0, 0, 255), 2)

    # 1. Rectangularity

    # Find the minimum-area bounding rectangle
    minAreaRect = cv2.minAreaRect(maxContour)
    box = cv2.boxPoints(minAreaRect)
    box = np.int0(box)

    # Draw the box
    cv2.drawContours(origin, [box], 0, (0, 255, 0), 2)

    # Area of the minimum bounding rectangle
    minAreaRect_Area = int(cv2.contourArea(box))
    print("Min bounding rect area: " + str(minAreaRect_Area))

    # Guard: bail out if the denominator is zero
    if minAreaRect_Area == 0.0:
        return [False, 0.0, 0.0, 0.0, 0.0, 0.0, origin]

    # Feature 1: rectangularity
    P_rect = largest_area * 1.0 / minAreaRect_Area
    # Round to three decimal places
    P_rect = round(P_rect, 3)
    print("Rectangularity: " + str(P_rect))

    # 2. Elongation
    # Centroid
    M = cv2.moments(maxContour)
    Centroid_x = int(M['m10'] / M['m00'])
    Centroid_y = int(M['m01'] / M['m00'])
    print("Centroid: " + str(Centroid_x) + " " + str(Centroid_y))
    cv2.circle(origin, (Centroid_x, Centroid_y), 3, (255, 255, 255), -1)

    # Major-axis search
    Major_Axis_Length = 0
    Major_Axis_Angle = 0
    Major_Axis_End_x = 0
    Major_Axis_End_y = 0
    Major_Axis_Begin_x = 0
    Major_Axis_Begin_y = 0

    # Note: this assumes the centroid lies inside the contour
    # Find the major axis
    for angle in range(180):
        theta = angle * 3.14 / 180.0
        lengthBackward = 0
        lengthForward = 0
        point_End_x = Centroid_x
        point_End_y = Centroid_y
        point_Begin_x = Centroid_x
        point_Begin_y = Centroid_y

        # A smaller step is more accurate: a step of 1 first finds the exact line angle, and the line through the centroid at that angle then gives the true intersections of the major axis with the contour
        while cv2.pointPolygonTest(maxContour,
                                   (point_End_x, point_End_y), False) > 0:
            lengthForward = lengthForward + 0.1
            point_End_x = int(
                round(point_End_x + lengthForward * math.cos(theta)))
            point_End_y = int(
                round(point_End_y + lengthForward * math.sin(theta)))

        while cv2.pointPolygonTest(maxContour,
                                   (point_Begin_x, point_Begin_y), False) > 0:
            lengthBackward = lengthBackward + 0.1
            point_Begin_x = int(
                round(point_Begin_x - lengthBackward * math.cos(theta)))
            point_Begin_y = int(
                round(point_Begin_y - lengthBackward * math.sin(theta)))

        if lengthForward + lengthBackward >= Major_Axis_Length:
            Major_Axis_Length = lengthForward + lengthBackward
            Major_Axis_Angle = angle
            Major_Axis_End_x = point_End_x
            Major_Axis_End_y = point_End_y
            Major_Axis_Begin_x = point_Begin_x
            Major_Axis_Begin_y = point_Begin_y

    # Compute the actual major-axis length
    Real_Major_Axis_Length = math.sqrt(
        math.pow((Major_Axis_End_x - Major_Axis_Begin_x), 2) +
        math.pow((Major_Axis_End_y - Major_Axis_Begin_y), 2))

    Real_Major_Axis_Length = round(Real_Major_Axis_Length, 1)
    print "长轴长度 = " + str(Real_Major_Axis_Length)
    print "长轴角度 = " + str(Major_Axis_Angle)
    # print "起点 = " + "x: " + str(Major_Axis_Begin_x) + "  y: " + str(Major_Axis_Begin_y)
    # print "起点 = " + "x: " + str(Major_Axis_End_x) + "  y: " + str(Major_Axis_End_y)

    # Draw the major axis
    cv2.line(origin, (Major_Axis_Begin_x, Major_Axis_Begin_y),
             (Major_Axis_End_x, Major_Axis_End_y), (255, 0, 0), 2)

    # Find the minor axis
    # 1. Get the major-axis line y = k*x + b, used for point-to-line distance and for telling which side of the line a point lies on
    Major_Axis_k = math.tan(Major_Axis_Angle * 3.14 / 180.0)
    Major_Axis_b = Centroid_y - Major_Axis_k * Centroid_x

    # 2. Distance from point (x0, y0) to the line Ax + By + C = 0: d = abs(A*x0 + B*y0 + C) / sqrt(A^2 + B^2)
    Minor_Axis_A = Major_Axis_k
    Minor_Axis_B = -1
    Minor_Axis_C = Major_Axis_b

    # 3. Walk the points on the contour
    Minor_Axis_Under_length = 0
    Minor_Axis_Above_length = 0
    Minor_Axis_Under_End_x = 0
    Minor_Axis_Under_End_y = 0
    Minor_Axis_Above_End_x = 0
    Minor_Axis_Above_End_y = 0

    # Contour point set
    ContourItems = maxContour.shape[0]
    for item in range(ContourItems):
        point_x = maxContour[item][0][0]
        point_y = maxContour[item][0][1]
        # Determine which side of the line the point lies on
        # Above
        if point_y > int(Major_Axis_k * point_x + Major_Axis_b):
            # Point-to-line distance
            dis = abs((Minor_Axis_A * point_x + Minor_Axis_B * point_y +
                       Minor_Axis_C) / math.sqrt(Minor_Axis_A * Minor_Axis_A +
                                                 Minor_Axis_B * Minor_Axis_B))
            if dis >= Minor_Axis_Above_length:
                Minor_Axis_Above_length = dis
                Minor_Axis_Above_End_x = point_x
                Minor_Axis_Above_End_y = point_y
        # Below
        elif point_y < int(Major_Axis_k * point_x + Major_Axis_b):
            # Point-to-line distance
            dis = abs((Minor_Axis_A * point_x + Minor_Axis_B * point_y +
                       Minor_Axis_C) / math.sqrt(Minor_Axis_A * Minor_Axis_A +
                                                 Minor_Axis_B * Minor_Axis_B))
            if dis >= Minor_Axis_Under_length:
                Minor_Axis_Under_length = dis
                Minor_Axis_Under_End_x = point_x
                Minor_Axis_Under_End_y = point_y
                # The third case is the intersection of the contour with the line itself

    # Mark the two points (optional)
    cv2.circle(origin, (Minor_Axis_Above_End_x, Minor_Axis_Above_End_y), 4,
               (255, 255, 255), -1)
    cv2.circle(origin, (Minor_Axis_Under_End_x, Minor_Axis_Under_End_y), 4,
               (255, 255, 255), -1)
    # Draw the line through the two points
    cv2.line(origin, (Minor_Axis_Under_End_x, Minor_Axis_Under_End_y),
             (Minor_Axis_Above_End_x, Minor_Axis_Above_End_y), (0, 255, 255),
             3)

    # Compute the actual minor-axis length
    Real_Minor_Axis_Length = math.sqrt(
        math.pow((Minor_Axis_Above_End_x - Minor_Axis_Under_End_x), 2) +
        math.pow((Minor_Axis_Above_End_y - Minor_Axis_Under_End_y), 2))

    Real_Minor_Axis_Length = round(Real_Minor_Axis_Length, 1)
    print "短轴长度 = " + str(Real_Minor_Axis_Length)

    # Guard: bail out if the denominator is zero
    if Real_Minor_Axis_Length == 0.0:
        return [False, P_rect, 0.0, 0.0, 0.0, 0.0, origin]
    # Elongation = major axis / minor axis
    P_extend = Real_Major_Axis_Length * 1.0 / Real_Minor_Axis_Length
    P_extend = round(P_extend, 3)

    print "延长度 = " + str(P_extend)
    # 画出与长轴距离最远的两点的辅助线,使用时可以不用,画图用作论文使用
    # 画出长轴右方
    line_above_k = math.tan((Major_Axis_Angle - 90) * 3.14 / 180.0)
    line_above_b = Minor_Axis_Above_End_y - line_above_k * Minor_Axis_Above_End_x
    Minor_Axis_Above_Begin_x = int(
        (line_above_b - Major_Axis_b) / (Major_Axis_k - line_above_k))
    Minor_Axis_Above_Begin_y = int(line_above_k * Minor_Axis_Above_Begin_x +
                                   line_above_b)
    cv2.line(origin, (Minor_Axis_Above_Begin_x, Minor_Axis_Above_Begin_y),
             (Minor_Axis_Above_End_x, Minor_Axis_Above_End_y), (255, 0, 255),
             3)

    line_under_k = math.tan((Major_Axis_Angle - 90) * 3.14 / 180.0)
    line_under_b = Minor_Axis_Under_End_y - line_under_k * Minor_Axis_Under_End_x
    Minor_Axis_Under_Begin_x = int(
        (line_under_b - Major_Axis_b) / (Major_Axis_k - line_under_k))
    Minor_Axis_Under_Begin_y = int(line_under_k * Minor_Axis_Under_Begin_x +
                                   line_under_b)
    cv2.line(origin, (Minor_Axis_Under_Begin_x, Minor_Axis_Under_Begin_y),
             (Minor_Axis_Under_End_x, Minor_Axis_Under_End_y), (255, 255, 0),
             3)

    # 3. Sphericity
    # Walk every contour point; the minimum distance to the centroid is the radius of the largest inscribed circle
    # Initialize a distance
    min_radius = math.pow((maxContour[0][0][0] - Centroid_x), 2) + math.pow(
        (maxContour[0][0][1] - Centroid_y), 2)
    for item in range(ContourItems):
        point_x = maxContour[item][0][0]
        point_y = maxContour[item][0][1]
        local_radius = math.pow((point_x - Centroid_x), 2) + math.pow(
            (point_y - Centroid_y), 2)
        if local_radius <= min_radius:
            min_radius = local_radius

    min_radius = int(math.sqrt(min_radius))
    cv2.circle(origin, (Centroid_x, Centroid_y), min_radius, (0, 255, 255), 2)

    # Walk every contour point; the maximum distance to the centroid is the radius of the smallest circumscribed circle
    # Initialize a distance
    max_radius = math.pow((maxContour[0][0][0] - Centroid_x), 2) + math.pow(
        (maxContour[0][0][1] - Centroid_y), 2)
    for item in range(ContourItems):
        point_x = maxContour[item][0][0]
        point_y = maxContour[item][0][1]
        local_radius = math.pow((point_x - Centroid_x), 2) + math.pow(
            (point_y - Centroid_y), 2)
        if local_radius >= max_radius:
            max_radius = local_radius

    max_radius = int(math.sqrt(max_radius))
    cv2.circle(origin, (Centroid_x, Centroid_y), max_radius, (255, 0, 255), 2)

    # Guard: bail out if the denominator is zero
    if max_radius == 0.0:
        return [False, P_rect, P_extend, 0.0, 0.0, 0.0, origin]

    P_spherical = min_radius * 1.0 / max_radius
    P_spherical = round(P_spherical, 3)
    print "球状型: " + str(P_spherical)

    # Guard: bail out if the denominator is zero
    if Real_Major_Axis_Length == 0.0:
        return [False, P_rect, P_extend, P_spherical, 0.0, 0.0, origin]
    # 4. Leaf shape
    P_leaf = min_radius * 1.0 / Real_Major_Axis_Length
    P_leaf = round(P_leaf, 3)
    print "叶状型 = " + str(P_leaf)

    # Guard: bail out if the denominator is zero
    if Real_Major_Axis_Length == 0.0:
        return [False, P_rect, P_extend, P_spherical, P_leaf, 0.0, origin]
    # 5. Circularity
    P_circle = largest_area * 4.0 / (3.14 * Real_Major_Axis_Length *
                                     Real_Major_Axis_Length)
    P_circle = round(P_circle, 2)
    print "似圆度 = " + str(P_circle)

    # Guard: bail out if the denominator is zero
    if Real_Major_Axis_Length == 0.0:
        return [False, P_rect, P_extend, P_spherical, P_leaf, P_circle, origin]
    # 6. Complexity
    ArcLength = cv2.arcLength(maxContour, True)
    P_complecate = ArcLength * ArcLength * 1.0 / (4 * 3.14 * largest_area)
    P_complecate = round(P_complecate, 2)
    print "复杂度 = " + str(P_complecate)

    # Also return the processed image with the helper lines drawn
    return [True, P_rect, P_extend, P_spherical, P_leaf, P_circle, origin]
Example #30
import cv2
import numpy as np

img_ori = cv2.imread('1.jpg')
img_gray = cv2.cvtColor(img_ori, cv2.COLOR_BGR2GRAY)
img_blurred = cv2.GaussianBlur(img_gray, ksize=(5, 5), sigmaX=0)
img_thresh = cv2.adaptiveThreshold(
    img_blurred, maxValue=255.0, adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY_INV,
        blockSize=19,
        C=9
)
#ret, img_binary = cv.threshold(img_gray, 127, 255, 0)  # two shapes on a black background
contours, hierarchy = cv2.findContours(img_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

min_x=[]
max = 0
mix = 0
may = 0
miy = 0
min_y=[]
max_w=[]
max_h=[]
a = []
cnt =0
xy=[]
#xy = [[0 for rows in range(2)] for cols in range(len(contours))]   # len(contours) x 2 matrix
for t in range(len(contours)):
    for u in range(len(contours[t])):
        #xy[cnt][0] = contours[t][u][0][0]
        #xy[cnt][1] = contours[t][u][0][1]