Example #1
 def process_image(self, image):
     top = image[:10]
     image = image[10:]
     grayscale = cv.cvtColor(image, cv.COLOR_RGB2GRAY)
     canny1 = cv.Canny(grayscale, 0, 100)
     canny2 = cv.Canny(grayscale, 130, 150)
     canny3 = cv.Canny(grayscale, 180, 200)
     median = np.median(grayscale)
     mean = np.mean(grayscale)
     # cv.imwrite('./img/top.jpg', top)
     # cv.imwrite('./img/image.jpg', image)
     # cv.imwrite('./img/gray.jpg', grayscale)
     # cv.imwrite('./img/canny1.jpg', canny1)
     # cv.imwrite('./img/canny2.jpg', canny2)
     # cv.imwrite('./img/canny3.jpg', canny3)
     # cv.imwrite('./img/cannyMean.jpg', cmean)
     # cv.imwrite('./img/cannyMedian.jpg', cmedian)
     tp_image = image.transpose()
     return top.flatten(), mean, median, [
         tp_image[0],
         tp_image[1],
         tp_image[2],
         grayscale,
         canny1,
         canny2,
         canny3
     ]
def match(small_pic_path, large_pic):
    small_pic = cv2.imread(small_pic_path)
    small_pic = cv2.cvtColor(small_pic, cv2.COLOR_BGR2GRAY)
    small_pic = cv2.Canny(small_pic, 50, 200)

    large_pic = cv2.cvtColor(large_pic, cv2.COLOR_BGR2GRAY)
    large_pic = cv2.Canny(large_pic, 50, 200)

    result = cv2.matchTemplate(large_pic, small_pic, cv2.TM_CCOEFF)
    _, max_val, _, max_location = cv2.minMaxLoc(result)
    return (max_val, max_location)
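A hypothetical usage sketch (the file names are placeholders): the location returned by match() is the top-left corner of the best template match, and the template's own size gives the rest of the rectangle.

template_path = "template.png"              # placeholder path
scene = cv2.imread("scene.png")             # placeholder path
th, tw = cv2.imread(template_path).shape[:2]
score, top_left = match(template_path, scene)
cv2.rectangle(scene, top_left, (top_left[0] + tw, top_left[1] + th), (0, 255, 0), 2)
cv2.imshow("match", scene)
cv2.waitKey(0)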
 def edgeDetectionImg(self):
     try:
         img = cv2.imread(self.filename)
         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
         edges = cv2.Canny(gray, 20, 30)
         edges_high_thresh = cv2.Canny(gray, 60, 120)
         images = np.hstack((edges, edges_high_thresh))
         cv2.imwrite('img/dist/testEdge.jpg',images, [cv2.IMWRITE_JPEG_QUALITY, 100])
         cv2.waitKey()
     except Exception:
         pass
Example #4
def canny_edge_detection_preview():
    """
    edge detection preview
    ---
    tags:
        -   image
    parameters:
        -   in: formData
            name: image
            type: file
            required: true
            description: The image to upload.
    responses:
        200:
            description: the edge detection preview image
            content:
                image/png:
                    schema:
                        type: string
                        format: binary
    """
    if 'image' not in request.files:
        raise ParameterLostError("image")

    img = cv2.imdecode(
        numpy.frombuffer(request.files['image'].read(), numpy.uint8),
        cv2.IMREAD_UNCHANGED)

    edges = cv2.Canny(img, 100, 200)

    _, f = cv2.imencode(".png", edges)

    return send_file(io.BytesIO(f.tobytes()), "image/png")
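A hypothetical client-side sketch of calling this endpoint with the requests library; the URL and file names are placeholders, since the actual route is not shown in the example.

import requests

with open("photo.jpg", "rb") as fh:
    resp = requests.post("http://localhost:5000/canny_edge_detection_preview",
                         files={"image": fh})
with open("edges.png", "wb") as out:
    out.write(resp.content)  # the PNG edge preview returned by the endpoint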
Example #5
def thresh_callback(val, src_gray):
    threshold = val
    rng.seed(12345)
    canny_output = cv2.Canny(src_gray, threshold, threshold * 2)

    contours, hierarchy = cv2.findContours(canny_output, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)[-2:]

    contours_poly = [None] * len(contours)
    boundRect = [None] * len(contours)
    centers = [None] * len(contours)
    radius = [None] * len(contours)
    for i, c in enumerate(contours):
        contours_poly[i] = cv2.approxPolyDP(c, 3, True)
        boundRect[i] = cv2.boundingRect(contours_poly[i])
        centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])

    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3),
                       dtype=np.uint8)

    for i in range(len(contours)):
        color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
        cv2.drawContours(drawing, contours_poly, i, color)
        cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \
           (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
        cv2.circle(drawing, (int(centers[i][0]), int(centers[i][1])),
                   int(radius[i]), color, 2)
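The function computes `drawing` but never displays or returns it; this pattern matches the OpenCV bounding-boxes tutorial, where the callback is driven by a trackbar. A minimal wiring sketch, assuming the usual cv2/numpy/random imports and a placeholder input file:

src = cv2.imread("input.jpg")                      # placeholder path
src_gray = cv2.blur(cv2.cvtColor(src, cv2.COLOR_BGR2GRAY), (3, 3))
cv2.namedWindow("Contours")
cv2.createTrackbar("Canny thresh:", "Contours", 100, 255,
                   lambda val: thresh_callback(val, src_gray))
thresh_callback(100, src_gray)
cv2.waitKey()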
Example #6
def method7():
    while c.isOpened():
        rd, image = c.read()

        if rd:
            # ---------> Preprocessing <----------
            m1 = image[:, :, 0]  # blue channel
            m2 = image[:, :, 2]  # red channel
            m3 = cv2.subtract(m1, m2)  # blue channel minus red channel

            # ---------> Thresholding <----------
            m3 = cv2.adaptiveThreshold(m3, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY, 11, 9)

            # ---------> Canny edge detection <----------
            m4 = cv2.Canny(m3, 100, 30)

            # ---------> Bounding box of the edge pixels <----------
            x, y, w, h = cv2.boundingRect(m4)

            # ---------> Draw the box on the original image <----------
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 3)

            cv2.imshow("image", image)
            cv2.imshow("m1", m1)
            cv2.imshow("m2", m2)
            cv2.imshow("m3", m3)
            cv2.imshow("m4", m4)

        else:
            break

        if cv2.waitKey(10) != -1:
            break
Example #7
def edgeDetectionCanny(img: np.ndarray, thrs_1: float,
                       thrs_2: float) -> (np.ndarray, np.ndarray):
    """
    Detecting edges using the "Canny Edge" method
    :param img: Input image
    :param thrs_1: T1
    :param thrs_2: T2
    :return: opencv solution, my implementation
    """

    mag, div = sobleForCanny(img)
    nms = non_max_suppression(mag, div)

    for i in range(0, nms.shape[0]):
        for j in range(0, nms.shape[1]):
            try:
                if nms[i][j] <= thrs_2:
                    nms[i][j] = 0
                elif thrs_2 < nms[i][j] < thrs_1:
                    neighbor = nms[i - 1:i + 2, j - 1:j + 2]
                    if neighbor.max() < thrs_1:
                        nms[i][j] = 0
                    else:
                        nms[i][j] = 255
                else:
                    nms[i][j] = 255
            except IndexError as e:
                pass

    cvc = cv.Canny(img.astype(np.uint8), thrs_1, thrs_2)
    return cvc, nms
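The helpers sobleForCanny and non_max_suppression are not shown in this example. A minimal sketch of the non-maximum suppression step, assuming `mag` is the gradient magnitude and the second argument holds gradient angles in radians (e.g. from np.arctan2); the real helper may differ.

import numpy as np

def non_max_suppression(mag, direction):
    h, w = mag.shape
    out = np.zeros_like(mag)
    angle = np.rad2deg(direction) % 180          # fold angles into [0, 180)
    for i in range(1, h - 1):
        for j in range(1, w - 1):
            a = angle[i, j]
            if a < 22.5 or a >= 157.5:           # horizontal gradient: compare left/right
                n1, n2 = mag[i, j - 1], mag[i, j + 1]
            elif a < 67.5:                       # 45-degree diagonal
                n1, n2 = mag[i - 1, j + 1], mag[i + 1, j - 1]
            elif a < 112.5:                      # vertical gradient: compare up/down
                n1, n2 = mag[i - 1, j], mag[i + 1, j]
            else:                                # 135-degree diagonal
                n1, n2 = mag[i - 1, j - 1], mag[i + 1, j + 1]
            if mag[i, j] >= n1 and mag[i, j] >= n2:
                out[i, j] = mag[i, j]
    return out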
Example #8
def method6():
    while c.isOpened():
        rd, image = c.read()

        if rd:
            # ---------> Preprocessing <----------
            m1 = image[:, :, 0]  # blue channel
            m2 = image[:, :, 2]  # red channel
            m3 = cv2.subtract(m1, m2)  # blue channel minus red channel

            # ---------> Thresholding <----------
            th, m3 = cv2.threshold(m3, 50, 255, cv2.THRESH_BINARY)
            m4 = cv2.bitwise_not(m3)

            m5 = cv2.Canny(m4, 100, 30)

            # ---------> Bounding box of the edge pixels <----------
            x, y, w, h = cv2.boundingRect(m5)

            # ---------> Draw the box on the original image <----------
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 3)

            cv2.imshow("image", image)
            cv2.imshow("m1", m1)
            cv2.imshow("m2", m2)
            cv2.imshow("m3", m3)
            cv2.imshow("m4", m4)
            cv2.imshow("m5", m5)
        else:
            break

        if cv2.waitKey(10) != -1:
            break
Example #9
def pre_process(image, threshold_value_1, threshold_value_2):

    #convert the image to grayscale to reduce computational complexity,
    #since we then only deal with one colour channel
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    gray = np.asarray(gray)

    #blur the image enough to remove some of the higher-frequency noise and smooth the edges.
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    ##changing the thresholds doesn't affect area proportions, but it can affect the
    ##ability to close shapes and to pick up edges with lower-frequency change
    edges = cv2.Canny(gray, threshold_value_1, threshold_value_2)

    #closing the holes that may appear inside the edges to potentially close leaking
    #in the flood fill stage
    kernel = np.ones((5, 5), np.uint8)
    edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

    #3x3 kernel  and one iteration to only fill potential gaps between exterior edges
    #so that the exterior contour points form a closed polygon and can be filled appropriately. Keep this as minimal as possible to not
    #overly amplify shape differences
    kernel = np.ones((3, 3), np.uint8)
    edges = cv2.dilate(edges, kernel, iterations=1)
    #returns the edge image

    return edges
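A hypothetical sketch of the "flood fill stage" mentioned in the comments (the helper name fill_shape is illustrative): fill the background from a corner seed, then combine with the closed edges to obtain filled shapes.

def fill_shape(edges):
    h, w = edges.shape
    mask = np.zeros((h + 2, w + 2), np.uint8)     # floodFill requires a mask 2 px larger than the image
    background = edges.copy()
    cv2.floodFill(background, mask, (0, 0), 255)  # flood the background from the corner seed
    return cv2.bitwise_or(edges, cv2.bitwise_not(background))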
Example #10
def center_of_mass(image):
    threshold = thresh

    # Convert image to gray and blur it
    src_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    src_gray = cv.blur(src_gray, (3, 3))

    # Edge detection
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)

    contours, _ = cv.findContours(canny_output, cv.RETR_TREE,
                                  cv.CHAIN_APPROX_SIMPLE)

    # For every found contour we now apply approximation to polygons with accuracy +-3 and stating that the curve must be closed.
    # After that we find a bounding rect for every polygon and save it to boundRect.
    # At last we find a minimum enclosing circle for every polygon and save it to center and radius vectors.
    contours_poly = [None] * len(contours)
    boundRect = [None] * len(contours)
    centers = [None] * len(contours)
    radius = [None] * len(contours)

    # Only the first contour is processed: approximate it to a polygon, then
    # return the centre of its minimum enclosing circle.
    for i, c in enumerate(contours):
        contours_poly[i] = cv.approxPolyDP(c, 3, True)
        boundRect[i] = cv.boundingRect(contours_poly[i])
        centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])
        return centers[0]
Example #11
def setStandardValues(base64Image, values):

	d = []
	for i in values:
		d.append(int(i['value']))
	(h_min, h_max, s_min, s_max, v_min, v_max, threshold1, threshold2, area_min) = tuple(d)

	img_str = base64.b64decode(base64Image)

	nparr = np.frombuffer(img_str, np.uint8)
	image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
	imgHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
	lower = np.array([h_min, s_min, v_min])
	upper = np.array([h_max, s_max, v_max])
	mask = cv2.inRange(imgHSV, lower, upper)
	result = cv2.bitwise_and(image, image, mask = mask)
	mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
	imgCanny = cv2.Canny(mask, threshold1, threshold2)
	kernel = np.ones((5,5))
	imgDil = cv2.dilate(imgCanny, kernel, iterations=1)
	found, standardHeight = getContour(imgDil, image)

	img_str = cv2.imencode('.png', imgHSV)[1].tobytes()
	base64ImageReturn = base64.b64encode(img_str) 
	
	imgD_str = cv2.imencode('.png', result)[1].tobytes()
	base64ImgDilReturn = base64.b64encode(imgD_str)

	return base64ImageReturn, base64ImgDilReturn
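getContour is not shown in this example. A hypothetical sketch of what it might look like, assuming it reports whether a sufficiently large contour was found and the height of its bounding box; the real helper may differ.

def getContour(imgDil, image, area_min=1000):
    contours, _ = cv2.findContours(imgDil, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in sorted(contours, key=cv2.contourArea, reverse=True):
        if cv2.contourArea(contour) < area_min:
            break
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        return True, h
    return False, 0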
Example #12
def draw_bound_box():
    image = cv2.imread(
        "/home/sam/catkin_ws/src/swarm_localization/swarm_localization/images/test_img2.jpg"
    )

    # Grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Find Canny edges
    edged = cv2.Canny(gray, 30, 200)
    cv2.waitKey(0)

    # Finding Contours
    # Use a copy of the image e.g. edged.copy()
    # since findContours alters the image
    contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)

    cv2.imshow('Canny Edges After Contouring', edged)
    cv2.waitKey(0)

    print("Number of Contours found = " + str(len(contours)))

    # Draw all contours
    # -1 signifies drawing all contours
    cv2.drawContours(image, contours, -1, (255, 0, 0), 3)

    cv2.imshow('Contours', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        frame_count = 0
        while True:
            for frame in container.decode(video=0):
                frame_count = frame_count + 1
                # skip first 300 frames
                if frame_count < 300:
                    continue
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                cv2.imshow('Original', image)
                cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                cv2.waitKey(1)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
def circles_hough(path):
    image = cv2.imread(path)
    edge_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edge_image = cv2.Canny(edge_image, 100, 200)
    # cv2.imshow("input",edge_image)
    # cv2.waitKey(0)
    find_hough_circles(image, edge_image)
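find_hough_circles is not shown. A minimal sketch using OpenCV's built-in HoughCircles rather than a hand-rolled accumulator (which the original helper may well be); all parameter values are illustrative only.

def find_hough_circles(image, edge_image):
    circles = cv2.HoughCircles(edge_image, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                               param1=200, param2=30, minRadius=0, maxRadius=0)
    if circles is not None:
        for x, y, r in np.round(circles[0]).astype(int):
            cv2.circle(image, (x, y), r, (0, 255, 0), 2)  # draw each detected circle
    cv2.imshow("circles", image)
    cv2.waitKey(0)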
Example #15
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        # skip first 300 frames
        frame_skip = 300
        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()
                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                cv2.imshow('Original', image)
                cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                cv2.waitKey(1)
                if frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #16
    def __findNumberPlate__(self, image):
        ''' Handles all the pre-processing of image before passing it to Tesseract OCR '''
        resizedImage = imutils.resize(image, width=1000)
        grayImage = cv2.cvtColor(resizedImage, cv2.COLOR_BGR2GRAY)
        filteredImage = cv2.bilateralFilter(grayImage, 11, 17, 17)

        cannyEdges = cv2.Canny(filteredImage, 170, 200)
        contours, _ = cv2.findContours(cannyEdges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        sortedContours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]
        #cv2.drawContours(cannyEdges, sortedContours, 0, 255, -1)
        #cv2.imshow("Top 30 Contours", cannyEdges)          #Show the top 30 contours.
        #cv2.waitKey(0)

        NumberPlateCount = 0
        for contour in sortedContours:
            perimeter = cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, 0.02*perimeter, True)
            if len(approx) == 4:
                NumberPlateCount = approx
                break

        imageMask = np.zeros(grayImage.shape, np.uint8)
        x = cv2.drawContours(imageMask, [NumberPlateCount], 0, 255, -1)
        finalImage = cv2.bitwise_and(resizedImage, resizedImage, mask=imageMask)
        cv2.imwrite('numberPlateImage.jpg', finalImage)
        #finalGrayImage = cv2.cvtColor(finalImage, cv2.COLOR_BGR2GRAY)
        _, thresh2 = cv2.threshold(finalImage, 127, 255, cv2.THRESH_BINARY)
        cv2.imshow("Detected Number Plate", imutils.resize(finalImage, width=200))
        cv2.waitKey(0)
        return thresh2
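A hypothetical follow-up, since the docstring mentions handing the pre-processed plate to Tesseract OCR; the detector/image names and the --psm config value are placeholders.

import pytesseract

plate_img = detector.__findNumberPlate__(image)                   # placeholder object and image
text = pytesseract.image_to_string(plate_img, config="--psm 7")   # treat the plate as a single text line
print(text.strip())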
def getContours(img, cThr=[100, 100], showCanny=False, minArea=1000, filter=0, draw=False):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    if showCanny:
        cv2.imshow('Canny', imgThre)
    contours, hierarchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    finalContours = []
    for i in contours:
        area = cv2.contourArea(i)
        if area > minArea:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            bbox = cv2.boundingRect(approx)
            if filter > 0:
                if len(approx) == filter:
                    finalContours.append([len(approx), area, approx, bbox, i])
            else:
                finalContours.append([len(approx), area, approx, bbox, i])
    finalContours = sorted(finalContours, key=lambda x: x[1], reverse=True)
    if draw:
        for con in finalContours:
            cv2.drawContours(img, con[4], -1, (0, 0, 255), 3)
    return img, finalContours
def _extract_edges_image(gray):
    """Return a binary cv2 image defining the detected edges.

    Currently using 'canny', might be replaced with something
    more advanced.
    """
    return cv2.Canny(gray, 100, 200)
Example #19
def regions_of_interest(image: Image, params: Parameters) -> [Rect]:
    edges = cv2.Canny(image, 50, 150)
    kernel = np.ones((5, 5), np.uint8)
    gradient = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)
    cnts = cv2.findContours(gradient, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    rects = map(cv2.boundingRect, cnts)

    rects = list(map(lambda r: Rect(r[0], r[1], r[2], r[3]), rects))
    rects.sort(key=lambda r: (r.x, r.y))

    rectsUsed = set()
    regions = []
    i = 0
    while i < len(rects):
        if i not in rectsUsed:
            rectsUsed.add(i)
            rect = rects[i]

            j = i + 1
            while j < len(rects):
                cand = rects[j]
                if j not in rectsUsed and intersects(
                        rect, cand, params.mergeDeltaX, params.mergeDeltaY):
                    rect = merge(rect, cand)
                    rectsUsed.add(j)
                    j = i + 1
                j = j + 1

            regions.append(rect)
        i = i + 1

    return regions
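intersects and merge are not shown. A hypothetical sketch, assuming Rect is a simple (x, y, w, h) record; the real project may define it differently.

from collections import namedtuple

Rect = namedtuple("Rect", ["x", "y", "w", "h"])    # placeholder definition

def intersects(a, b, dx, dy):
    # True if the rectangles overlap once expanded by the merge margins dx/dy
    return not (a.x + a.w + dx < b.x or b.x + b.w + dx < a.x or
                a.y + a.h + dy < b.y or b.y + b.h + dy < a.y)

def merge(a, b):
    # smallest rectangle covering both inputs
    x, y = min(a.x, b.x), min(a.y, b.y)
    return Rect(x, y, max(a.x + a.w, b.x + b.w) - x, max(a.y + a.h, b.y + b.h) - y)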
Example #20
def get_dim(img):
    l, r, t, b = 10000, 0, 10000, 0
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 100)
    # cv2.imshow('edges', edges)
    # cv2.waitKey(0)

    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, minLineLength=50)
    for line in lines:
        for x1, y1, x2, y2 in line:
            # print(x1, y1, x2, y2)
            if abs(x1 - x2) < 20:
                x = int((x1 + x2) / 2)
                l, r = min(l, x), max(r, x)
            if abs(y1 - y2) < 20:
                y = int((y1 + y2) / 2)
                t, b = min(t, y), max(b, y)

    # cv2.line(img, (l, t), (l, b), (0, 255, 0), 2)
    # cv2.line(img, (r, t), (r, b), (0, 255, 0), 2)
    # cv2.line(img, (l, t), (r, t), (0, 255, 0), 2)
    # cv2.line(img, (l, b), (r, b), (0, 255, 0), 2)
    # cv2.imshow('img', img)
    # cv2.waitKey(0)

    return 60 * 25, 160, l, r, t, b
def runSimpleFindContours(img):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    Gaussian = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(Gaussian, 100, 250)
    contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    return (contours, hierarchy)
Example #22
def GetRightPos(image):
    # Edge detection
    canny = cv2.Canny(image, 200, 400)

    # Contour extraction (the three-value unpacking matches the OpenCV 3.x findContours API)
    img, contours, _ = cv2.findContours(canny, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)

    rightRectangles = []
    for i, contour in enumerate(contours):
        M = cv2.moments(contour)

        if M['m00'] == 0:
            cx, cy = 0, 0
        else:
            cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']

        if 1000 < cv2.contourArea(contour) < 1300 and 120 < cv2.arcLength(
                contour, True) < 400:
            if cx > 100:
                x, y, w, h = cv2.boundingRect(contour)  # bounding rectangle
                rightRectangles.append((x, y, w, h))

    if len(rightRectangles) > 0:
        # the innermost (smallest) box
        current = min(rightRectangles, key=lambda s: s[2] * s[3])
        x, y, w, h = current[0], current[1], current[2], current[3]
        return x, y, w, h

    return 0, 0, 0, 0
Example #23
def process_image(path, out_path):

    orig_im = Image.open(path)
    scale, im = downscale_image(orig_im)

    edges = cv2.Canny(np.asarray(im), 100, 200)

    # TODO: dilate image _before_ finding a border. This is crazy sensitive!
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    borders = find_border_components(contours, edges)
    borders.sort(key=lambda b: (b[3] - b[1]) * (b[4] - b[2]))

    border_contour = None
    if len(borders):
        border_contour = contours[borders[0][0]]
        edges = remove_border(border_contour, edges)

    edges = 255 * (edges > 0).astype(np.uint8)

    # Remove ~1px borders using a rank filter.
    maxed_rows = rank_filter(edges, -4, size=(1, 20))
    maxed_cols = rank_filter(edges, -4, size=(20, 1))
    debordered = np.minimum(np.minimum(edges, maxed_rows), maxed_cols)
    edges = debordered

    contours = find_components(edges)
    if len(contours) == 0:
        print('%s -> (no text!)' % path)
        return

    crop = find_optimal_components_subset(contours, edges)
    crop = pad_crop(crop, contours, edges, border_contour)

    crop = [int(x / scale)
            for x in crop]  # upscale to the original image size.

    #draw = ImageDraw.Draw(im)
    #c_info = props_for_contours(contours, edges)
    #for c in c_info:
    #    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    #    draw.rectangle(this_crop, outline='blue')
    #draw.rectangle(crop, outline='red')
    #im.save(out_path)
    #draw.text((50, 50), path, fill='red')
    #orig_im.save(out_path)
    #im.show()
    text_im = orig_im.crop(crop)
    text_im = text_im.convert('RGB')
    bytesim = cv2.cvtColor(np.asarray(text_im), cv2.COLOR_BGR2GRAY)
    ret, bytesim = cv2.threshold(np.asarray(bytesim), 127, 255,
                                 cv2.THRESH_BINARY)
    if img_estim(bytesim, 127) == 'dark':
        # invert the binarisation for dark images (threshold the grayscale crop, not the colour one)
        gray_crop = cv2.cvtColor(np.asarray(text_im), cv2.COLOR_BGR2GRAY)
        ret, bytesim = cv2.threshold(gray_crop, 127, 255, cv2.THRESH_BINARY_INV)
    text_im = Image.fromarray(bytesim)
    text_im.save(out_path)
    print('%s -> %s' % (path, out_path))
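img_estim is not shown. A minimal sketch, assuming it classifies the crop as 'light' or 'dark' by comparing the mean intensity with the given threshold; the real helper may differ.

def img_estim(img, thrshld):
    return 'light' if np.mean(img) > thrshld else 'dark'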
def preProcessing(img):
    imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    imgBlur = cv.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv.Canny(imgBlur, 200, 200)
    kernel = np.ones((5, 5))
    imgDial = cv.dilate(imgCanny, kernel, iterations=2)
    imgThres = cv.erode(imgDial, kernel, iterations=1)
    return imgThres
Example #25
    def find_contours(self):
        #       img = cv2.GaussianBlur(self.img, (5,5), 0)
        edges = cv2.Canny(self.img, 100, 255)
        ret, thresh = cv2.threshold(edges, 127, 255, 0)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        return contours
 def _observe(self):
     image = self.game.get_canvas()
     image = np.array(Image.open(BytesIO(base64.b64decode(image))))
     image = cv.cvtColor(image, cv.COLOR_BGRA2GRAY)
     image = cv.Canny(image, threshold1 = 100, threshold2 = 200)
     image = cv.resize(image, (80,80))
     self.current_frame = image
     return self.current_frame
Example #27
def edge_demo(image):  # edge extraction
    blurred = cv.GaussianBlur(image, (3, 3), 0)
    gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
    xgrad = cv.Sobel(gray, cv.CV_16SC1, 1, 0)
    ygrad = cv.Sobel(gray, cv.CV_16SC1, 0, 1)
    edge = cv.Canny(xgrad, ygrad, 50, 150)
    dst = cv.bitwise_and(image, image, mask=edge)
    cv.imshow("edge", dst)
Example #28
def canny_edge_detector(image):

    # Convert the image color to grayscale
    gray_image = cv.cvtColor(image, cv.COLOR_RGB2GRAY)

    # Reduce noise from the image
    blur = cv.GaussianBlur(gray_image, (5, 5), 0)
    canny = cv.Canny(blur, 50, 150)
    return canny
def Gray_image(image):
    #getting the edges, by first converting the image to grayscale image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #apply gaussian filter, using 5x5 kernel
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    #getting edges, second argument is the low_threshold, followed by the high one
    edges = cv2.Canny(blur, 75, 150)
    #the Hough line transform is applied to these edges in a later step (see the sketch below)
    return edges
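A minimal follow-up sketch of the Hough line transform mentioned in the comment above; the file name and the HoughLinesP parameters are illustrative only.

image = cv2.imread("road.jpg")                      # placeholder path
edges = Gray_image(image)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50,
                        minLineLength=40, maxLineGap=5)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)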
def getCanny(image, sigma_val=0.33):
    #get median of single channel pixel image
    num_image_median = np.median(image)
    #use median to apply automatic canny
    lower_param = int(max(0, (1.0 - sigma_val) * num_image_median))
    upper_param = int(min(255, (1.0 + sigma_val) * num_image_median))
    canny_edged_img = cv.Canny(image, lower_param, upper_param)
    #return canny image
    return canny_edged_img
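Example use (illustrative only, placeholder path): blur the grayscale first so the median-based thresholds are stable.

gray = cv.cvtColor(cv.imread("input.jpg"), cv.COLOR_BGR2GRAY)
edges = getCanny(cv.GaussianBlur(gray, (3, 3), 0))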