示例#1
0
def DetectObjectFromImage(beforeImage, afterImage, beforeGrayImage,
                          afterGrayImage):
    """Detect the object that changed between a before/after image pair.

    Pipeline: rescale all four inputs by a rate derived from the detected
    reference square, perspective-correct both color images so the square
    becomes axis-aligned, denoise and adaptive-threshold both grayscale
    views, diff the thresholded images, then extract the object contour.

    Parameters:
        beforeImage / afterImage: BGR images captured before/after the
            object appeared.
        beforeGrayImage / afterGrayImage: grayscale versions of the same
            two images.

    Returns:
        [beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage,
         differenceBasedOnThreshImage, humanDetectedContour]
    """

    # Scale every input by the rate implied by the detected reference square
    # so the rest of the pipeline works at a consistent size.
    resizeRate = GetContour.SquareDetectAndReturnRateAsSquare(beforeGrayImage)
    beforeImage = CustomOpenCV.ResizeImageAsRate(beforeImage, resizeRate)
    beforeGrayImage = CustomOpenCV.ResizeImageAsRate(beforeGrayImage,
                                                     resizeRate)
    afterImage = CustomOpenCV.ResizeImageAsRate(afterImage, resizeRate)
    afterGrayImage = CustomOpenCV.ResizeImageAsRate(afterGrayImage, resizeRate)

    # Find the four corner points of the background square via
    # fluorescent-color detection.
    squareContourData = DetectBackgroundSquare.DetectBackgroundSquareFromImage(
        beforeImage)

    # Perspective-correct the skewed quadrilateral into a proper rectangle.
    perspectiveUpdatedBeforeImage = ImageMatrixMove.ImageMatrixMove(
        beforeImage, squareContourData)
    perspectiveUpdatedAfterImage = ImageMatrixMove.ImageMatrixMove(
        afterImage, squareContourData)

    # Normalize both corrected images to a fixed width; the resulting shape
    # is [ rateHeight, DefineManager.IMAGE_WIDTH ].
    perspectiveUpdatedBeforeImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedBeforeImage, DefineManager.IMAGE_WIDTH)
    perspectiveUpdatedAfterImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedAfterImage, DefineManager.IMAGE_WIDTH)

    perspectiveUpdatedBeforeGrayImage = cv2.cvtColor(
        perspectiveUpdatedBeforeImage, cv2.COLOR_BGR2GRAY)
    perspectiveUpdatedAfterGrayImage = cv2.cvtColor(
        perspectiveUpdatedAfterImage, cv2.COLOR_BGR2GRAY)

    # Morphological opening to reduce image noise before thresholding.
    morphologyKernel = np.ones(
        (Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1,
         Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1), np.uint8)
    perspectiveUpdatedBeforeMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedBeforeGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    perspectiveUpdatedAfterMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedAfterGrayImage, cv2.MORPH_OPEN, morphologyKernel)

    # Adaptive (mean) threshold each denoised grayscale image.
    beforeThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedBeforeMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    afterThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedAfterMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)

    # Pixel-wise difference of the two thresholded images; saturate every
    # pixel above the difference threshold to pure white.
    differenceBasedOnThreshImage = cv2.absdiff(
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage)
    differenceBasedOnThreshImage[
        differenceBasedOnThreshImage > Setting.DefineManager.
        EACH_IMAGE_DIFFERENCE_THRESHOLD] = Setting.DefineManager.SET_IMAGE_WHITE_COLOR

    objectFoundedImage = GetContour.GetObjectImage(
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage)

    humanDetectedContour, contourLineDrawImage = GetContour.GetContour(
        objectFoundedImage, perspectiveUpdatedAfterImage)
    # The two calls below receive the draw image and presumably annotate it
    # in place (TODO confirm); their return values were bound but never used
    # here, so the dead bindings have been dropped.
    GetContour.FindNavel(humanDetectedContour, contourLineDrawImage)
    GetContour.AngleAsDealWithPointFromContours(
        humanDetectedContour, contourLineDrawImage)

    return [
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage,
        differenceBasedOnThreshImage, humanDetectedContour
    ]
示例#2
0
def DetectObjectFromImage(beforeImage, afterImage, beforeGrayImage,
                          afterGrayImage):
    """Detect the changed object between a before/after image pair and fit
    lines through its key contour points.

    Pipeline: rescale all four inputs by a rate derived from the detected
    reference square, perspective-correct both color images, denoise and
    adaptive-threshold both grayscale views, diff them, extract the object
    contour, locate the face/navel, then fit a polynomial line through each
    group of important contour points and draw it on copies of the images.

    Parameters:
        beforeImage / afterImage: BGR images captured before/after the
            object appeared.
        beforeGrayImage / afterGrayImage: grayscale versions of the same
            two images.

    Returns:
        [perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage,
         height, navelPoint, humanDetectedContour, functionParameter,
         beforeDrawImage, faceRate]
    """

    # Scale every input by the rate implied by the detected reference square
    # so the rest of the pipeline works at a consistent size.
    resizeRate = GetContour.SquareDetectAndReturnRateAsSquare(beforeGrayImage)
    beforeImage = CustomOpenCV.ResizeImageAsRate(beforeImage, resizeRate)
    beforeGrayImage = CustomOpenCV.ResizeImageAsRate(beforeGrayImage,
                                                     resizeRate)
    afterImage = CustomOpenCV.ResizeImageAsRate(afterImage, resizeRate)
    afterGrayImage = CustomOpenCV.ResizeImageAsRate(afterGrayImage, resizeRate)

    # NOTE(review): the fluorescent-marker corner detector
    # (DetectBackgroundSquare.DetectBackgroundSquareFromImage) hangs in an
    # endless loop on macOS, so the blackboard-contour fallback is used.
    squareContourData = DetectBlackBoardContourFromOriginImage(beforeGrayImage)

    # Perspective-correct the skewed quadrilateral into a proper rectangle.
    perspectiveUpdatedBeforeImage = ImageMatrixMove.ImageMatrixMove(
        beforeImage, squareContourData)
    perspectiveUpdatedAfterImage = ImageMatrixMove.ImageMatrixMove(
        afterImage, squareContourData)

    # Normalize both corrected images to a fixed width; the resulting shape
    # is [ rateHeight, DefineManager.IMAGE_WIDTH ].
    perspectiveUpdatedBeforeImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedBeforeImage, DefineManager.IMAGE_WIDTH)
    perspectiveUpdatedAfterImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedAfterImage, DefineManager.IMAGE_WIDTH)

    perspectiveUpdatedBeforeGrayImage = cv2.cvtColor(
        perspectiveUpdatedBeforeImage, cv2.COLOR_BGR2GRAY)
    perspectiveUpdatedAfterGrayImage = cv2.cvtColor(
        perspectiveUpdatedAfterImage, cv2.COLOR_BGR2GRAY)

    # Morphological opening to reduce image noise before thresholding.
    morphologyKernel = np.ones(
        (Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1,
         Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1), np.uint8)
    perspectiveUpdatedBeforeMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedBeforeGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    perspectiveUpdatedAfterMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedAfterGrayImage, cv2.MORPH_OPEN, morphologyKernel)

    # Adaptive (mean) threshold each denoised grayscale image.
    beforeThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedBeforeMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    afterThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedAfterMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)

    # Pixel-wise difference of the two thresholded images; saturate every
    # pixel above the difference threshold to pure white.
    differenceBasedOnThreshImage = cv2.absdiff(
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage)
    differenceBasedOnThreshImage[
        differenceBasedOnThreshImage > Setting.DefineManager.
        EACH_IMAGE_DIFFERENCE_THRESHOLD] = Setting.DefineManager.SET_IMAGE_WHITE_COLOR

    objectFoundedImage = GetContour.GetObjectImage(
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage)

    humanDetectedContour, contourLineDrawImage = GetContour.GetContour(
        objectFoundedImage, perspectiveUpdatedAfterImage)
    faceMinY, faceMaxY = GetContour.DetectFaceAndGetY(
        perspectiveUpdatedAfterImage)
    navelPoint, faceRate, maxY, minY = GetContour.FindNavel(
        humanDetectedContour, faceMaxY, contourLineDrawImage)
    height = maxY - minY
    importantPoint = GetContour.AngleAsDealWithPointFromContours(
        humanDetectedContour, contourLineDrawImage)

    beforeDrawImage = np.copy(perspectiveUpdatedBeforeImage)
    afterDrawImage = np.copy(perspectiveUpdatedAfterImage)
    functionParameter = []
    for pointGroup in importantPoint:
        xArray = []
        yArray = []
        for point in pointGroup:
            x, y = point.ravel()
            xArray.append(x)
            yArray.append(y)
        xArray = np.asarray(xArray)
        yArray = np.asarray(yArray)
        if xArray.shape[0] > 0:
            # Fit ax + b = y and collect the (a, b) coefficients.
            # np.polyfit replaces sp.polyfit: scipy's top-level polyfit was
            # only a re-exported NumPy alias and has been removed from
            # modern SciPy releases; behavior is identical.
            functionCharacteristic = np.polyfit(
                xArray, yArray, DefineManager.FUNCTION_DIMENSION)
            functionParameter.append(functionCharacteristic)
            pointA, pointB = GetContour.GetStartAndEndPointsFromLine(
                functionCharacteristic, xArray)

            cv2.line(beforeDrawImage, pointA, pointB,
                     DefineManager.RGB_COLOR_GREEN, 1)
            cv2.line(afterDrawImage, pointA, pointB,
                     DefineManager.RGB_COLOR_GREEN, 1)

    # NOTE(review): other call sites pass a second list of window names to
    # ShowImagesWithName; this single-argument call relies on a default for
    # that parameter — confirm against CustomOpenCV.
    CustomOpenCV.ShowImagesWithName([beforeDrawImage, afterDrawImage])

    return [
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage, height,
        navelPoint, humanDetectedContour, functionParameter, beforeDrawImage,
        faceRate
    ]