Example #1
def FillDifferenceImage(differenceImage):
    height, width = differenceImage.shape[:2]
    afterDifference = np.ndarray((height + Setting.DefineManager.ADD_IMAGE_HEIGHT * 2,width + Setting.DefineManager.ADD_IMAGE_WIDTH * 2),
                                 dtype = differenceImage.dtype)
    afterDifference[:] = Setting.DefineManager.SET_IMAGE_BLACK_COLOR
    # Whiten everything from 15 px below the pasted region to the bottom edge (same columns)
    afterDifference[Setting.DefineManager.ADD_IMAGE_HEIGHT + height + 15:,
                    Setting.DefineManager.ADD_IMAGE_WIDTH:Setting.DefineManager.ADD_IMAGE_WIDTH + width] = Setting.DefineManager.SET_IMAGE_WHITE_COLOR
    # Paste the difference image into the centre of the enlarged black canvas
    afterDifference[Setting.DefineManager.ADD_IMAGE_HEIGHT:Setting.DefineManager.ADD_IMAGE_HEIGHT + height,
                    Setting.DefineManager.ADD_IMAGE_WIDTH:Setting.DefineManager.ADD_IMAGE_WIDTH + width] = differenceImage[:]
    beforeDifference = np.ndarray(afterDifference.shape)
    alpha = 10
    # Repeatedly close, blur and re-threshold until the number of contours
    # drops below END_CONTOUR_COUNT
    while True:
        kernel = np.ones((Setting.DefineManager.MORPHOLOGY_MASK_SIZE + alpha,Setting.DefineManager.MORPHOLOGY_MASK_SIZE + alpha), np.uint8)
        beforeDifference = np.copy(afterDifference)
        afterDifference = cv2.morphologyEx(afterDifference, cv2.MORPH_CLOSE, kernel)
        afterDifference = cv2.GaussianBlur(afterDifference, (Setting.DefineManager.WIDTH_MASK_SIZE
                                                             , Setting.DefineManager.HEIGHT_MASK_SIZE), 0)
        thresh, afterDifference = cv2.threshold(afterDifference, Setting.DefineManager.THRESHOLD
                                                , Setting.DefineManager.SET_IMAGE_WHITE_COLOR, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        contourLength = len(GetContour(afterDifference)[0])
        ccv.ShowImagesWithName([beforeDifference,afterDifference],[],700)
        if contourLength < Setting.DefineManager.END_CONTOUR_COUNT:
            break
    # Crop the padded canvas back to the original image region
    afterDifference = afterDifference[Setting.DefineManager.ADD_IMAGE_HEIGHT:Setting.DefineManager.ADD_IMAGE_HEIGHT + height,
                                      Setting.DefineManager.ADD_IMAGE_WIDTH:Setting.DefineManager.ADD_IMAGE_WIDTH + width]
    finalDifference = cv2.absdiff(differenceImage, afterDifference)

    ccv.ShowImagesWithName([differenceImage, afterDifference, finalDifference],['Before','After', 'Added'])
    return afterDifference
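
# Hypothetical usage sketch. The file names and the module-level imports
# assumed by FillDifferenceImage (numpy as np, Setting, GetContour, ccv)
# are placeholders, not part of the original listing.
import cv2

beforeGray = cv2.imread("before.png", cv2.IMREAD_GRAYSCALE)
afterGray = cv2.imread("after.png", cv2.IMREAD_GRAYSCALE)
rawDifference = cv2.absdiff(beforeGray, afterGray)      # raw per-pixel difference
filledDifference = FillDifferenceImage(rawDifference)   # close holes until few contours remain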
Example #2
def BodyLineDraw(bodyHeight, navelPoint, faceRate, image):
    drawImage = np.copy(image)
    imageHeight, imageWidth = drawImage.shape[:2]
    ratioLength = int(bodyHeight * faceRate)
    eighthRatioLength = int(bodyHeight * DefineManager.EIGHT_RATIO)
    cv2.line(drawImage, (navelPoint[0], 0), (navelPoint[0], imageHeight),
             DefineManager.RGB_COLOR_GREEN, 1)
    # Vertical line through the navel
    cv2.line(drawImage, (0, navelPoint[1]), (imageWidth, navelPoint[1]),
             DefineManager.RGB_COLOR_GREEN, 1)
    # Horizontal line through the navel
    cv2.line(drawImage, (navelPoint[0] - eighthRatioLength, 0),
             (navelPoint[0] - eighthRatioLength, imageHeight),
             DefineManager.RGB_COLOR_GREEN, 1)
    # Left shoulder line
    cv2.line(drawImage, (navelPoint[0] + eighthRatioLength, 0),
             (navelPoint[0] + eighthRatioLength, imageHeight),
             DefineManager.RGB_COLOR_GREEN, 1)
    # Right shoulder line
    cv2.line(drawImage, (0, navelPoint[1] + ratioLength),
             (imageWidth, navelPoint[1] + ratioLength),
             DefineManager.RGB_COLOR_GREEN, 1)
    # Groin line
    cv2.line(drawImage, (0, navelPoint[1] - ratioLength),
             (imageWidth, navelPoint[1] - ratioLength),
             DefineManager.RGB_COLOR_GREEN, 1)
    # Chest line

    CustomOpenCV.ShowImagesWithName([drawImage])
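
# Hypothetical usage sketch. The navel point, body height (pixels) and face
# ratio would normally come from the detection pipeline in Example #8; the
# values below are placeholders.
import cv2

photo = cv2.imread("person.png")   # placeholder input image
navelPoint = (320, 480)            # assumed (x, y) of the detected navel
bodyHeight = 600                   # assumed body height in pixels
faceRate = 0.125                   # assumed face-to-body-height ratio
BodyLineDraw(bodyHeight, navelPoint, faceRate, photo)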
Example #3
def DrawPointToImage(positionData, imageData):
    print "DrawPointToImage"
    drawImage = np.copy(imageData)  # copy is made, but the points below are drawn directly on imageData
    gaussianImage = np.copy(imageData)
    gaussianImage[:] = 0
    height, width = drawImage.shape[:2]
    index = 0
    for eachPosition in positionData:
        # Skip points that fall outside the image bounds
        if (eachPosition[DefineManager.X_POSITION_SAVE_POINT] < 0
                or eachPosition[DefineManager.Y_POSITION_SAVE_POINT] < 0):
            continue
        if (eachPosition[DefineManager.X_POSITION_SAVE_POINT] >= width
                or eachPosition[DefineManager.Y_POSITION_SAVE_POINT] >= height):
            continue
        cv2.circle(imageData,
                   (int(eachPosition[DefineManager.X_POSITION_SAVE_POINT]),
                    int(eachPosition[DefineManager.Y_POSITION_SAVE_POINT])), 2,
                   DefineManager.RGB_COLOR_RED, -1)
        cv2.circle(gaussianImage,
                   (int(eachPosition[DefineManager.X_POSITION_SAVE_POINT]),
                    int(eachPosition[DefineManager.Y_POSITION_SAVE_POINT])), 2,
                   DefineManager.RGB_COLOR_WHITE, -1)
        cv2.putText(imageData, str(index),
                    (int(eachPosition[DefineManager.X_POSITION_SAVE_POINT]),
                     int(eachPosition[DefineManager.Y_POSITION_SAVE_POINT])),
                    cv2.FONT_HERSHEY_COMPLEX, 0.3,
                    DefineManager.RGB_COLOR_WHITE)
        index = index + 1
    CustomOpenCV.ShowImagesWithName([imageData])
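
# Hypothetical usage sketch. positionData is assumed to be an iterable of
# (x, y) records indexed by DefineManager.X_POSITION_SAVE_POINT and
# DefineManager.Y_POSITION_SAVE_POINT (typically 0 and 1); out-of-bounds
# points are skipped by the function.
import numpy as np

canvas = np.zeros((480, 640, 3), dtype=np.uint8)   # blank BGR image
points = [(100, 120), (250, 300), (-5, 40)]        # the negative point is skipped
DrawPointToImage(points, canvas)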
Example #4
def DetectBlackBoardContourFromOriginImage(targetGrayImage):

    targetEqualizeGrayImage = GetContour.GetMeanRateImage(targetGrayImage)

    morphologyKernel = np.ones((Setting.DefineManager.MORPHOLOGY_MASK_SIZE,
                                Setting.DefineManager.MORPHOLOGY_MASK_SIZE),
                               np.uint8)
    targetMorphologyGrayImage = cv2.morphologyEx(targetEqualizeGrayImage,
                                                 cv2.MORPH_OPEN,
                                                 morphologyKernel)

    CustomOpenCV.ShowImagesWithName(
        [CustomOpenCV.ResizeImageAsRate(targetMorphologyGrayImage, 0.5)],
        ["targetEdgeMorphologyGrayImage"])
    # Reduce image noise

    targetMorphologyGrayImage = cv2.adaptiveThreshold(
        targetMorphologyGrayImage, Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    # Threshold each pixel against the mean of its small neighbourhood block

    targetEdgeMorphologyGrayImage = cv2.Canny(
        targetMorphologyGrayImage,
        Setting.DefineManager.CANNY_MINIMUM_THRESHOLD,
        Setting.DefineManager.CANNY_MAXIMUM_THRESHOLD,
        apertureSize=5)

    CustomOpenCV.ShowImagesWithName(
        [CustomOpenCV.ResizeImageAsRate(targetEdgeMorphologyGrayImage, 0.5)],
        ["targetEdgeMorphologyGrayImage"])

    # Detect edges from the preprocessed image
    (_, beforeEdgeGrayImageContour,
     h) = cv2.findContours(targetEdgeMorphologyGrayImage, cv2.RETR_LIST,
                           cv2.CHAIN_APPROX_SIMPLE)
    # Get image contour

    foundedMaxAreaSizeContours = sorted(
        beforeEdgeGrayImageContour, key=cv2.contourArea,
        reverse=True)[:Setting.DefineManager.GET_MAXIMUM_AREA_SIZE]

    return FindSquareObjectFromContourData(foundedMaxAreaSizeContours)
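
# FindSquareObjectFromContourData is not shown in this listing. A minimal
# sketch of what it is assumed to do (return the first contour that
# approximates to a convex quadrilateral) could look like this:
import cv2

def FindSquareObjectFromContourDataSketch(contours):
    for contour in contours:
        perimeter = cv2.arcLength(contour, True)
        approximation = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
        # A blackboard candidate has four corners and a convex outline
        if len(approximation) == 4 and cv2.isContourConvex(approximation):
            return approximation.reshape(4, 2)
    return None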
Example #5
def AngleAsDealWithPointFromContours(contours, drawImage):
    pointAngle = []
    for contourIndex in contours:
        length = len(contourIndex)
        # Sample the contour at a fixed stride (at least MINIMUM_STRIDE_KEY apart)
        strideKey = max(length // Setting.DefineManager.RESEARCH_ANGLE_COUNT,
                        Setting.DefineManager.MINIMUM_STRIDE_KEY)
        beforeAngle = 0.0
        for index in range(int(length/strideKey) + 1):
            pointA = contourIndex[((index-1) * strideKey)%length].ravel()
            pointB = contourIndex[((index) * strideKey)%length].ravel()
            pointC = contourIndex[((index+1) * strideKey)%length].ravel()
            x, y = pointB.ravel()
            cv2.circle(drawImage, (x,y), 2, Setting.DefineManager.RGB_COLOR_BLUE, -1)
            nowAngle = AngleBetweenThreePoints(pointA, pointB, pointC)
            absAngle = abs(beforeAngle - nowAngle)
            # Keep the point where the angle changes sharply between samples
            if absAngle > Setting.DefineManager.ANGLE_AS_DEAL_WITH_POINT:
                pointAngle.append(np.asarray([pointB]))
                angleText = str(int(nowAngle)) + "," + str(int(absAngle))
                thickness = 0.3
                cv2.circle(drawImage, (x,y), 2, Setting.DefineManager.RGB_COLOR_RED, -1)
                cv2.putText(drawImage, angleText,(x,y),0, thickness,Setting.DefineManager.RGB_COLOR_WHITE)
            beforeAngle = nowAngle
    ccv.ShowImagesWithName([drawImage],['PointImage'])
    return pointAngle
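
# AngleBetweenThreePoints is used above but not defined in this listing. A
# minimal sketch, assuming it returns the angle at pointB (in degrees)
# between the vectors pointB->pointA and pointB->pointC:
import numpy as np

def AngleBetweenThreePointsSketch(pointA, pointB, pointC):
    vectorBA = np.asarray(pointA, dtype=np.float64) - np.asarray(pointB, dtype=np.float64)
    vectorBC = np.asarray(pointC, dtype=np.float64) - np.asarray(pointB, dtype=np.float64)
    cosine = np.dot(vectorBA, vectorBC) / (np.linalg.norm(vectorBA) * np.linalg.norm(vectorBC) + 1e-12)
    return np.degrees(np.arccos(np.clip(cosine, -1.0, 1.0)))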
Example #6
def AngleAsDealWithPointFromContours(contours, drawImage):
    pointAngle = []
    union = []
    contourIndex = contours[0]
    length = len(contourIndex)
    strideKey = Setting.DefineManager.MINIMUM_STRIDE_KEY
    beforeAngle = 0.0
    for index in range(int(length / strideKey)):
        indexA = (index - 1) * strideKey
        indexB = (index) * strideKey
        indexC = (index + 1) * strideKey
        if indexA < indexB:
            # Collect the contour points walked over since the previous sample
            for pointIndex in range(indexA, indexB):
                union.append(contourIndex[pointIndex % length].ravel())
        pointA = contourIndex[indexA % length].ravel()
        pointB = contourIndex[indexB % length].ravel()
        pointC = contourIndex[indexC % length].ravel()
        x, y = pointB.ravel()
        cv2.circle(drawImage, (x, y), 2, Setting.DefineManager.RGB_COLOR_BLUE,
                   -1)
        nowAngle = AngleBetweenThreePoints(pointA, pointB, pointC)
        absAngle = abs(beforeAngle - nowAngle)
        if absAngle > Setting.DefineManager.ANGLE_AS_DEAL_WITH_POINT:
            union.append(pointB)
            pointAngle.append(union)
            union = []
            angleText = str(int(nowAngle)) + "," + str(int(absAngle))
            thickness = 0.3
            cv2.circle(drawImage, (x, y), 2,
                       Setting.DefineManager.RGB_COLOR_RED, -1)
            cv2.putText(drawImage, angleText, (x, y), 0, thickness,
                        Setting.DefineManager.RGB_COLOR_WHITE)
        beforeAngle = nowAngle
    pointAngle.append(union)

    ccv.ShowImagesWithName([drawImage], ['PointImage'])
    return pointAngle
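
# Note: unlike the variant in Example #5, this version also accumulates the
# contour points walked over between successive samples into "union" groups,
# so each entry of pointAngle is a run of contour points ending at a detected
# corner. Example #8 below fits a line through each such group with sp.polyfit.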
Example #7
def DetectObjectFromImage(beforeImage, afterImage, beforeGrayImage,
                          afterGrayImage):

    resizeRate = GetContour.SquareDetectAndReturnRateAsSquare(beforeGrayImage)
    beforeImage = CustomOpenCV.ResizeImageAsRate(beforeImage, resizeRate)
    beforeGrayImage = CustomOpenCV.ResizeImageAsRate(beforeGrayImage,
                                                     resizeRate)
    afterImage = CustomOpenCV.ResizeImageAsRate(afterImage, resizeRate)
    afterGrayImage = CustomOpenCV.ResizeImageAsRate(afterGrayImage, resizeRate)

    squareContourData = DetectBackgroundSquare.DetectBackgroundSquareFromImage(
        beforeImage)  # Finds the four corner points via fluorescent-marker detection
    #squareContourData = DetectBlackBoardContourFromOriginImage(beforeGrayImage)

    # Perspective-correct the large warped quadrilateral into a square
    perspectiveUpdatedBeforeImage = ImageMatrixMove.ImageMatrixMove(
        beforeImage, squareContourData)
    perspectiveUpdatedAfterImage = ImageMatrixMove.ImageMatrixMove(
        afterImage, squareContourData)

    perspectiveUpdatedBeforeImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedBeforeImage, DefineManager.IMAGE_WIDTH)
    perspectiveUpdatedAfterImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedAfterImage, DefineManager.IMAGE_WIDTH)
    # Resize image as shape [ rateHeight, DefineManager.IMAGE_WIDTH ]

    #CustomOpenCV.ShowImagesWithName([perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage],
    #                                ["perspectiveUpdatedBeforeImage", "perspectiveUpdatedAfterImage"])

    perspectiveUpdatedBeforeGrayImage = cv2.cvtColor(
        perspectiveUpdatedBeforeImage, cv2.COLOR_BGR2GRAY)
    perspectiveUpdatedAfterGrayImage = cv2.cvtColor(
        perspectiveUpdatedAfterImage, cv2.COLOR_BGR2GRAY)

    morphologyKernel = np.ones(
        (Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1,
         Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1), np.uint8)
    perspectiveUpdatedBeforeMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedBeforeGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    perspectiveUpdatedAfterMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedAfterGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    # Reduce image noise

    beforeThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedBeforeMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    afterThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedAfterMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    # Adaptive Threshold Image
    #CustomOpenCV.ShowImagesWithName([beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage], ['beforeThresholdedBlackBoardImage', 'afterThresholdedBlackBoardImage'])

    differenceBasedOnThreshImage = cv2.absdiff(
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage)
    differenceBasedOnThreshImage[
        differenceBasedOnThreshImage >
        Setting.DefineManager.EACH_IMAGE_DIFFERENCE_THRESHOLD] = Setting.DefineManager.SET_IMAGE_WHITE_COLOR
    # Mark strong differences between the two thresholded images as white

    #CustomOpenCV.ShowImagesWithName([differenceBasedOnThreshImage], ["differenceBasedOnThreshImage"])
    objectFoundedImage = GetContour.GetObjectImage(
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage)

    humanDetectedContour, contourLineDrawImage = GetContour.GetContour(
        objectFoundedImage, perspectiveUpdatedAfterImage)
    GetContour.FindNavel(humanDetectedContour, contourLineDrawImage)
    importantPoint = GetContour.AngleAsDealWithPointFromContours(
        humanDetectedContour, contourLineDrawImage)

    return [
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage,
        differenceBasedOnThreshImage, humanDetectedContour
    ]
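
# Hypothetical usage sketch. The file names are placeholders; the grayscale
# versions are built the same way the pipeline itself converts images.
import cv2

beforeImage = cv2.imread("board_before.jpg")
afterImage = cv2.imread("board_after.jpg")
beforeGrayImage = cv2.cvtColor(beforeImage, cv2.COLOR_BGR2GRAY)
afterGrayImage = cv2.cvtColor(afterImage, cv2.COLOR_BGR2GRAY)
beforeThresh, afterThresh, diffImage, humanContour = DetectObjectFromImage(
    beforeImage, afterImage, beforeGrayImage, afterGrayImage)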
Example #8
def DetectObjectFromImage(beforeImage, afterImage, beforeGrayImage,
                          afterGrayImage):

    resizeRate = GetContour.SquareDetectAndReturnRateAsSquare(beforeGrayImage)
    beforeImage = CustomOpenCV.ResizeImageAsRate(beforeImage, resizeRate)
    beforeGrayImage = CustomOpenCV.ResizeImageAsRate(beforeGrayImage,
                                                     resizeRate)
    afterImage = CustomOpenCV.ResizeImageAsRate(afterImage, resizeRate)
    afterGrayImage = CustomOpenCV.ResizeImageAsRate(afterGrayImage, resizeRate)

    #squareContourData = DetectBackgroundSquare.DetectBackgroundSquareFromImage(beforeImage)  # Finds the four corner points via fluorescent-marker detection
    # On macOS this function does not work and falls into an infinite loop,
    # so the blackboard-contour detector is used instead.
    squareContourData = DetectBlackBoardContourFromOriginImage(beforeGrayImage)

    # Perspective-correct the large warped quadrilateral into a square
    perspectiveUpdatedBeforeImage = ImageMatrixMove.ImageMatrixMove(
        beforeImage, squareContourData)
    perspectiveUpdatedAfterImage = ImageMatrixMove.ImageMatrixMove(
        afterImage, squareContourData)

    perspectiveUpdatedBeforeImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedBeforeImage, DefineManager.IMAGE_WIDTH)
    perspectiveUpdatedAfterImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedAfterImage, DefineManager.IMAGE_WIDTH)
    # Resize image as shape [ rateHeight, DefineManager.IMAGE_WIDTH ]

    #CustomOpenCV.ShowImagesWithName([perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage],
    #                                ["perspectiveUpdatedBeforeImage", "perspectiveUpdatedAfterImage"])

    perspectiveUpdatedBeforeGrayImage = cv2.cvtColor(
        perspectiveUpdatedBeforeImage, cv2.COLOR_BGR2GRAY)
    perspectiveUpdatedAfterGrayImage = cv2.cvtColor(
        perspectiveUpdatedAfterImage, cv2.COLOR_BGR2GRAY)

    morphologyKernel = np.ones(
        (Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1,
         Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1), np.uint8)
    perspectiveUpdatedBeforeMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedBeforeGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    perspectiveUpdatedAfterMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedAfterGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    # Reduce image noise

    beforeThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedBeforeMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    afterThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedAfterMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    # Adaptive Threshold Image
    #CustomOpenCV.ShowImagesWithName([beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage], ['beforeThresholdedBlackBoardImage', 'afterThresholdedBlackBoardImage'])

    differenceBasedOnThreshImage = cv2.absdiff(
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage)
    differenceBasedOnThreshImage[
        differenceBasedOnThreshImage >
        Setting.DefineManager.EACH_IMAGE_DIFFERENCE_THRESHOLD] = Setting.DefineManager.SET_IMAGE_WHITE_COLOR
    # Mark strong differences between the two thresholded images as white

    #CustomOpenCV.ShowImagesWithName([differenceBasedOnThreshImage], ["differenceBasedOnThreshImage"])
    objectFoundedImage = GetContour.GetObjectImage(
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage)

    humanDetectedContour, contourLineDrawImage = GetContour.GetContour(
        objectFoundedImage, perspectiveUpdatedAfterImage)
    faceMinY, faceMaxY = GetContour.DetectFaceAndGetY(
        perspectiveUpdatedAfterImage)
    navelPoint, faceRate, maxY, minY = GetContour.FindNavel(
        humanDetectedContour, faceMaxY, contourLineDrawImage)
    height = maxY - minY
    importantPoint = GetContour.AngleAsDealWithPointFromContours(
        humanDetectedContour, contourLineDrawImage)

    beforeDrawImage = np.copy(perspectiveUpdatedBeforeImage)
    afterDrawImage = np.copy(perspectiveUpdatedAfterImage)
    functionParameter = []
    for index in range(len(importantPoint)):
        xArray = []
        yArray = []
        for point in importantPoint[index]:
            x, y = point.ravel()
            xArray.append(x)
            yArray.append(y)
        xArray = np.asarray(xArray)
        yArray = np.asarray(yArray)
        if xArray.shape[0] > 0:
            # Fit y = a*x + b and get the coefficients (a, b)
            functionCharacteristic = sp.polyfit(
                xArray, yArray, DefineManager.FUNCTION_DIMENSION)
            functionParameter.append(functionCharacteristic)
            yRegressionArray = sp.polyval(functionCharacteristic, xArray)
            # Root-mean-square error of the regression (computed but not used further)
            err = np.sqrt(
                sum((yArray - yRegressionArray)**2) / yArray.shape[0])
            pointA, pointB = GetContour.GetStartAndEndPointsFromLine(
                functionCharacteristic, xArray)

            cv2.line(beforeDrawImage, pointA, pointB,
                     DefineManager.RGB_COLOR_GREEN, 1)
            cv2.line(afterDrawImage, pointA, pointB,
                     DefineManager.RGB_COLOR_GREEN, 1)

    CustomOpenCV.ShowImagesWithName([beforeDrawImage, afterDrawImage])

    return [
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage, height,
        navelPoint, humanDetectedContour, functionParameter, beforeDrawImage,
        faceRate
    ]
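
# GetStartAndEndPointsFromLine is not shown in this listing. A minimal
# sketch, assuming FUNCTION_DIMENSION is 1 (consistent with the "y = a*x + b"
# comment above) and that it evaluates the fitted line at the smallest and
# largest x of the point group, returning two integer endpoints for cv2.line:
import numpy as np

def GetStartAndEndPointsFromLineSketch(functionCharacteristic, xArray):
    a, b = functionCharacteristic          # degree-1 coefficients from polyfit
    xStart, xEnd = int(np.min(xArray)), int(np.max(xArray))
    return (xStart, int(a * xStart + b)), (xEnd, int(a * xEnd + b))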