예제 #1
0
def getLaneCurve(img, display=2):
    """Estimate lane curvature from a road image.

    Pipeline: threshold -> perspective warp (trackbar-tuned points) ->
    histogram-based base/centre detection -> moving average over the
    module-level `curveList` buffer -> optional overlay display.

    Parameters
    ----------
    img : np.ndarray
        BGR input frame (H, W, 3).
    display : int
        0 = no windows, 1 = result window only, 2 = full debug stack.

    Returns
    -------
    float
        Normalized curvature clamped to [-1, 1] (negative = left).
    """
    imgCopy = img.copy()
    imgResult = img.copy()
    #### STEP 1: binary mask of the lane markings
    imgThres = utlis.thresholding(img)

    #### STEP 2: bird's-eye warp using the trackbar-selected trapezoid
    hT, wT, c = img.shape
    points = utlis.valTrackbars()
    imgWarp = utlis.warpImg(imgThres, points, wT, hT)
    imgWarpPoints = utlis.drawPoints(imgCopy, points)

    #### STEP 3: histogram of white pixels; bottom region gives the lane
    # base, full image gives the average lane position
    middlePoint, imgHist = utlis.getHistogram(imgWarp, display=True, minPer=0.5, region=4)
    curveAveragePoint, imgHist = utlis.getHistogram(imgWarp, display=True, minPer=0.9)
    curveRaw = curveAveragePoint - middlePoint

    #### STEP 4: smooth with a moving average over the last `avgVal` frames
    curveList.append(curveRaw)
    if len(curveList) > avgVal:
        curveList.pop(0)
    curve = int(sum(curveList) / len(curveList))

    #### STEP 5: optional visualization overlays
    if display != 0:
        imgInvWarp = utlis.warpImg(imgWarp, points, wT, hT, inv=True)
        imgInvWarp = cv2.cvtColor(imgInvWarp, cv2.COLOR_GRAY2BGR)
        imgInvWarp[0:hT // 3, 0:wT] = 0, 0, 0  # blank the far third (horizon noise)
        imgLaneColor = np.zeros_like(img)
        imgLaneColor[:] = 0, 255, 0
        imgLaneColor = cv2.bitwise_and(imgInvWarp, imgLaneColor)
        imgResult = cv2.addWeighted(imgResult, 1, imgLaneColor, 1, 0)
        midY = 450
        cv2.putText(imgResult, str(curve), (wT // 2 - 80, 85), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 255), 3)
        cv2.line(imgResult, (wT // 2, midY), (wT // 2 + (curve * 3), midY), (255, 0, 255), 5)
        cv2.line(imgResult, ((wT // 2 + (curve * 3)), midY - 25), (wT // 2 + (curve * 3), midY + 25), (0, 255, 0), 5)
        for x in range(-30, 30):
            w = wT // 20
            cv2.line(imgResult, (w * x + int(curve // 50), midY - 10),
                     (w * x + int(curve // 50), midY + 10), (0, 0, 255), 2)
        #fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
        #cv2.putText(imgResult, 'FPS ' + str(int(fps)), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (230, 50, 50), 3);
    if display == 2:
        imgStacked = utlis.stackImages(0.7, ([img, imgWarpPoints, imgWarp],
                                             [imgHist, imgLaneColor, imgResult]))
        cv2.imshow('ImageStack', imgStacked)
        cv2.imshow('Warp Points', imgWarpPoints)
    elif display == 1:
        cv2.imshow('Resutlt', imgResult)

    #### NORMALIZATION
    curve = curve / 100
    # BUG FIX: the original used `curve == 1` / `curve == -1` — comparisons
    # with no effect — so the value was never actually clamped to [-1, 1].
    if curve > 1:
        curve = 1
    if curve < -1:
        curve = -1

    return curve
예제 #2
0
# Edge-detect the blurred image, draw contours, and detect circular plaques.
imgCanny = cv2.Canny(imgBlur, 100, 110)

cimg = cv2.cvtColor(imgBlur, cv2.COLOR_GRAY2BGR)

# Find all contours
contours, hierachy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_NONE)
cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 5)

# Hough circle detection on the blurred grayscale image.
circles = cv2.HoughCircles(imgBlur,
                           cv2.HOUGH_GRADIENT,
                           1,
                           20,
                           param1=500,
                           param2=40,
                           minRadius=0,
                           maxRadius=0)
# BUG FIX: HoughCircles returns None when no circles are found; the
# original unconditionally called np.uint16(np.around(circles)) and crashed.
if circles is not None:
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        # draw the outer circle for each detection (x, y, radius)
        cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)

imgBlank = np.zeros_like(img)
imageArray = ([img, imgGray, imgBlur,
               imgCanny], [imgContours, imgBlank, imgBlank, imgBlank])
imgStack = utlis.stackImages(imageArray, 0.5)

cv2.imshow('dd', imgStack)
cv2.imshow('Sample Display', cimg)
cv2.waitKey(0)

# plaque counting
예제 #3
0
        # Render the numeric grade onto a blank grade-area-sized image, warp
        # it back into the original sheet perspective, then blend both the
        # answer overlay and the grade overlay onto the final image.
        imgRawGrade = np.zeros_like(imgGardeDisplay, np.uint8)  # NEW BLANK IMAGE WITH GRADE AREA SIZE
        cv2.putText(imgRawGrade, str(int(score)) + "%", (70, 100)
                        , cv2.FONT_HERSHEY_COMPLEX, 3, (0, 255, 255), 3)  # ADD THE GRADE TO NEW IMAGE
        invMatrixG = cv2.getPerspectiveTransform(ptG2, ptG1)  # INVERSE TRANSFORMATION MATRIX
        imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, invMatrixG, (widthImg, heightImg))  # INV IMAGE WARP

        imgFinal=cv2.addWeighted(imgFinal,1,imgInvWrap,1,0)
        imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1, 0)


    # 3x4 grid of pipeline stages for the debug display
    imgBlank=np.zeros_like(img)
    imageArray=([img,imgGray,imgBlur,imgCanny],
                [imgContours,imgBiggestContours,imgWrapColored,imgThresh],
                [imgResult,imgRawDrawing,imgInvWrap,imgFinal])
# NOTE(review): bare `except:` silently swallows every failure in the
# pipeline above and falls back to a blank grid — consider
# `except Exception:` with logging so real bugs aren't hidden.
except:
    imgBlank = np.zeros_like(img)
    imageArray = ([img, imgGray, imgBlur, imgBlank],
                      [imgBlank, imgBlank, imgBlank, imgBlank],
                      [imgBlank, imgBlank, imgBlank, imgBlank])

# LABELS FOR DISPLAY (one label per cell of imageArray, row-major)
lables = [["Original","Gray","Blur","Canny"],
                  ["Contours","Biggest Contour","Wrap","Threshold",],
              ["Result","Raw Draw","Inv Wrap","Final"]]

imgStacked=utlis.stackImages(imageArray,0.5,lables)

cv2.imshow("Stacked Image",imgStacked)
cv2.imshow("Final Image",imgFinal)

cv2.waitKey(0)
        # Smooth the detected curve with a fixed-size circular buffer:
        # seed the average with the current value on an all-zero buffer,
        # and reject outliers (>200 from the running mean) by storing the
        # mean instead of the raw measurement.
        if  int(np.sum(arrayCurve)) == 0:averageCurve = currentCurve
        else:
            averageCurve = np.sum(arrayCurve) // arrayCurve.shape[0]
        if abs(averageCurve-currentCurve) >200: arrayCurve[arrayCounter] = averageCurve
        else :arrayCurve[arrayCounter] = currentCurve
        arrayCounter +=1
        # wrap the write index once the buffer is full
        if arrayCounter >=noOfArrayValues : arrayCounter=0
        cv2.putText(imgFinal, str(int(averageCurve)), (frameWidth//2-70, 70), cv2.FONT_HERSHEY_DUPLEX, 1.75, (0, 0, 255), 2, cv2.LINE_AA)

    # NOTE(review): bare `except:` resets lane_curve to 0 on ANY error,
    # hiding real failures — consider `except Exception:` with logging.
    except:
        lane_curve=00
        pass

    imgFinal= utlis.drawLines(imgFinal,lane_curve)


    # Build the 3x3 debug stack of pipeline stages and show results.
    imgThres = cv2.cvtColor(imgThres,cv2.COLOR_GRAY2BGR)
    imgBlank = np.zeros_like(img)
    imgStacked = utlis.stackImages(0.7, ([img,imgUndis,imgWarpPoints],
                                         [imgColor, imgCanny, imgThres],
                                         [imgWarp,imgSliding,imgFinal]
                                         ))

    cv2.imshow("PipeLine",imgStacked)
    cv2.imshow("Result", imgFinal)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
예제 #5
0
    # Read the HSV range limits from the "HSV" trackbar window (the trailing
    # comments record the values found to work), build an in-range mask,
    # then edge-detect and contour the masked result.
    # NOTE(review): h_min is read before this chunk (not visible here).
    h_max = cv2.getTrackbarPos("HUE Max", "HSV")  #98
    s_min = cv2.getTrackbarPos("SAT Min", "HSV")  #149
    s_max = cv2.getTrackbarPos("SAT Max", "HSV")  #255
    v_min = cv2.getTrackbarPos("VALUE Min", "HSV")  #46
    v_max = cv2.getTrackbarPos("VALUE Max", "HSV")  #255
    print(h_min)

    lower = np.array([h_min, s_min, v_min])
    upper = np.array([h_max, s_max, v_max])
    mask = cv2.inRange(imgHsv, lower, upper)
    result = cv2.bitwise_and(img, img, mask=mask)
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)  # 3-channel so it can be stacked

    # Canny edges of the masked image, with trackbar-tuned thresholds.
    imgBlur = cv2.GaussianBlur(result, (7, 7), 1)
    imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
    threshold1 = cv2.getTrackbarPos("Threshold1", "Parameters")  #204
    threshold2 = cv2.getTrackbarPos("Threshold2", "Parameters")  #77
    imgCanny = cv2.Canny(imgGray, threshold1, threshold2)
    kernel = np.ones((5, 5))
    imgDil = cv2.dilate(imgCanny, kernel, iterations=1)  # close gaps in edges
    getContours(imgDil, imgContour)
    display(imgContour)

    stack = utlis.stackImages(0.7, ([img, result], [imgDil, imgContour]))

    cv2.imshow('Horizontal Stacking', stack)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
        # APPLY ADAPTIVE THRESHOLD
        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)
        # positional args 1, 1 == ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV;
        # blockSize=7, C=2
        imgAdaptiveThre= cv2.adaptiveThreshold(imgWarpGray, 255, 1, 1, 7, 2)
        imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
        imgAdaptiveThre=cv2.medianBlur(imgAdaptiveThre,3)  # remove salt noise

        # Image Array for Display
        imageArray = ([img,imgGray,imgThreshold,imgContours],
                      [imgBigContour,imgWarpColored, imgWarpGray,imgAdaptiveThre])

    else:
        # no document detected this frame: pad the second row with blanks
        imageArray = ([img,imgGray,imgThreshold,imgContours],
                      [imgBlank, imgBlank, imgBlank, imgBlank])

    # LABELS FOR DISPLAY
    lables = [["Original","Gray","Threshold","Contours"],
              ["Biggest Contour","Warp Prespective","Warp Gray","Adaptive Threshold"]]

    stackedImage = utlis.stackImages(imageArray,0.75,lables)
    cv2.imshow("Result",stackedImage)

    # SAVE IMAGE WHEN 's' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('s'):
        cv2.imwrite("Scanned/myImage"+str(count)+".jpg",imgWarpColored)
        # flash a "Scan Saved" banner over the stacked view for 300 ms
        cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
                      (1100, 350), (0, 255, 0), cv2.FILLED)
        cv2.putText(stackedImage, "Scan Saved", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
                    cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
        cv2.imshow('Result', stackedImage)
        cv2.waitKey(300)
        count += 1
예제 #7
0
def omr(imgpath, field, answer):
    """Grade a 5-question, 5-choice OMR (bubble) answer sheet image.

    Parameters
    ----------
    imgpath : str
        Path to the sheet image on disk.
    field : int
        When 1, grade against `answer`, show a debug window, and return
        the score as a string; any other value returns only the marks.
    answer : sequence
        Expected answer index (0-4) for each of the 5 questions.

    Returns
    -------
    str or list
        str(score) when field == 1 and a sheet was found; otherwise the
        list of detected answer indices (empty if no sheet was detected).
    """
    pathImage = imgpath
    heightImg = 700
    widthImg = 700
    questions = 5
    choices = 5
    ans = answer

    img = cv2.imread(pathImage)
    img = cv2.resize(img, (widthImg, heightImg))  # normalize working size
    imgFinal = img.copy()
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # filler for the debug stack
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, 10, 70)

    # FIND ALL CONTOURS and keep the biggest rectangle (the answer grid).
    imgContours = img.copy()
    imgBigContour = img.copy()
    contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)
    rectCon = utlis.rectContour(contours)
    biggestPoints = utlis.getCornerPoints(rectCon[0])
    # gradePoints = utlis.getCornerPoints(rectCon[1])

    # BUG FIX: initialized up front so the final `return myIndex` cannot
    # raise UnboundLocalError when no rectangle is detected.
    myIndex = []

    if biggestPoints.size != 0:

        # Warp the biggest rectangle to a flat top-down view.
        biggestPoints = utlis.reorder(biggestPoints)
        cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20)
        pts1 = np.float32(biggestPoints)
        pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

        # Threshold (inverted) so marked bubbles become white blobs.
        imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
        imgThresh = cv2.threshold(imgWarpGray, 170, 255, cv2.THRESH_BINARY_INV)[1]

        boxes = utlis.splitBoxes(imgThresh)  # one sub-image per bubble
        countR = 0
        countC = 0
        myPixelVal = np.zeros((questions, choices))  # non-zero pixel count per bubble
        # Sentinel: index 5 marks a question whose top two bubbles are
        # too close to distinguish (stored as a 1x1 array so the
        # `myIndexVal[0][0]` lookup below works for both cases).
        nonPixelVal = np.zeros((1, 1))
        nonPixelVal[0][0] = 5
        for image in boxes:
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC] = totalPixels
            countC += 1
            if countC == choices:
                countC = 0
                countR += 1

        # For each question take the bubble with the most ink; if the
        # runner-up is nearly as dark, record the ambiguous sentinel.
        for x in range(0, questions):
            arr = myPixelVal[x]
            max1 = np.amax(arr)
            myIndexVal = np.where(arr == np.amax(arr))
            temp = np.delete(arr, myIndexVal)
            max2 = np.amax(temp)
            if (max1.tolist() / 1000 - max2.tolist() / 1000 < 2):
                myIndexVal = nonPixelVal
            myIndex.append(myIndexVal[0][0])

        # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS
        if field == 1:
            grading = []
            for x in range(0, questions):
                if ans[x] == myIndex[x]:
                    grading.append(1)
                else:
                    grading.append(0)
            score = (sum(grading) / questions) * 50  # NOTE: scaled to 0-50, not 0-100
            print("SCORE", score)

            imageArray = ([img, imgGray, imgBlur, imgCanny],
                          [imgContours, imgBigContour, imgThresh, imgBlank])
            lables = [["Original", "Gray", "Blur", "Edges"],
                      ["Contours", "Biggest Contour", "Threshold", " "]]

            stackedImage = utlis.stackImages(imageArray, 0.5, lables)
            cv2.imshow('Result', stackedImage)
            # cv2.imwrite('r.jpg', stackedImage)
            cv2.waitKey()
            return (str(score))

    return (myIndex)
예제 #8
0
    def GoruntuIsle(self, img):
        """Process one camera frame of an OMR answer sheet and grade it.

        Finds the biggest rectangular contour in the frame, warps it flat,
        thresholds the bubbles, reads a 4-digit student number and 20
        answers (two columns of 10), grades against the global answer key
        `ans`, overlays the result on the frame, and pushes it to the Qt
        UI. All tuning values (crop coordinates, threshold `esikdeger`,
        counts) come from module-level globals. All errors are caught and
        printed, never raised.
        """

        try:

            global widthImg, heightImg, ans, x, y, xsag, xsol, w, h, esikdeger, ox, oy, ow, oh, secimSayisi, sorusayisi, sayac, sonbulunanNumara, bulunanNumara, sonbulunanpuan, bulunanpuan
            img2 = cv2.resize(img, (widthImg, heightImg))
            if cameraNo == -1:
                pass
            else:
                # presumably the external camera is mounted upside down — confirm
                img2 = cv2.rotate(img2, cv2.ROTATE_180)
            imgCountours = img2.copy()
            imageFinal = img2.copy()
            imgBiggestCountours = img2.copy()
            imgGray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
            imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
            imgCanny = cv2.Canny(imgBlur, 10, 50)
            #cv2.imshow("test",imgCanny)

            try:
                #FIND ALL COUNTERS
                countours, hierarchy = cv2.findContours(
                    imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                cv2.drawContours(imgCountours, countours, -1, (0, 255, 0), 10)

                #FIND RECTANGLES
                rectCon = utlis.rectContour(countours)
                biggestContour = utlis.getCornerPoints(rectCon[0])
                #print(biggestContour)

                if biggestContour.size != 0:
                    cv2.drawContours(imgBiggestCountours, biggestContour, -1,
                                     (0, 255, 0), 20)
                    biggestContour = utlis.reorder(biggestContour)
                    pts1 = np.float32(
                        biggestContour)  # PREPARE POINTS FOR WARP
                    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                                       [widthImg,
                                        heightImg]])  # PREPARE POINTS FOR WARP
                    matrix = cv2.getPerspectiveTransform(
                        pts1, pts2)  # GET TRANSFORMATION MATRIX
                    imgWarpColored = cv2.warpPerspective(
                        img2, matrix,
                        (widthImg, heightImg))  # APPLY WARP PERSPECTIVE
                    #cv2.imshow("bulunan",imgWarpColored)
                    #APPLY TRESHOLD

                    imgWarpGray = cv2.cvtColor(imgWarpColored,
                                               cv2.COLOR_BGR2GRAY)
                    imgThresh = cv2.threshold(imgWarpGray, tresh, 255,
                                              cv2.THRESH_BINARY_INV)[1]

                    #boxes=utlis.splitBoxes(imgThresh)

                    # Crop the left/right answer columns and the student-number
                    # grid; coordinates come from the globals above.
                    crop_imgSol = imgThresh[y:y + h, xsol:(xsol + w)]
                    crop_imgSag = imgThresh[y:y + h, xsag:(xsag + w)]
                    crop_imgOgrenciNu = imgThresh[oy:oy + oh, ox:(ox + ow)]

                    #cv2.imshow("cropped", crop_imgSol)
                    #cv2.imwrite("croppedsol.jpg",crop_imgSol)
                    #cv2.imwrite("croppedsag.jpg",crop_imgSag)

                    boxesSol = utlis.splitBoxes(crop_imgSol)
                    boxesSag = utlis.splitBoxes(crop_imgSag)
                    boxesOgrenciNu = utlis.splitBoxesOgrenciNu(
                        crop_imgOgrenciNu)

                    sorusayisi = 20
                    #GETTING NOPIXEL VALUES OF EACH
                    myPixelVal = np.zeros((sorusayisi, secimSayisi))

                    # 4-digit student number grid: 4 rows x 10 digit columns
                    myPixelValOgrenciNu = np.zeros((4, 10))
                    countC = 0
                    countR = 0

                    for image in boxesOgrenciNu:
                        totalPixels = cv2.countNonZero(image)
                        myPixelValOgrenciNu[countR][countC] = totalPixels
                        countC += 1
                        if (countC == 10):
                            countR += 1
                            countC = 0
                    #print(myPixelValOgrenciNu)

                    countC = 0
                    countR = 0

                    # Left column fills rows 0-9, right column continues 10-19
                    # (countR carries over between the two loops).
                    for image in boxesSol:
                        totalPixels = cv2.countNonZero(image)
                        myPixelVal[countR][countC] = totalPixels
                        countC += 1
                        if (countC == secimSayisi):
                            countR += 1
                            countC = 0
                    #print(myPixelVal)

                    for image in boxesSag:
                        totalPixels = cv2.countNonZero(image)
                        myPixelVal[countR][countC] = totalPixels
                        countC += 1
                        if (countC == secimSayisi):
                            countR += 1
                            countC = 0

                    #FINDING INDEX VALUES OF THE MARKINGS

                    # Student number: per row, pick the digit column with the
                    # most ink.
                    myIndexOgrenciNu = []
                    for x in range(0, 4):
                        arr = myPixelValOgrenciNu[x]
                        #print("arr",arr)
                        myIndexVal = np.where(arr == np.amax(arr))
                        #print(myIndexVal[0])
                        myIndexOgrenciNu.append(myIndexVal[0][0])
                    ogrenciNumarasi = str(myIndexOgrenciNu[0]) + str(
                        myIndexOgrenciNu[1]) + str(myIndexOgrenciNu[2]) + str(
                            myIndexOgrenciNu[3])
                    #print('Öğrenci numarası {}'.format(ogrenciNumarasi))

                    #cv2.imshow('mum',utlis.showNumber2(imgWarpColored,myIndexOgrenciNu,4,10,ox,oy,ow,oh))

                    # Answers: 0-3 = choice index, 4 = left blank, 5 = multiple
                    # marks. NOTE(review): the `myIndexVal[0][0] = 5/4` branches
                    # mutate the array left over from the loop above / the
                    # previous iteration — fragile; confirm intended.
                    myIndex = []
                    for x in range(0, sorusayisi):
                        isaretsayisi = 0
                        arr = myPixelVal[x]
                        #print("arr-"+str(x),arr)
                        #print('max',np.amax(arr))
                        #print('sayı',np.count_nonzero(arr>esikdeger))
                        isaretsayisi = np.count_nonzero(arr > esikdeger)
                        enfazla = np.amax(arr)
                        if isaretsayisi > 1:
                            myIndexVal[0][
                                0] = 5  # two or more bubbles marked
                        elif esikdeger < enfazla:
                            myIndexVal = np.where(arr == np.amax(arr))
                            #print(np.where(arr==np.amax(arr))[0])
                        else:
                            #pass
                            myIndexVal[0][0] = 4
                        #print(myIndexVal[0])
                        myIndex.append(myIndexVal[0][0])
                    #print(myIndex)

                    #GRADING
                    # grading codes: 1 correct, 0 wrong, 4 blank, 5 multi-marked
                    grading = []
                    for x in range(0, sorusayisi):
                        if myIndex[x] == 4:
                            grading.append(4)
                        elif myIndex[x] == 5:
                            grading.append(5)
                        elif ans[x] == myIndex[x]:
                            grading.append(1)
                        else:
                            grading.append(0)
                    #print(grading)

                    #SCORE
                    DogrularSay = grading.count(1)
                    YanlislariSay = grading.count(0) + grading.count(5)
                    BoslariSay = grading.count(4)
                    score = (DogrularSay / sorusayisi) * 100
                    mesaj = 'No:' + ogrenciNumarasi + ' Puan:' + str(
                        score) + ' Doğru:' + str(
                            DogrularSay) + ' Yanlış:' + str(
                                YanlislariSay) + ' Boş:' + str(BoslariSay)
                    #print(score)

                    #DISPLAY ANSWERS
                    #imgResult=imgWarpColored.copy()

                    imgResultSol = imgWarpColored.copy()
                    #imgResultSag=imgWarpColored.copy()
                    #imgResult= imgResult[y:y+h, x:x+w]
                    imgResultNu = utlis.showNumber2(imgResultSol,
                                                    myIndexOgrenciNu, 4, 10,
                                                    ox, oy, ow, oh)
                    imgResultSol = utlis.showAnswers2(imgResultSol,
                                                      myIndex[0:10],
                                                      grading[0:10], ans[0:10],
                                                      10, 4, xsol, y, w, h)
                    imgResultSag = utlis.showAnswers2(imgResultSol,
                                                      myIndex[10:20],
                                                      grading[10:20],
                                                      ans[10:20], 10, 4, xsag,
                                                      y, w, h)

                    #cv2.imshow("imgResultSag",imgResultSag)

                    # Same drawing again on a black canvas, for warping back
                    # onto the original (un-warped) frame.
                    imRawDrawingSol = np.zeros_like(imgResultSol)
                    imgResultNu = utlis.showNumber2(imRawDrawingSol,
                                                    myIndexOgrenciNu, 4, 10,
                                                    ox, oy, ow, oh)
                    imRawDrawingSol = utlis.showAnswers2(
                        imRawDrawingSol, myIndex[0:10], grading[0:10],
                        ans[0:10], 10, 4, xsol, y, w, h)
                    imRawDrawingSag = utlis.showAnswers2(
                        imRawDrawingSol, myIndex[10:20], grading[10:20],
                        ans[10:20], 10, 4, xsag, y, w, h)

                    #cv2.imshow("imgResult1",imRawDrawing)
                    #pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
                    #pts2s = np.float32([[0, 0],[w, 0], [0, h],[w, h]]) # PREPARE POINTS FOR WARP
                    invMatrix = cv2.getPerspectiveTransform(
                        pts2, pts1)  # GET TRANSFORMATION MATRIX

                    # font
                    font = cv2.FONT_HERSHEY_SIMPLEX

                    # org
                    org = (50), (heightImg - 20)

                    # fontScale
                    fontScale = 0.8

                    # Blue color in BGR
                    #b,g,r,a
                    color = (0, 0, 0, 0)

                    # Line thickness of 2 px
                    thickness = 2

                    # Using cv2.putText() method
                    #cv2.putText(imRawDrawingSol, 'No:'+ogrenciNumarasi+' Puan:'+str(score) +' D:'+str(DogrularSay)+' Y:'+str(YanlislariSay)+' B:'+str(BoslariSay), org, font,fontScale, color, thickness, cv2.LINE_AA,)

                    imgInvWarp = cv2.warpPerspective(
                        imRawDrawingSol, invMatrix,
                        ((widthImg), heightImg))  # APPLY WARP PERSPECTIVE
                    #cv2.putText(imageFinal,str(score),((widthImg-150),(heightImg-100)),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
                    imageFinal = cv2.addWeighted(imageFinal, 1, imgInvWarp, 1,
                                                 0)
                    # white banner at the bottom, then the summary text on it
                    imageFinal = cv2.rectangle(imageFinal,
                                               (50, (heightImg - 50)),
                                               (widthImg - 50,
                                                (heightImg - 10)),
                                               (255, 255, 255), -1)
                    imageFinal = utlis.print_utf8_text(
                        imageFinal, mesaj, color,
                        (widthImg / 2 - (len(mesaj) * 6)), (heightImg - 50))

                    # yellow guide rectangle for aiming the sheet
                    imageFinal = cv2.rectangle(imageFinal, (150, 25),
                                               (600, 450), (0, 255, 255), 3,
                                               cv2.LINE_AA)
                    #cv2.putText(imageFinal,'Deneme',(50,125),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
                    #cv2.imshow("Camera",imageFinal)

                    if asamalariGoster == True:
                        imgBlank = np.zeros_like(img2)
                        imageArray = ([img2, imgGray, imgBlur, imgCanny], [
                            imgCountours, imgBiggestCountours, imgWarpColored,
                            imgThresh
                        ], [imgResultSag, imageFinal, imgBlank, imgBlank])
                        imgStacked = utlis.stackImages(imageArray, .5)
                        cv2.imshow("imgStacked", imgStacked)

                    genislik = int(widthImg / 1.5)
                    yukseklik = int(heightImg / 1.5)
                    imageFinal = cv2.resize(imageFinal, (genislik, yukseklik))
                    # print(f'yeni gen-yuk {genislik} {yukseklik}')
                    height, width, channel = imageFinal.shape
                    step = channel * width
                    # create QImage from image
                    qImg = QImage(imageFinal.data, width, height, step,
                                  QImage.Format_BGR888)
                    # show image in img_label
                    self.ui.imgCamera_2.setPixmap(QPixmap.fromImage(qImg))

                    # Letter codes per answer: A-D, N = blank, M = multi-marked
                    ogrenciCevapSikleri = []
                    ogrenciDogruYanlis = []
                    cevaplar = []

                    for i in myIndex:
                        if i == 0: ogrenciCevapSikleri.append('A')
                        elif i == 1: ogrenciCevapSikleri.append('B')
                        elif i == 2: ogrenciCevapSikleri.append('C')
                        elif i == 3: ogrenciCevapSikleri.append('D')
                        elif i == 4: ogrenciCevapSikleri.append('N')
                        elif i == 5: ogrenciCevapSikleri.append('M')

                    for x in range(0, sorusayisi):
                        if myIndex[x] == ans[x]: ogrenciDogruYanlis.append('D')
                        else: ogrenciDogruYanlis.append('Y')

                    for i in myIndex:
                        cevaplar.append(i)

                    #print(ogrenciCevapSikleri)
                    #print(ogrenciDogruYanlis)
                    # self.ui.imgBulunan.setVisible(False)

                    # Auto-stop: accept the reading after the same number+score
                    # is seen in 5 consecutive frames.
                    if otomatikDurdur == True:
                        print(f'otomatik durdurma açık')
                        bulunanNumara = ogrenciNumarasi
                        bulunanpuan = score
                        if sayac < 5:
                            print(
                                f'sayac {sayac} bulunanpuan {bulunanpuan} sonbulunan {sonbulunanpuan}'
                            )
                            if (sonbulunanpuan == bulunanpuan) and (
                                    sonbulunanNumara == bulunanNumara) and (
                                        int(sonbulunanNumara) > 0):
                                sayac += 1
                            else:
                                sayac = 0
                                sonbulunanNumara = bulunanNumara
                                sonbulunanpuan = bulunanpuan
                        else:
                            global toogledurum
                            toogledurum = False
                            # self.StartStop()
                            sayac = 0
                            print(f'bulundu ')
                            # print(ogrenciCevapSikleri)
                            # print(ogrenciDogruYanlis)

                            b = ''
                            for i in range(len(ogrenciCevapSikleri)):
                                b += (ogrenciCevapSikleri[i])
                            self.ui.lblogrencicevaplar.setPlainText(b)

                            imgResultSag = cv2.resize(imgResultSag,
                                                      (genislik, yukseklik))
                            # print(f'yeni gen-yuk {genislik} {yukseklik}')
                            height, width, channel = imgResultSag.shape
                            step = channel * width
                            # create QImage from image
                            qImg = QImage(imgResultSag.data, width, height,
                                          step, QImage.Format_BGR888)
                            # show image in img_label
                            # self.ui.imgBulunan.setVisible(True)
                            self.ui.imgBulunan.setPixmap(
                                QPixmap.fromImage(qImg))

            except Exception as Hata:
                # detection failure for this frame ("Bulma" = finding)
                print('Bulma hatası oluştu :', Hata)
        except Exception as Hata:
            print('Hata oluştu :', Hata)
예제 #9
0
            # Show answer and grade on final image
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1, 0)
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1, 0)

            imgArray = ([img, imgGray, imgCanny, imgContours],
                        [imgBigContour, imgThresh, imgWarpColored, imgFinal])
            cv2.imshow("Final Result", imgFinal)
    # NOTE(review): bare `except:` swallows every failure above and falls
    # back to a blank grid — consider `except Exception:` with logging.
    except:
        imgArray = ([img, imgGray, imgCanny,
                     imgContours], [imgBlank, imgBlank, imgBlank, imgBlank])

    # NOTE(review): the first label row looks out of step with imgArray —
    # imgGray is labelled "Canny" and imgCanny "Edges"; confirm intended.
    lables = [["Original", "Canny", "Edges", "Contours"],
              ["Biggest Contour", "Threshold", "Warpped", "Final"]]

    stackedImage = utlis.stackImages(imgArray, 0.5, lables)
    cv2.imshow("Result", stackedImage)

    # Save images
    if cv2.waitKey(1) & 0xFF == ord('s'):
        cv2.imwrite("Scanned/myImage" + str(count) + ".jpg", imgFinal)
        # flash a "Scan Saved" banner over the stacked view for 300 ms
        cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230),
                                     int(stackedImage.shape[0] / 2) + 50),
                      (1100, 350), (0, 255, 0), cv2.FILLED)
        cv2.putText(stackedImage, "Scan Saved",
                    (int(stackedImage.shape[1] / 2) - 200,
                     int(stackedImage.shape[0] / 2)), cv2.FONT_HERSHEY_DUPLEX,
                    3, (0, 0, 255), 5, cv2.LINE_AA)
        cv2.imshow('Result', stackedImage)
        cv2.waitKey(300)
        count += 1
예제 #10
0
                            # Steering via CAN frames on can0: 01 = left,
                            # 03 = right, 04 = stop/no-line (02 = straight,
                            # presumably sent in the branch above this chunk).
                            #print(cx,"Straight")

                        elif cx < 110:
                            os.system('cansend can0 666#01')
                            #print(cx,"left")
                        elif cx > 190:
                            os.system('cansend can0 666#03')
                            #print(cx,"Right")
                    else:
                        #print(cx,"value")
                        os.system('cansend can0 666#04')

            else:
                 print ("I don't see the line")
                 os.system('cansend can0 666#04')
            imgStacked = utlis.stackImages(0.7, ([crop_img,blur,th],
                                         [gray,edged,mask] ))

            cv.imshow("Image Processing",imgStacked)

            #cv.imshow('camera',frame)
            # NOTE(review): cv.waitKey returns an int, so `keyboard == 'q'`
            # is never true — only the ESC (27) comparison can match.
            keyboard = cv.waitKey(30)
            if keyboard == 'q' or keyboard == 27:
                break
        except KeyboardInterrupt:
            break

    # cleanup
    capture.release()
    cv.destroyAllWindows()
    del capture
    # NOTE(review): the next line's 3-space indent is inconsistent with the
    # 4-space block above — likely a paste error; confirm and fix indent.
   os.system('sudo ip link set down can0') 
예제 #11
0
def readfile(path):
    """Read a scanned answer-sheet image and grade the marked bubbles.

    Detects the answer area and grade area, warps both to a fixed size,
    thresholds the answer area, counts the filled pixels per bubble and
    stores the chosen answer index per question (-1 when no clear mark)
    in the module-level global ``myIndex``.

    Parameters
    ----------
    path : str
        Filesystem path of the sheet image to read.

    Side effects
    ------------
    Rebinds the module-level ``myIndex`` list and calls ``cv2.waitKey(0)``
    (blocks until a key is pressed).
    """
    # ===============================
    widthImg = 1245
    heightImg = 3000
    question = 50   # number of questions on the sheet
    choices = 5     # answer choices per question
    # ===============================
    img = cv2.imread(path)

    # PREPROCESSING
    img = cv2.resize(img, (widthImg, heightImg))
    imgContours = img.copy()
    imgBiggestContours = img.copy()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, 10, 50)

    # FINDING ALL CONTOURS
    countours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours, countours, -1, (0, 255, 0), 10)

    # FIND RECTANGLES (largest = answer area, second = grade area)
    rectCon = utlis.rectContour(countours)
    biggestContour = utlis.getCornerPoints(rectCon[0])
    gradePoints = utlis.getCornerPoints(rectCon[1])

    # HACK: the detected answer-area corners are overridden with measured,
    # hard-coded coordinates, so the sheet must always be positioned the
    # same way.  TODO: remove once automatic corner detection is reliable.
    test = biggestContour.copy()
    test[0][0] = [333, 2617]
    test[1][0] = [331, 437]
    test[2][0] = [775, 437]
    test[3][0] = [778, 2617]
    biggestContour = test

    # Fallbacks so the display stacking below never hits a NameError when
    # the contour branch is skipped.
    imgWarpColored = np.zeros_like(img)
    imgThresh = np.zeros_like(imgGray)

    if biggestContour.size != 0 and gradePoints.size != 0:
        cv2.drawContours(imgBiggestContours, biggestContour, -1, (0, 255, 0), 20)
        cv2.drawContours(imgBiggestContours, gradePoints, -1, (255, 0, 0), 20)

        biggestContour = utlis.reorder(biggestContour)
        gradePoints = utlis.reorder(gradePoints)

        # Warp the answer area to a flat, full-size view.
        pt1 = np.float32(biggestContour)
        pt2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                          [widthImg, heightImg]])
        matrix = cv2.getPerspectiveTransform(pt1, pt2)
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

        # Warp the grade box to a fixed 325x150 view.
        ptG1 = np.float32(gradePoints)
        ptG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])
        matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
        imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))

        # Inverse-binarize so marked bubbles become the bright pixels.
        imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
        imgThresh = cv2.threshold(imgWarpGray, 150, 255,
                                  cv2.THRESH_BINARY_INV)[1]

        # One sub-image per bubble, row-major order.
        boxes = utlis.splitBoxes(imgThresh)

        # GETTING NON-ZERO PIXEL VALUES OF EACH BOX
        myPixelVal = np.zeros((question, choices))
        countC = 0
        countR = 0
        for image in boxes:
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC] = totalPixels
            countC += 1
            if countC == choices:
                countR += 1
                countC = 0

        # A bubble counts as marked when it has far more filled pixels than
        # the row median.  Guard against a zero median, which would
        # otherwise divide by zero (numpy would yield inf and mark every
        # non-empty bubble).
        global myIndex
        localmyIndex = []
        for x in range(0, question):
            arrline = myPixelVal[x]
            arrmed = np.median(arrline)
            localmyIndex.append(-1)  # -1 => no clear mark for this question
            for y in range(0, choices):
                if arrmed > 0 and myPixelVal[x][y] / arrmed > 2:
                    localmyIndex[x] = y
        myIndex = localmyIndex

    imageArray = ([img, imgGray, imgBlur, imgCanny],
                  [imgContours, imgBiggestContours, imgWarpColored, imgThresh])
    imgStacked = utlis.stackImages(imageArray, 0.5)

    # cv2.imshow("stacked images", imgStacked)
    cv2.waitKey(0)
예제 #12
0
            ImgRawGrade = np.zeros_like(ImgGradeDisplay)
            cv2.putText(ImgRawGrade,
                        str(int(score)) + "%", (50, 100),
                        cv2.FONT_HERSHEY_COMPLEX, 3, (0, 100, 255), 3)
            #cv2.imshow("Grade Image",ImgRawGrade)
            InvMatrixG = cv2.getPerspectiveTransform(ptG2, ptG1)
            ImgInvGradeDisplay = cv2.warpPerspective(ImgRawGrade, InvMatrixG,
                                                     (widthImg, heightImg))

            imgFinal = cv2.addWeighted(imgFinal, 1, ImgInvWarp, 1, 0)
            imgFinal = cv2.addWeighted(imgFinal, 1, ImgInvGradeDisplay, 1, 0)

        imgBlank = np.zeros_like(img)
        ImageArray = ([img, imgGray, imgBlur, imgCanny], [
            imgContours, imgBigestContours, ImgWarpColored, ImgThreshold
        ], [ImgResult, ImgDrawing, ImgInvWarp, imgFinal])
    except:
        imgBlank = np.zeros_like(img)
        ImageArray = ([img, imgGray, imgBlur,
                       imgCanny], [imgBlank, imgBlank, imgBlank, imgBlank],
                      [imgBlank, imgBlank, imgBlank, imgBlank])
    lables = [["Original", "Gray", "Blur", "Canny"],
              ["Contours", "Biggest Con", "Warp", "Threshold"],
              ["Result", "Raw Drawing", "Inv Raw", "Final"]]
    ImageStacked = utlis.stackImages(ImageArray, 0.3, lables)
    cv2.imshow("The Final Result", imgFinal)
    cv2.imshow("Stacked Images", ImageStacked)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.imwrite("FinalImage.jpg", imgFinal)
        cv2.waitKey(300)
        #break
예제 #13
0
def getLaneCurve(img, display=2):
    """Estimate the lane curvature of a frame and optionally visualize it.

    Parameters
    ----------
    img : numpy.ndarray
        BGR camera frame (shape ``(h, w, 3)``).
    display : int, optional
        0 = no visualization, 1 = show the result frame only,
        2 = show the full processing pipeline stack (default).

    Returns
    -------
    float
        Curvature normalized and clamped to ``[-1.0, 1.0]``; negative
        means the lane curves left, positive right.

    Notes
    -----
    Uses the module-level ``curveList``/``avgVal`` globals to smooth the
    raw curvature over the last ``avgVal`` frames.
    """
    imgCopy = img.copy()
    imgResult = img.copy()

    #### mask the image and get the white A4 paper
    imgThres = utlis.thresholding(img)

    #### Warp the image to get a bird's-eye view
    hT, wT, c = img.shape
    # Trapezoid corners (hand-tuned for this camera mount) mapped to a
    # rectangle by the warp.
    points = [[128, 147], [352, 147], [49, 240], [431, 240]]
    imgWarp = utlis.warpImg(imgThres, points, wT, hT)

    # draw the warp points (only needed for the visualizations)
    if display != 0:
        imgWarpPoints = utlis.drawPoints(imgCopy, points)

    #### get the middle point to calculate the curve
    # Bottom-quarter histogram locates the lane center near the vehicle...
    middlePoint, imgHist = utlis.getHistogram(imgWarp,
                                              display=True,
                                              minPer=0.8,
                                              region=4)
    # ...full-image histogram locates the average lane position ahead.
    curveAveragePoint, imgHist = utlis.getHistogram(imgWarp,
                                                    display=True,
                                                    minPer=0.9)
    curveRaw = curveAveragePoint - middlePoint

    # average over the last `avgVal` frames to smooth out jitter
    curveList.append(curveRaw)
    if len(curveList) > avgVal:
        curveList.pop(0)
    curve = int(sum(curveList) / len(curveList))

    # displays
    if display != 0:
        imgInvWarp = utlis.warpImg(imgWarp, points, wT, hT, inv=True)
        imgInvWarp = cv2.cvtColor(imgInvWarp, cv2.COLOR_GRAY2BGR)
        imgInvWarp[0:hT // 3, 0:wT] = 0, 0, 0  # blank the far third of the road
        imgLaneColor = np.zeros_like(img)
        imgLaneColor[:] = 0, 255, 0
        imgLaneColor = cv2.bitwise_and(imgInvWarp, imgLaneColor)
        imgResult = cv2.addWeighted(imgResult, 1, imgLaneColor, 1, 0)
        midY = 450
        cv2.putText(imgResult, str(curve), (wT // 2 - 80, 85),
                    cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 255), 3)
        # horizontal line from center showing curve direction/magnitude
        cv2.line(imgResult, (wT // 2, midY), (wT // 2 + (curve * 3), midY),
                 (255, 0, 255), 5)
        # tick at the end of the curve line
        cv2.line(imgResult, ((wT // 2 + (curve * 3)), midY - 25),
                 (wT // 2 + (curve * 3), midY + 25), (0, 255, 0), 5)
        # ruler ticks that drift slightly with the curve
        for x in range(-30, 30):
            w = wT // 20
            cv2.line(imgResult, (w * x + int(curve // 50), midY - 10),
                     (w * x + int(curve // 50), midY + 10), (0, 0, 255), 2)
        # fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # cv2.putText(imgResult, 'FPS ' + str(int(fps)), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (230, 50, 50), 3)
    if display == 2:
        imgStacked = utlis.stackImages(0.7,
                                       ([img, imgWarpPoints, imgWarp],
                                        [imgHist, imgLaneColor, imgResult]))
        cv2.imshow('ImageStack', imgStacked)
        cv2.waitKey(1)
    elif display == 1:
        # NOTE(review): window name 'Resutlt' is misspelled but kept
        # byte-identical in case other code references this window title.
        cv2.imshow('Resutlt', imgResult)
        cv2.waitKey(1)

    # NORMALIZATION: scale to roughly [-1, 1] and clamp.
    # BUGFIX: the original used `curve == 1` / `curve == -1` (no-op
    # comparisons), so the clamp never took effect.
    curve = curve / 100
    if curve > 1: curve = 1
    if curve < -1: curve = -1

    return curve
예제 #14
0
        elif averageCurve <= -30:
            turn = 'left'
        else:
            turn = 'stay in center'
        cv2.putText(imgFinal, turn, (frameWidth // 2 - 70, 70),
                    cv2.FONT_HERSHEY_DUPLEX, 1.75, (0, 0, 255), 2, cv2.LINE_AA)
        #cv2.putText(imgFinal, str(int(averageCurve)), (frameWidth//2-70, 70), cv2.FONT_HERSHEY_DUPLEX, 1.75, (0, 0, 255), 2, cv2.LINE_AA)

    except:
        lane_curve = 00
        pass

    imgFinal = utlis.drawLines(imgFinal, lane_curve)

    imgThres = cv2.cvtColor(imgThres, cv2.COLOR_GRAY2BGR)
    imgBlank = np.zeros_like(img)
    # imgStacked = utlis.stackImages(0.7, ([img,imgUndis,imgWarpPoints],
    #                                      [imgColor, imgCanny, imgThres],
    #                                      [imgWarp,imgSliding,imgFinal]
    #                                      ))
    imgStacked = utlis.stackImages(
        0.7, ([img, imgUndis, imgWarpPoints], [imgWarp, imgSliding, imgFinal]))

    cv2.imshow("PipeLine", imgStacked)
    cv2.imshow("Result", imgFinal)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
예제 #15
0
            imageFinal = cv2.addWeighted(imageFinal, 1, imgInvWarp, 1, 0)
            imageFinal = cv2.rectangle(imageFinal, (50, (heightImg - 50)),
                                       (widthImg - 50, (heightImg - 10)),
                                       (255, 255, 255), -1)
            imageFinal = utlis.print_utf8_text(
                imageFinal, mesaj, color, (widthImg / 2 - (len(mesaj) * 6)),
                (heightImg - 50))
            #cv2.putText(imageFinal,'Deneme',(50,125),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
            cv2.imshow("Camera", imageFinal)

        imgBlank = np.zeros_like(img)

        imageArray = ([img, imgGray, imgBlur, imgCanny], [
            imgCountours, imgBiggestCountours, imgWarpColored, imgThresh
        ], [imgResultSag, imageFinal, imgBlank, imgBlank])
        imgStacked = utlis.stackImages(imageArray, .5)
        #cv2.imshow("imgStacked",imgStacked)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    except Exception as hata:
        imgBlank = np.zeros_like(img)
        imageArray = ([img, imgGray, imgBlur,
                       imgCanny], [imgBlank, imgBlank, imgBlank, imgBlank],
                      [imgBlank, imgBlank, imgBlank, imgBlank])
        imgStacked = utlis.stackImages(imageArray, .5)
        #cv2.imshow("imgStacked",imgStacked)
        cv2.imshow("Camera", img)
        print('hata' + str(hata))
        #break