Example #1
        pt1=np.float32(biggestContours)
        pt2=np.float32([[0,0],[widthImg,0],[0,heightImg],[widthImg,heightImg]])
        matrix=cv2.getPerspectiveTransform(pt1,pt2)
        imgWrapColored=cv2.warpPerspective(img,matrix,(widthImg,heightImg))

        ptG1 = np.float32(gradePoints)
        ptG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])  # the grade-box width/height here can be any desired values
        matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
        imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))
        # cv2.imshow("Grade Display",imgGradeDisplay)

        # APPLY THRESHOLD
        imgWrapGray=cv2.cvtColor(imgWrapColored,cv2.COLOR_BGR2GRAY)
        imgThresh=cv2.threshold(imgWrapGray,170,255,cv2.THRESH_BINARY_INV)[1]

        boxes=utlis.splitBoxes(imgThresh)
        # cv2.imshow("boxes",boxes[24])
        # print(cv2.countNonZero(boxes[1]),cv2.countNonZero(boxes[2]))

        countR = 0
        countC = 0
        myPixelVal = np.zeros((questions, choices))  # TO STORE THE NON ZERO VALUES OF EACH BOX
        for image in boxes:
            # cv2.imshow(str(countR)+str(countC),image)
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC] = totalPixels
            countC += 1
            if (countC == choices):
                countC = 0
                countR += 1
        # print(myPixelVal)
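All of these snippets lean on a utlis.splitBoxes helper that is never shown. A minimal sketch of such a helper, assuming the thresholded warp divides evenly into a questions x choices grid (the real utlis implementation may differ):

import numpy as np

def splitBoxes(img, rows=5, cols=5):
    # split the thresholded answer sheet into one sub-image per bubble:
    # first into rows horizontal strips (questions), then each strip into cols cells (choices)
    boxes = []
    for strip in np.vsplit(img, rows):
        for cell in np.hsplit(strip, cols):
            boxes.append(cell)
    return boxes

np.vsplit and np.hsplit require the image height and width to be divisible by rows and cols, which is why the examples resize the sheet to fixed dimensions before splitting.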
Example #2
def omr(imgpath,field,answer):

    pathImage = imgpath
    heightImg = 700
    widthImg = 700
    questions=5
    choices=5
    ans= answer


    img = cv2.imread(pathImage)
    img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE
    imgFinal = img.copy()
    imgBlank = np.zeros((heightImg,widthImg, 3), np.uint8) # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT IMAGE TO GRAY SCALE
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR
    imgCanny = cv2.Canny(imgBlur,10,70) # APPLY CANNY


    ## FIND ALL CONTOURS
    imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
    imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
    contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # FIND ALL CONTOURS
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10) # DRAW ALL DETECTED CONTOURS
    rectCon = utlis.rectContour(contours) # FILTER FOR RECTANGLE CONTOURS
    biggestPoints= utlis.getCornerPoints(rectCon[0]) # GET CORNER POINTS OF THE BIGGEST RECTANGLE
    # gradePoints = utlis.getCornerPoints(rectCon[1]) # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE



    if biggestPoints.size != 0:

        # BIGGEST RECTANGLE WARPING
        biggestPoints=utlis.reorder(biggestPoints) # REORDER FOR WARPING
        cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR
        pts1 = np.float32(biggestPoints) # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2) # GET TRANSFORMATION MATRIX
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg)) # APPLY WARP PERSPECTIVE


        # APPLY THRESHOLD
        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY) # CONVERT TO GRAYSCALE
        imgThresh = cv2.threshold(imgWarpGray, 170, 255,cv2.THRESH_BINARY_INV )[1] # APPLY THRESHOLD AND INVERSE

        boxes = utlis.splitBoxes(imgThresh) # GET INDIVIDUAL BOXES
        # cv2.imshow("Split Test ", boxes[3])
        countR=0
        countC=0
        myPixelVal = np.zeros((questions,choices)) # TO STORE THE NON ZERO VALUES OF EACH BOX
        nonPixelVal = np.zeros((1,1)) # SENTINEL INDEX (5) USED WHEN NO SINGLE CHOICE IS CLEARLY MARKED
        nonPixelVal[0][0] = 5
        for image in boxes:
            #cv2.imshow(str(countR)+str(countC),image)
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC]= totalPixels
            countC += 1
            if (countC==choices):
                countC = 0
                countR += 1

        # FIND THE USER ANSWERS AND PUT THEM IN A LIST
        myIndex = []
        for x in range(0, questions):
            arr = myPixelVal[x]
            max1 = np.amax(arr)                 # highest pixel count in this row
            myIndexVal = np.where(arr == max1)  # column index (choice) of the highest count
            temp = np.delete(arr, myIndexVal)
            max2 = np.amax(temp)                # second highest pixel count
            # IF THE TWO LARGEST COUNTS ARE TOO CLOSE, TREAT THE QUESTION AS AMBIGUOUS
            if (max1 / 1000 - max2 / 1000) < 2:
                myIndexVal = nonPixelVal
            myIndex.append(myIndexVal[0][0])
        print("USER ANSWERS", myIndex)

        # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS


        if field == 1:
            grading=[]
            for x in range(0,questions):
                if ans[x] == myIndex[x]:
                    grading.append(1)
                else:
                    grading.append(0)
            #print("GRADING",grading)
            score = (sum(grading)/questions)*50 # FINAL GRADE (SCALED TO A MAXIMUM OF 50)
            print("SCORE",score)
            
        
            imageArray = ([img,imgGray,imgBlur, imgCanny],
                          [imgContours,imgBigContour,imgThresh,imgBlank])
            lables = [["Original","Gray","Blur","Edges"],
                      ["Contours","Biggest Contour","Threshold"," "]]

            stackedImage = utlis.stackImages(imageArray,0.5,lables)
            cv2.imshow('Result', stackedImage)
            # cv2.imwrite('r.jpg', stackedImage)
            cv2.waitKey()
            return(str(score))

    return(myIndex)
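A hedged usage sketch for the omr() function above; the image path and answer key below are placeholders, and field=1 selects the grading branch that also pops up the stacked debug window:

if __name__ == "__main__":
    answer_key = [1, 2, 0, 2, 4]  # hypothetical correct choice index (0-4) for each of the 5 questions
    result = omr("omr_sheet.jpg", 1, answer_key)  # "omr_sheet.jpg" is a placeholder path
    print("Returned:", result)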
Example #3
            ptsG1 = np.float32(gradePoints)  # PREPARE POINTS FOR WARP
            ptsG2 = np.float32([[0, 0], [325, 0], [0, 150],
                                [325, 150]])  # PREPARE POINTS FOR WARP
            matrixG = cv2.getPerspectiveTransform(
                ptsG1, ptsG2)  # GET TRANSFORMATION MATRIX
            imgGradeDisplay = cv2.warpPerspective(
                img, matrixG, (325, 150))  # APPLY WARP PERSPECTIVE

            # APPLY THRESHOLD
            imgWarpGray = cv2.cvtColor(
                imgWarpColored, cv2.COLOR_BGR2GRAY)  # CONVERT TO GRAYSCALE
            imgThresh = cv2.threshold(
                imgWarpGray, 170, 255,
                cv2.THRESH_BINARY_INV)[1]  # APPLY THRESHOLD AND INVERSE

            boxes = utlis.splitBoxes(imgThresh)  # GET INDIVIDUAL BOXES
            cv2.imshow("Split Test ", boxes[3])
            countR = 0
            countC = 0
            myPixelVal = np.zeros(
                (questions,
                 choices))  # TO STORE THE NON ZERO VALUES OF EACH BOX
            for image in boxes:
                #cv2.imshow(str(countR)+str(countC),image)
                totalPixels = cv2.countNonZero(image)
                myPixelVal[countR][countC] = totalPixels
                countC += 1
                if (countC == choices):
                    countC = 0
                    countR += 1
Example #4
    def GoruntuIsle(self, img):

        try:

            global widthImg, heightImg, ans, x, y, xsag, xsol, w, h, esikdeger, ox, oy, ow, oh, secimSayisi, sorusayisi, sayac, sonbulunanNumara, bulunanNumara, sonbulunanpuan, bulunanpuan
            img2 = cv2.resize(img, (widthImg, heightImg))
            if cameraNo == -1:
                pass
            else:
                img2 = cv2.rotate(img2, cv2.ROTATE_180)
            imgCountours = img2.copy()
            imageFinal = img2.copy()
            imgBiggestCountours = img2.copy()
            imgGray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
            imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
            imgCanny = cv2.Canny(imgBlur, 10, 50)
            #cv2.imshow("test",imgCanny)

            try:
                #FIND ALL CONTOURS
                countours, hierarchy = cv2.findContours(
                    imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                cv2.drawContours(imgCountours, countours, -1, (0, 255, 0), 10)

                #FIND RECTANGLES
                rectCon = utlis.rectContour(countours)
                biggestContour = utlis.getCornerPoints(rectCon[0])
                #print(biggestContour)

                if biggestContour.size != 0:
                    cv2.drawContours(imgBiggestCountours, biggestContour, -1,
                                     (0, 255, 0), 20)
                    biggestContour = utlis.reorder(biggestContour)
                    pts1 = np.float32(
                        biggestContour)  # PREPARE POINTS FOR WARP
                    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                                       [widthImg,
                                        heightImg]])  # PREPARE POINTS FOR WARP
                    matrix = cv2.getPerspectiveTransform(
                        pts1, pts2)  # GET TRANSFORMATION MATRIX
                    imgWarpColored = cv2.warpPerspective(
                        img2, matrix,
                        (widthImg, heightImg))  # APPLY WARP PERSPECTIVE
                    #cv2.imshow("bulunan",imgWarpColored)
                    #APPLY THRESHOLD

                    imgWarpGray = cv2.cvtColor(imgWarpColored,
                                               cv2.COLOR_BGR2GRAY)
                    imgThresh = cv2.threshold(imgWarpGray, tresh, 255,
                                              cv2.THRESH_BINARY_INV)[1]

                    #boxes=utlis.splitBoxes(imgThresh)

                    crop_imgSol = imgThresh[y:y + h, xsol:(xsol + w)]
                    crop_imgSag = imgThresh[y:y + h, xsag:(xsag + w)]
                    crop_imgOgrenciNu = imgThresh[oy:oy + oh, ox:(ox + ow)]

                    #cv2.imshow("cropped", crop_imgSol)
                    #cv2.imwrite("croppedsol.jpg",crop_imgSol)
                    #cv2.imwrite("croppedsag.jpg",crop_imgSag)

                    boxesSol = utlis.splitBoxes(crop_imgSol)
                    boxesSag = utlis.splitBoxes(crop_imgSag)
                    boxesOgrenciNu = utlis.splitBoxesOgrenciNu(
                        crop_imgOgrenciNu)

                    sorusayisi = 20
                    #GETTING NON-ZERO PIXEL VALUES OF EACH BOX
                    myPixelVal = np.zeros((sorusayisi, secimSayisi))

                    myPixelValOgrenciNu = np.zeros((4, 10))
                    countC = 0
                    countR = 0

                    for image in boxesOgrenciNu:
                        totalPixels = cv2.countNonZero(image)
                        myPixelValOgrenciNu[countR][countC] = totalPixels
                        countC += 1
                        if (countC == 10):
                            countR += 1
                            countC = 0
                    #print(myPixelValOgrenciNu)

                    countC = 0
                    countR = 0

                    for image in boxesSol:
                        totalPixels = cv2.countNonZero(image)
                        myPixelVal[countR][countC] = totalPixels
                        countC += 1
                        if (countC == secimSayisi):
                            countR += 1
                            countC = 0
                    #print(myPixelVal)

                    for image in boxesSag:
                        totalPixels = cv2.countNonZero(image)
                        myPixelVal[countR][countC] = totalPixels
                        countC += 1
                        if (countC == secimSayisi):
                            countR += 1
                            countC = 0

                    #FINDING INDEX VALUES OF THE MARKINGS

                    myIndexOgrenciNu = []
                    for x in range(0, 4):
                        arr = myPixelValOgrenciNu[x]
                        #print("arr",arr)
                        myIndexVal = np.where(arr == np.amax(arr))
                        #print(myIndexVal[0])
                        myIndexOgrenciNu.append(myIndexVal[0][0])
                    ogrenciNumarasi = str(myIndexOgrenciNu[0]) + str(
                        myIndexOgrenciNu[1]) + str(myIndexOgrenciNu[2]) + str(
                            myIndexOgrenciNu[3])
                    #print('Student number {}'.format(ogrenciNumarasi))

                    #cv2.imshow('mum',utlis.showNumber2(imgWarpColored,myIndexOgrenciNu,4,10,ox,oy,ow,oh))

                    myIndex = []
                    for x in range(0, sorusayisi):
                        isaretsayisi = 0
                        arr = myPixelVal[x]
                        #print("arr-"+str(x),arr)
                        #print('max',np.amax(arr))
                        #print('sayı',np.count_nonzero(arr>esikdeger))
                        isaretsayisi = np.count_nonzero(arr > esikdeger)
                        enfazla = np.amax(arr)
                        if isaretsayisi > 1:
                            myIndexVal[0][0] = 5  # two or more choices marked
                        elif esikdeger < enfazla:
                            myIndexVal = np.where(arr == np.amax(arr))
                            #print(np.where(arr==np.amax(arr))[0])
                        else:
                            #pass
                            myIndexVal[0][0] = 4  # blank: no choice above the threshold
                        #print(myIndexVal[0])
                        myIndex.append(myIndexVal[0][0])
                    #print(myIndex)

                    #GRADING
                    grading = []
                    for x in range(0, sorusayisi):
                        if myIndex[x] == 4:
                            grading.append(4)
                        elif myIndex[x] == 5:
                            grading.append(5)
                        elif ans[x] == myIndex[x]:
                            grading.append(1)
                        else:
                            grading.append(0)
                    #print(grading)

                    #SCORE
                    DogrularSay = grading.count(1)
                    YanlislariSay = grading.count(0) + grading.count(5)
                    BoslariSay = grading.count(4)
                    score = (DogrularSay / sorusayisi) * 100
                    mesaj = ('No:' + ogrenciNumarasi + ' Score:' + str(score) +
                             ' Correct:' + str(DogrularSay) + ' Wrong:' +
                             str(YanlislariSay) + ' Blank:' + str(BoslariSay))
                    #print(score)

                    #DISPLAY ANSWERS
                    #imgResult=imgWarpColored.copy()

                    imgResultSol = imgWarpColored.copy()
                    #imgResultSag=imgWarpColored.copy()
                    #imgResult= imgResult[y:y+h, x:x+w]
                    imgResultNu = utlis.showNumber2(imgResultSol,
                                                    myIndexOgrenciNu, 4, 10,
                                                    ox, oy, ow, oh)
                    imgResultSol = utlis.showAnswers2(imgResultSol,
                                                      myIndex[0:10],
                                                      grading[0:10], ans[0:10],
                                                      10, 4, xsol, y, w, h)
                    imgResultSag = utlis.showAnswers2(imgResultSol,
                                                      myIndex[10:20],
                                                      grading[10:20],
                                                      ans[10:20], 10, 4, xsag,
                                                      y, w, h)

                    #cv2.imshow("imgResultSag",imgResultSag)

                    imRawDrawingSol = np.zeros_like(imgResultSol)
                    imgResultNu = utlis.showNumber2(imRawDrawingSol,
                                                    myIndexOgrenciNu, 4, 10,
                                                    ox, oy, ow, oh)
                    imRawDrawingSol = utlis.showAnswers2(
                        imRawDrawingSol, myIndex[0:10], grading[0:10],
                        ans[0:10], 10, 4, xsol, y, w, h)
                    imRawDrawingSag = utlis.showAnswers2(
                        imRawDrawingSol, myIndex[10:20], grading[10:20],
                        ans[10:20], 10, 4, xsag, y, w, h)

                    #cv2.imshow("imgResult1",imRawDrawing)
                    #pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
                    #pts2s = np.float32([[0, 0],[w, 0], [0, h],[w, h]]) # PREPARE POINTS FOR WARP
                    invMatrix = cv2.getPerspectiveTransform(
                        pts2, pts1)  # GET TRANSFORMATION MATRIX

                    # font
                    font = cv2.FONT_HERSHEY_SIMPLEX

                    # origin (bottom-left corner of the text)
                    org = (50, heightImg - 20)

                    # fontScale
                    fontScale = 0.8

                    # text color as b, g, r, a (black)
                    color = (0, 0, 0, 0)

                    # Line thickness of 2 px
                    thickness = 2

                    # Using cv2.putText() method
                    #cv2.putText(imRawDrawingSol, 'No:'+ogrenciNumarasi+' Puan:'+str(score) +' D:'+str(DogrularSay)+' Y:'+str(YanlislariSay)+' B:'+str(BoslariSay), org, font,fontScale, color, thickness, cv2.LINE_AA,)

                    imgInvWarp = cv2.warpPerspective(
                        imRawDrawingSol, invMatrix,
                        ((widthImg), heightImg))  # APPLY WARP PERSPECTIVE
                    #cv2.putText(imageFinal,str(score),((widthImg-150),(heightImg-100)),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
                    imageFinal = cv2.addWeighted(imageFinal, 1, imgInvWarp, 1,
                                                 0)
                    imageFinal = cv2.rectangle(imageFinal,
                                               (50, (heightImg - 50)),
                                               (widthImg - 50,
                                                (heightImg - 10)),
                                               (255, 255, 255), -1)
                    imageFinal = utlis.print_utf8_text(
                        imageFinal, mesaj, color,
                        (widthImg / 2 - (len(mesaj) * 6)), (heightImg - 50))

                    imageFinal = cv2.rectangle(imageFinal, (150, 25),
                                               (600, 450), (0, 255, 255), 3,
                                               cv2.LINE_AA)
                    #cv2.putText(imageFinal,'Deneme',(50,125),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
                    #cv2.imshow("Camera",imageFinal)

                    if asamalariGoster == True:
                        imgBlank = np.zeros_like(img2)
                        imageArray = ([img2, imgGray, imgBlur, imgCanny], [
                            imgCountours, imgBiggestCountours, imgWarpColored,
                            imgThresh
                        ], [imgResultSag, imageFinal, imgBlank, imgBlank])
                        imgStacked = utlis.stackImages(imageArray, .5)
                        cv2.imshow("imgStacked", imgStacked)

                    genislik = int(widthImg / 1.5)
                    yukseklik = int(heightImg / 1.5)
                    imageFinal = cv2.resize(imageFinal, (genislik, yukseklik))
                    # print(f'new width-height {genislik} {yukseklik}')
                    height, width, channel = imageFinal.shape
                    step = channel * width
                    # create QImage from image
                    qImg = QImage(imageFinal.data, width, height, step,
                                  QImage.Format_BGR888)
                    # show image in img_label
                    self.ui.imgCamera_2.setPixmap(QPixmap.fromImage(qImg))

                    ogrenciCevapSikleri = []  # student's answers as letters
                    ogrenciDogruYanlis = []  # per-question correct/wrong flags
                    cevaplar = []  # raw answer indices

                    for i in myIndex:
                        if i == 0: ogrenciCevapSikleri.append('A')
                        elif i == 1: ogrenciCevapSikleri.append('B')
                        elif i == 2: ogrenciCevapSikleri.append('C')
                        elif i == 3: ogrenciCevapSikleri.append('D')
                        elif i == 4: ogrenciCevapSikleri.append('N')  # blank (no mark)
                        elif i == 5: ogrenciCevapSikleri.append('M')  # multiple marks

                    for x in range(0, sorusayisi):
                        if myIndex[x] == ans[x]: ogrenciDogruYanlis.append('D')  # correct
                        else: ogrenciDogruYanlis.append('Y')  # wrong

                    for i in myIndex:
                        cevaplar.append(i)

                    #print(ogrenciCevapSikleri)
                    #print(ogrenciDogruYanlis)
                    # self.ui.imgBulunan.setVisible(False)

                    if otomatikDurdur == True:
                        print('auto-stop is enabled')
                        bulunanNumara = ogrenciNumarasi
                        bulunanpuan = score
                        if sayac < 5:
                            print(
                                f'counter {sayac} score {bulunanpuan} last score {sonbulunanpuan}'
                            )
                            if (sonbulunanpuan == bulunanpuan) and (
                                    sonbulunanNumara == bulunanNumara) and (
                                        int(sonbulunanNumara) > 0):
                                sayac += 1
                            else:
                                sayac = 0
                                sonbulunanNumara = bulunanNumara
                                sonbulunanpuan = bulunanpuan
                        else:
                            global toogledurum
                            toogledurum = False
                            # self.StartStop()
                            sayac = 0
                            print('found')
                            # print(ogrenciCevapSikleri)
                            # print(ogrenciDogruYanlis)

                            b = ''
                            for i in range(len(ogrenciCevapSikleri)):
                                b += (ogrenciCevapSikleri[i])
                            self.ui.lblogrencicevaplar.setPlainText(b)

                            imgResultSag = cv2.resize(imgResultSag,
                                                      (genislik, yukseklik))
                            # print(f'new width-height {genislik} {yukseklik}')
                            height, width, channel = imgResultSag.shape
                            step = channel * width
                            # create QImage from image
                            qImg = QImage(imgResultSag.data, width, height,
                                          step, QImage.Format_BGR888)
                            # show image in img_label
                            # self.ui.imgBulunan.setVisible(True)
                            self.ui.imgBulunan.setPixmap(
                                QPixmap.fromImage(qImg))

            except Exception as Hata:
                print('Detection error occurred:', Hata)
        except Exception as Hata:
            print('Error occurred:', Hata)
Example #5
def readfile(path):
    #===============================
    #path = "scantron-100.jpg"
    widthImg = 1245
    heightImg=3000
    question =50
    choices = 5
    #===============================
    img = cv2.imread(path)

    #PREPROCESSING
    img = cv2.resize(img,(widthImg,heightImg))
    imgContours = img.copy()
    imgBiggestContours = img.copy()
    imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
    imgCanny = cv2.Canny(imgBlur,10,50)

    # FINDING ALL CONTOURS
    countours, hierarchy = cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours,countours,-1,(0,255,0),10)

    #FIND RECTANGLES
    rectCon = utlis.rectContour(countours)
    biggestContour = utlis.getCornerPoints(rectCon[0])
    gradePoints = utlis.getCornerPoints(rectCon[1])
    test = biggestContour.copy()
    # OVERRIDE THE DETECTED CORNERS WITH HARD-CODED TEST POINTS FOR THIS SHEET
    test[0][0]=[333,2617]
    test[1][0]=[331,437]
    test[2][0]=[775,437]
    test[3][0]=[778,2617]
    #print("ttt:",test)
    #print("\n for contour\n",biggestContour )
    #print("\n for grade\n",gradePoints)
    biggestContour=test

    if biggestContour.size != 0 and gradePoints.size != 0:
        cv2.drawContours(imgBiggestContours,biggestContour,-1,(0,255,0),20)
        cv2.drawContours(imgBiggestContours,gradePoints,-1,(255,0,0),20)

        biggestContour= utlis.reorder(biggestContour)
        gradePoints = utlis.reorder(gradePoints)

        pt1 = np.float32(biggestContour)
        pt2= np.float32([[0,0],[widthImg,0],[0,heightImg],[widthImg,heightImg]])
        matrix = cv2.getPerspectiveTransform(pt1,pt2)
        imgWarpColored = cv2.warpPerspective(img,matrix,(widthImg,heightImg))

        ptG1 = np.float32(gradePoints)
        ptG2 = np.float32([[0,0],[325,0],[0,150],[325,150]])
        matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
        imgGradeDisplay = cv2.warpPerspective(img, matrixG,(325, 150))
        #cv2.imshow("Grade", imgGradeDisplay)

        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)
        imgThresh = cv2.threshold(imgWarpGray,150,255,cv2.THRESH_BINARY_INV)[1]
        #cv2.imshow("Grade", imgThresh)
        
        boxes = utlis.splitBoxes(imgThresh)
        #cv2.imshow("test", boxes[4])
        #print(cv2.countNonZero(boxes[2]),cv2.countNonZero(boxes[0]))


        #GETTING NON-ZERO PIXEL VALUES OF EACH BOX
        myPixelVal = np.zeros((question,choices))
        countC = 0
        countR = 0

        for image in boxes:
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC] = totalPixels
            countC +=1
            if (countC == choices):
                countR += 1
                countC = 0
        #print(myPixelVal)

        global myIndex
        localmyIndex = []
        for x in range(0, question):
            arrline = myPixelVal[x]
            arrmed= np.median(arrline)
            localmyIndex.append(-1)
            for y in range(0,choices):
                if(myPixelVal[x][y]/arrmed > 2):
                    localmyIndex[x]=y
        myIndex = localmyIndex




    imgBlank = np.zeros_like(img)
    imageArray = ([img,imgGray,imgBlur,imgCanny],
    [imgContours,imgBiggestContours,imgWarpColored,imgThresh])
    imgStacked = utlis.stackImages(imageArray,0.5)


    #cv2.imshow("stacked images",imgStacked)
    cv2.waitKey(0)
def process(path):

    file_path=path
    file=open(file_path,'r')
    lines=file.readlines()
    img_path=lines[0]
    img_path=img_path.strip()
    questions = int(lines[1])
    choices = 5
    if(questions==15):
        heightImg=690
    elif(questions==20):
        heightImg=700
    elif(questions==25):
        heightImg=700
    else:
        fileName = path.split('\\')[-1]
        fileName = fileName.split('.')[0]
        f = open('E:\\FinalProject\\Outputs\\' + fileName + '.txt', "w+")
        f.write("-1")
        return  # UNSUPPORTED QUESTION COUNT, STOP HERE
    print(img_path, questions)
    img = cv2.imread(img_path)

    img = cv2.resize(img, (widthImg, heightImg))  # RESIZE IMAGE

    imgFinal = img.copy()
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # CONVERT IMAGE TO GRAY SCALE
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)  # ADD GAUSSIAN BLUR
    imgCanny = cv2.Canny(imgBlur, 10, 70)  # APPLY CANNY

    try:
        ## FIND ALL CONTOURS
        imgContours = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # FIND ALL CONTOURS
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)  # DRAW ALL DETECTED CONTOURS
        rectCon = utlis.rectContour(contours)  # FILTER FOR RECTANGLE CONTOURS
        biggestPoints = utlis.getCornerPoints(rectCon[0])  # GET CORNER POINTS OF THE BIGGEST RECTANGLE
        #gradePoints = utlis.getCornerPoints(rectCon[1])  # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE

        if biggestPoints.size != 0 :

            # BIGGEST RECTANGLE WARPING
            biggestPoints = utlis.reorder(biggestPoints)  # REORDER FOR WARPING
            cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20)  # DRAW THE BIGGEST CONTOUR
            pts1 = np.float32(biggestPoints)  # PREPARE POINTS FOR WARP
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
            matrix = cv2.getPerspectiveTransform(pts1, pts2)  # GET TRANSFORMATION MATRIX
            imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))  # APPLY WARP PERSPECTIVE


            # APPLY THRESHOLD
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)  # CONVERT TO GRAYSCALE
            imgAdaptiveTh = cv2.adaptiveThreshold(imgWarpGray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
            imgThresh = cv2.threshold(imgAdaptiveTh, 170, 255, cv2.THRESH_BINARY_INV)[1]  # APPLY THRESHOLD AND INVERSE


            boxes = utlis.splitBoxes(imgThresh,questions)  # GET INDIVIDUAL BOXES
            countR = 0
            countC = 0
            myPixelVal = np.zeros((questions, choices))  # TO STORE THE NON ZERO VALUES OF EACH BOX
            for image in boxes:
                #cv2.imshow(str(countR)+str(countC),image)
                totalPixels = cv2.countNonZero(image)
                myPixelVal[countR][countC] = totalPixels
                #cv2.imshow("Split Test ", image)
                #print("hi",myPixelVal[countR][countC])
                #cv2.waitKey(0)
                countC += 1
                if (countC == choices):
                    countC = 0
                    countR += 1

            # FIND THE USER ANSWERS AND PUT THEM IN A LIST
            myIndex = []
            for x in range(0, questions):
                arr = myPixelVal[x]
                myIndexVal = np.where(arr == np.amax(arr))
                myIndex.append(myIndexVal[0][0])


            # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS
            answers=''
            #grading = []
            #print("hi")
            for x in range(0, questions):
                answers+=str(myIndex[x])
                #if ans[x] == myIndex[x]:
                    #grading.append(1)
                #else:
                    #grading.append(0)
            # print("GRADING",grading)
            #score = (sum(grading) / questions) * 100  # FINAL GRADE
            #print("SCORE",score)

            #result =str(myIndex)
            print("USER ANSWERS", answers)
            fileName = path.split('\\')[-1]
            fileName = fileName.split('.')[0]
            f = open('E:\\FinalProject\\Outputs\\' + fileName + '.txt', "w+")
            f.write(answers)
    except:
        imageArray = ([img, imgGray, imgCanny, imgContours],
                      [imgBlank, imgBlank, imgBlank, imgBlank])
        print("Exception")
        fileName = path.split('\\')[-1]
        fileName = fileName.split('.')[0]
        f = open('E:\\FinalProject\\Outputs\\' + fileName + '.txt', "w+")
        f.write("-1")
Example #7
            matrix = cv2.getPerspectiveTransform(pt1, pt2)
            ImgWarpColored = cv2.warpPerspective(img, matrix,
                                                 (widthImg, heightImg))

            ptG1 = np.float32(gradePoints)
            ptG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])
            matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
            ImgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))
            #cv2.imshow("Grade",ImgGradeDisplay)

            # Apply threshold
            ImgWarpGray = cv2.cvtColor(ImgWarpColored, cv2.COLOR_BGR2GRAY)
            ImgThreshold = cv2.threshold(ImgWarpGray, 170, 255,
                                         cv2.THRESH_BINARY_INV)[1]

            boxes = utlis.splitBoxes(ImgThreshold)
            #cv2.imshow("Box",boxes[20])

            # Getting No Zero Pixels Values of Each Box
            myPixesVal = np.zeros((question, choise))
            countR = 0
            countC = 0

            for image in boxes:
                totalPixes = cv2.countNonZero(image)
                myPixesVal[countR][countC] = totalPixes
                countC += 1
                if (countC == choise):
                    countR += 1
                    countC = 0
            #print(myPixesVal)
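Example #7 stops after filling myPixesVal; as in the other examples, the marked choice for each question would typically be the column with the largest non-zero count, for instance (a sketch using the names above):

            myIndex = [int(np.argmax(myPixesVal[x])) for x in range(question)]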
Example #8
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
            imgThresh = cv2.threshold(imgWarpGray, 100, 255,
                                      cv2.THRESH_BINARY_INV)[1]

            #boxes=utlis.splitBoxes(imgThresh)
            #cv2.imshow("test",boxes[1])

            crop_imgSol = imgThresh[y:y + h, xsol:(xsol + w)]
            crop_imgSag = imgThresh[y:y + h, xsag:(xsag + w)]
            crop_imgOgrenciNu = imgThresh[oy:oy + oh, ox:(ox + ow)]

            #cv2.imshow("cropped", crop_imgOgrenciNu)
            #cv2.imwrite("croppedsol.jpg",crop_imgSol)
            #cv2.imwrite("croppedsag.jpg",crop_imgSag)

            boxesSol = utlis.splitBoxes(crop_imgSol)
            boxesSag = utlis.splitBoxes(crop_imgSag)
            boxesOgrenciNu = utlis.splitBoxesOgrenciNu(crop_imgOgrenciNu)

            sorusayisi = 20
            #GETTING NON-ZERO PIXEL VALUES OF EACH BOX
            myPixelVal = np.zeros((sorusayisi, secimSayisi))

            myPixelValOgrenciNu = np.zeros((4, 10))
            countC = 0
            countR = 0

            for image in boxesOgrenciNu:
                totalPixels = cv2.countNonZero(image)
                myPixelValOgrenciNu[countR][countC] = totalPixels
                countC += 1