Example No. 1
    imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
    _, contours, _ = cv2.findContours(
        imgThreshold, cv2.RETR_TREE,
        cv2.CHAIN_APPROX_SIMPLE)  # FIND ALL CONTOURS
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0),
                     10)  # DRAW ALL DETECTED CONTOURS
    resized = cv2.resize(imgContours, (widthImg // 2, heightImg // 2))  # cv2.resize expects (width, height)
    cv2.imshow("contoured image", resized)
    cv2.waitKey(0)
    # FIND THE BIGGEST CONTOUR
    biggest, maxArea = utlis.biggestContour(contours)
    if biggest.size != 0:
        # print("biggest",biggest)

        biggest, flag = utlis.reorder(biggest)
        if flag == 1:
            for contour in contours:
                (x, y, w, h) = cv2.boundingRect(contour)
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 10)

            print("error!!")
            img = cv2.resize(img, (widthImg // 2, heightImg // 2))  # cv2.resize expects (width, height)
            cv2.imshow("image for try", img)
            cv2.waitKey(0)
            break
        # out = img[ topx:bottomx+1,topy:bottomy+1]
        # cv2.imshow("sample cropping",out)
        # cv2.waitKey(0)
        # cv2.drawContours(imgBigContour, biggest, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR
        # imgBigContour = utlis.drawRectangle(imgBigContour,biggest,2)
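These snippets all rely on a local `utlis` module that is not shown on this page. For reference, here is a minimal sketch of what `utlis.biggestContour` typically looks like in this kind of document-scanner code, assuming it simply keeps the largest contour that approximates to four corners (the 5000-pixel area floor is an assumption, not taken from the snippet). Note that Example No. 1 calls a `reorder` variant that also returns a flag; the single-return variant is sketched after Example No. 3.

import cv2
import numpy as np

def biggestContour(contours):
    # Return the largest 4-corner contour found, together with its area.
    biggest = np.array([])
    maxArea = 0
    for c in contours:
        area = cv2.contourArea(c)
        if area > 5000:  # assumed noise floor; small specks are ignored
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            if len(approx) == 4 and area > maxArea:
                biggest = approx
                maxArea = area
    return biggest, maxArea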
Example No. 2
    ########## FINDING ALL CONTOURS
    contours,hierarchy=cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours,contours,-1,(0,255,0),10)

    ######## FIND RECTANGLES
    rectCon=utlis.rectContour(contours)
    biggestContours=utlis.getCornerPoints(rectCon[0])
    gradePoints=utlis.getCornerPoints(rectCon[1])
    # print(biggestContours,gradePoint)
    # print(len(biggestContours))

    if biggestContours.size !=0 and gradePoints.size !=0:
        cv2.drawContours(imgBiggestContours,biggestContours,-1,(0,255,0),20)
        cv2.drawContours(imgBiggestContours, gradePoints, -1, (255, 0, 0), 20)
        # reorder the corner points of each rectangle so the origin corner is consistent for the warps below
        biggestContours=utlis.reorder(biggestContours)
        gradePoints=utlis.reorder(gradePoints)

        pt1=np.float32(biggestContours)
        pt2=np.float32([[0,0],[widthImg,0],[0,heightImg],[widthImg,heightImg]])
        matrix=cv2.getPerspectiveTransform(pt1,pt2)
        imgWrapColored=cv2.warpPerspective(img,matrix,(widthImg,heightImg))

        ptG1 = np.float32(gradePoints)
        ptG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])  # the grade-box width/height here can be any desired values
        matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
        imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))
        # cv2.imshow("Grade Display",imgGradeDisplay)

        # APPLY THRESHOLD
        imgWrapGray=cv2.cvtColor(imgWrapColored,cv2.COLOR_BGR2GRAY)
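`utlis.rectContour` and `utlis.getCornerPoints` are also part of the external helper module. A plausible minimal version, assuming the usual OMR pattern of keeping only four-cornered contours and sorting them by area so that `rectCon[0]` is the answer block and `rectCon[1]` the smaller grade box:

import cv2

def rectContour(contours):
    # Keep only contours that approximate to 4 corners, largest area first.
    rects = []
    for c in contours:
        if cv2.contourArea(c) > 50:  # assumed minimum area
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            if len(approx) == 4:
                rects.append(c)
    return sorted(rects, key=cv2.contourArea, reverse=True)

def getCornerPoints(contour):
    # Approximate a contour down to its corner points (a (4, 1, 2) array for a rectangle).
    peri = cv2.arcLength(contour, True)
    return cv2.approxPolyDP(contour, 0.02 * peri, True)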
Example No. 3
        imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        contours, hierarchy = cv2.findContours(
            imgCanny, cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_NONE)  # FIND ALL CONTOURS
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0),
                         10)  # DRAW ALL DETECTED CONTOURS
        rectCon = utlis.rectContour(contours)  # FILTER FOR RECTANGLE CONTOURS
        biggestPoints = utlis.getCornerPoints(
            rectCon[0])  # GET CORNER POINTS OF THE BIGGEST RECTANGLE
        gradePoints = utlis.getCornerPoints(
            rectCon[1])  # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE

        if biggestPoints.size != 0 and gradePoints.size != 0:

            # BIGGEST RECTANGLE WARPING
            biggestPoints = utlis.reorder(biggestPoints)  # REORDER FOR WARPING
            cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0),
                             20)  # DRAW THE BIGGEST CONTOUR
            pts1 = np.float32(biggestPoints)  # PREPARE POINTS FOR WARP
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                               [widthImg,
                                heightImg]])  # PREPARE POINTS FOR WARP
            matrix = cv2.getPerspectiveTransform(
                pts1, pts2)  # GET TRANSFORMATION MATRIX
            imgWarpColored = cv2.warpPerspective(
                img, matrix, (widthImg, heightImg))  # APPLY WARP PERSPECTIVE

            # SECOND BIGGEST RECTANGLE WARPING
            cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0),
                             20)  # DRAW THE SECOND BIGGEST CONTOUR
            gradePoints = utlis.reorder(gradePoints)  # REORDER FOR WARPING
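`utlis.reorder` has to put the four detected corners into the same order as `pts2` (top-left, top-right, bottom-left, bottom-right), otherwise the perspective warp flips or mirrors the sheet. A minimal sketch of the common sum/difference trick, assuming the (4, 1, 2) array produced by `cv2.approxPolyDP`:

import numpy as np

def reorder(points):
    # Order corner points as top-left, top-right, bottom-left, bottom-right.
    points = points.reshape((4, 2))
    ordered = np.zeros((4, 1, 2), dtype=np.int32)
    add = points.sum(axis=1)
    diff = np.diff(points, axis=1)
    ordered[0] = points[np.argmin(add)]   # top-left has the smallest x + y
    ordered[3] = points[np.argmax(add)]   # bottom-right has the largest x + y
    ordered[1] = points[np.argmin(diff)]  # top-right has the smallest y - x
    ordered[2] = points[np.argmax(diff)]  # bottom-left has the largest y - x
    return ordered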
Example No. 4
    imgThreshold = cv2.Canny(imgBlur, thres[0], thres[1])  # APPLY CANNY EDGE DETECTION
    kernel = np.ones((5, 5))  # INITIALISE THE KERNEL
    imgDial = cv2.dilate(imgThreshold, kernel, iterations=2)  # APPLY DILATION
    imgThreshold = cv2.erode(imgDial, kernel, iterations=1)  # APPLY EROSION

    ## FIND ALL CONTOURS
    imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
    imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
    contours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # FIND ALL CONTOURS
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10) # DRAW ALL DETECTED CONTOURS


    # FIND THE BIGGEST CONTOUR
    biggest, maxArea = utlis.biggestContour(contours)
    if biggest.size != 0:
        biggest=utlis.reorder(biggest)
        cv2.drawContours(imgBigContour, biggest, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR
        imgBigContour = utlis.drawRectangle(imgBigContour,biggest,2)
        pts1 = np.float32(biggest) # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

        # REMOVE 20 PIXELS FROM EACH SIDE
        imgWarpColored=imgWarpColored[20:imgWarpColored.shape[0] - 20, 20:imgWarpColored.shape[1] - 20]
        imgWarpColored = cv2.resize(imgWarpColored,(widthImg,heightImg))

        # APPLY ADAPTIVE THRESHOLD
        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)
        imgAdaptiveThre= cv2.adaptiveThreshold(imgWarpGray, 255, 1, 1, 7, 2)
        imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
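`utlis.drawRectangle` only draws the detected quadrilateral back onto the display copy. A minimal sketch, assuming the reordered (4, 1, 2) corner array produced above:

import cv2

def drawRectangle(img, biggest, thickness):
    # Connect the four ordered corners (top-left, top-right, bottom-left, bottom-right) with green lines.
    pts = [tuple(int(v) for v in biggest[i][0]) for i in range(4)]
    cv2.line(img, pts[0], pts[1], (0, 255, 0), thickness)
    cv2.line(img, pts[0], pts[2], (0, 255, 0), thickness)
    cv2.line(img, pts[3], pts[1], (0, 255, 0), thickness)
    cv2.line(img, pts[3], pts[2], (0, 255, 0), thickness)
    return img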
Example No. 5
    def GoruntuIsle(self, img):

        try:

            global widthImg, heightImg, ans, x, y, xsag, xsol, w, h, esikdeger, ox, oy, ow, oh, secimSayisi, sorusayisi, sayac, sonbulunanNumara, bulunanNumara, sonbulunanpuan, bulunanpuan
            img2 = cv2.resize(img, (widthImg, heightImg))
            if cameraNo == -1:
                pass
            else:
                img2 = cv2.rotate(img2, cv2.ROTATE_180)
            imgCountours = img2.copy()
            imageFinal = img2.copy()
            imgBiggestCountours = img2.copy()
            imgGray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
            imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
            imgCanny = cv2.Canny(imgBlur, 10, 50)
            #cv2.imshow("test",imgCanny)

            try:
                # FIND ALL CONTOURS
                countours, hierarchy = cv2.findContours(
                    imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                cv2.drawContours(imgCountours, countours, -1, (0, 255, 0), 10)

                #FIND RECTANGLES
                rectCon = utlis.rectContour(countours)
                biggestContour = utlis.getCornerPoints(rectCon[0])
                #print(biggestContour)

                if biggestContour.size != 0:
                    cv2.drawContours(imgBiggestCountours, biggestContour, -1,
                                     (0, 255, 0), 20)
                    biggestContour = utlis.reorder(biggestContour)
                    pts1 = np.float32(
                        biggestContour)  # PREPARE POINTS FOR WARP
                    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                                       [widthImg,
                                        heightImg]])  # PREPARE POINTS FOR WARP
                    matrix = cv2.getPerspectiveTransform(
                        pts1, pts2)  # GET TRANSFORMATION MATRIX
                    imgWarpColored = cv2.warpPerspective(
                        img2, matrix,
                        (widthImg, heightImg))  # APPLY WARP PERSPECTIVE
                    #cv2.imshow("bulunan",imgWarpColored)
                    # APPLY THRESHOLD

                    imgWarpGray = cv2.cvtColor(imgWarpColored,
                                               cv2.COLOR_BGR2GRAY)
                    imgThresh = cv2.threshold(imgWarpGray, tresh, 255,
                                              cv2.THRESH_BINARY_INV)[1]

                    #boxes=utlis.splitBoxes(imgThresh)

                    crop_imgSol = imgThresh[y:y + h, xsol:(xsol + w)]
                    crop_imgSag = imgThresh[y:y + h, xsag:(xsag + w)]
                    crop_imgOgrenciNu = imgThresh[oy:oy + oh, ox:(ox + ow)]

                    #cv2.imshow("cropped", crop_imgSol)
                    #cv2.imwrite("croppedsol.jpg",crop_imgSol)
                    #cv2.imwrite("croppedsag.jpg",crop_imgSag)

                    boxesSol = utlis.splitBoxes(crop_imgSol)
                    boxesSag = utlis.splitBoxes(crop_imgSag)
                    boxesOgrenciNu = utlis.splitBoxesOgrenciNu(
                        crop_imgOgrenciNu)

                    sorusayisi = 20
                    # GET THE NON-ZERO PIXEL COUNT OF EACH CELL
                    myPixelVal = np.zeros((sorusayisi, secimSayisi))

                    myPixelValOgrenciNu = np.zeros((4, 10))
                    countC = 0
                    countR = 0

                    for image in boxesOgrenciNu:
                        totalPixels = cv2.countNonZero(image)
                        myPixelValOgrenciNu[countR][countC] = totalPixels
                        countC += 1
                        if (countC == 10):
                            countR += 1
                            countC = 0
                    #print(myPixelValOgrenciNu)

                    countC = 0
                    countR = 0

                    for image in boxesSol:
                        totalPixels = cv2.countNonZero(image)
                        myPixelVal[countR][countC] = totalPixels
                        countC += 1
                        if (countC == secimSayisi):
                            countR += 1
                            countC = 0
                    #print(myPixelVal)

                    for image in boxesSag:
                        totalPixels = cv2.countNonZero(image)
                        myPixelVal[countR][countC] = totalPixels
                        countC += 1
                        if (countC == secimSayisi):
                            countR += 1
                            countC = 0

                    #FINDING INDEX VALUES OF THE MARKINGS

                    myIndexOgrenciNu = []
                    for x in range(0, 4):
                        arr = myPixelValOgrenciNu[x]
                        #print("arr",arr)
                        myIndexVal = np.where(arr == np.amax(arr))
                        #print(myIndexVal[0])
                        myIndexOgrenciNu.append(myIndexVal[0][0])
                    ogrenciNumarasi = str(myIndexOgrenciNu[0]) + str(
                        myIndexOgrenciNu[1]) + str(myIndexOgrenciNu[2]) + str(
                            myIndexOgrenciNu[3])
                    #print('Öğrenci numarası {}'.format(ogrenciNumarasi))

                    #cv2.imshow('mum',utlis.showNumber2(imgWarpColored,myIndexOgrenciNu,4,10,ox,oy,ow,oh))

                    myIndex = []
                    for x in range(0, sorusayisi):
                        isaretsayisi = 0
                        arr = myPixelVal[x]
                        #print("arr-"+str(x),arr)
                        #print('max',np.amax(arr))
                        #print('sayı',np.count_nonzero(arr>esikdeger))
                        isaretsayisi = np.count_nonzero(arr > esikdeger)
                        enfazla = np.amax(arr)
                        if isaretsayisi > 1:
                            myIndexVal[0][0] = 5  # two or more choices were marked
                        elif esikdeger < enfazla:
                            myIndexVal = np.where(arr == np.amax(arr))
                            #print(np.where(arr==np.amax(arr))[0])
                        else:
                            #pass
                            myIndexVal[0][0] = 4
                        #print(myIndexVal[0])
                        myIndex.append(myIndexVal[0][0])
                    #print(myIndex)

                    #GRADING
                    grading = []
                    for x in range(0, sorusayisi):
                        if myIndex[x] == 4:
                            grading.append(4)
                        elif myIndex[x] == 5:
                            grading.append(5)
                        elif ans[x] == myIndex[x]:
                            grading.append(1)
                        else:
                            grading.append(0)
                    #print(grading)

                    #SCORE
                    DogrularSay = grading.count(1)
                    YanlislariSay = grading.count(0) + grading.count(5)
                    BoslariSay = grading.count(4)
                    score = (DogrularSay / sorusayisi) * 100
                    mesaj = 'No:' + ogrenciNumarasi + ' Puan:' + str(
                        score) + ' Doğru:' + str(
                            DogrularSay) + ' Yanlış:' + str(
                                YanlislariSay) + ' Boş:' + str(BoslariSay)
                    #print(score)

                    #DISPLAY ANSWERS
                    #imgResult=imgWarpColored.copy()

                    imgResultSol = imgWarpColored.copy()
                    #imgResultSag=imgWarpColored.copy()
                    #imgResult= imgResult[y:y+h, x:x+w]
                    imgResultNu = utlis.showNumber2(imgResultSol,
                                                    myIndexOgrenciNu, 4, 10,
                                                    ox, oy, ow, oh)
                    imgResultSol = utlis.showAnswers2(imgResultSol,
                                                      myIndex[0:10],
                                                      grading[0:10], ans[0:10],
                                                      10, 4, xsol, y, w, h)
                    imgResultSag = utlis.showAnswers2(imgResultSol,
                                                      myIndex[10:20],
                                                      grading[10:20],
                                                      ans[10:20], 10, 4, xsag,
                                                      y, w, h)

                    #cv2.imshow("imgResultSag",imgResultSag)

                    imRawDrawingSol = np.zeros_like(imgResultSol)
                    imgResultNu = utlis.showNumber2(imRawDrawingSol,
                                                    myIndexOgrenciNu, 4, 10,
                                                    ox, oy, ow, oh)
                    imRawDrawingSol = utlis.showAnswers2(
                        imRawDrawingSol, myIndex[0:10], grading[0:10],
                        ans[0:10], 10, 4, xsol, y, w, h)
                    imRawDrawingSag = utlis.showAnswers2(
                        imRawDrawingSol, myIndex[10:20], grading[10:20],
                        ans[10:20], 10, 4, xsag, y, w, h)

                    #cv2.imshow("imgResult1",imRawDrawing)
                    #pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
                    #pts2s = np.float32([[0, 0],[w, 0], [0, h],[w, h]]) # PREPARE POINTS FOR WARP
                    invMatrix = cv2.getPerspectiveTransform(
                        pts2, pts1)  # GET TRANSFORMATION MATRIX

                    # font
                    font = cv2.FONT_HERSHEY_SIMPLEX

                    # org
                    org = (50, heightImg - 20)

                    # fontScale
                    fontScale = 0.8

                    # Blue color in BGR
                    #b,g,r,a
                    color = (0, 0, 0, 0)

                    # Line thickness of 2 px
                    thickness = 2

                    # Using cv2.putText() method
                    #cv2.putText(imRawDrawingSol, 'No:'+ogrenciNumarasi+' Puan:'+str(score) +' D:'+str(DogrularSay)+' Y:'+str(YanlislariSay)+' B:'+str(BoslariSay), org, font,fontScale, color, thickness, cv2.LINE_AA,)

                    imgInvWarp = cv2.warpPerspective(
                        imRawDrawingSol, invMatrix,
                        ((widthImg), heightImg))  # APPLY WARP PERSPECTIVE
                    #cv2.putText(imageFinal,str(score),((widthImg-150),(heightImg-100)),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
                    imageFinal = cv2.addWeighted(imageFinal, 1, imgInvWarp, 1,
                                                 0)
                    imageFinal = cv2.rectangle(imageFinal,
                                               (50, (heightImg - 50)),
                                               (widthImg - 50,
                                                (heightImg - 10)),
                                               (255, 255, 255), -1)
                    imageFinal = utlis.print_utf8_text(
                        imageFinal, mesaj, color,
                        (widthImg / 2 - (len(mesaj) * 6)), (heightImg - 50))

                    imageFinal = cv2.rectangle(imageFinal, (150, 25),
                                               (600, 450), (0, 255, 255), 3,
                                               cv2.LINE_AA)
                    #cv2.putText(imageFinal,'Deneme',(50,125),cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3)
                    #cv2.imshow("Camera",imageFinal)

                    if asamalariGoster == True:
                        imgBlank = np.zeros_like(img2)
                        imageArray = ([img2, imgGray, imgBlur, imgCanny], [
                            imgCountours, imgBiggestCountours, imgWarpColored,
                            imgThresh
                        ], [imgResultSag, imageFinal, imgBlank, imgBlank])
                        imgStacked = utlis.stackImages(imageArray, .5)
                        cv2.imshow("imgStacked", imgStacked)

                    genislik = int(widthImg / 1.5)
                    yukseklik = int(heightImg / 1.5)
                    imageFinal = cv2.resize(imageFinal, (genislik, yukseklik))
                    # print(f'yeni gen-yuk {genislik} {yukseklik}')
                    height, width, channel = imageFinal.shape
                    step = channel * width
                    # create QImage from image
                    qImg = QImage(imageFinal.data, width, height, step,
                                  QImage.Format_BGR888)
                    # show image in img_label
                    self.ui.imgCamera_2.setPixmap(QPixmap.fromImage(qImg))

                    ogrenciCevapSikleri = []
                    ogrenciDogruYanlis = []
                    cevaplar = []

                    for i in myIndex:
                        if i == 0: ogrenciCevapSikleri.append('A')
                        elif i == 1: ogrenciCevapSikleri.append('B')
                        elif i == 2: ogrenciCevapSikleri.append('C')
                        elif i == 3: ogrenciCevapSikleri.append('D')
                        elif i == 4: ogrenciCevapSikleri.append('N')
                        elif i == 5: ogrenciCevapSikleri.append('M')

                    for x in range(0, sorusayisi):
                        if myIndex[x] == ans[x]: ogrenciDogruYanlis.append('D')
                        else: ogrenciDogruYanlis.append('Y')

                    for i in myIndex:
                        cevaplar.append(i)

                    #print(ogrenciCevapSikleri)
                    #print(ogrenciDogruYanlis)
                    # self.ui.imgBulunan.setVisible(False)

                    if otomatikDurdur == True:
                        print(f'otomatik durdurma açık')
                        bulunanNumara = ogrenciNumarasi
                        bulunanpuan = score
                        if sayac < 5:
                            print(
                                f'sayac {sayac} bulunanpuan {bulunanpuan} sonbulunan {sonbulunanpuan}'
                            )
                            if (sonbulunanpuan == bulunanpuan) and (
                                    sonbulunanNumara == bulunanNumara) and (
                                        int(sonbulunanNumara) > 0):
                                sayac += 1
                            else:
                                sayac = 0
                                sonbulunanNumara = bulunanNumara
                                sonbulunanpuan = bulunanpuan
                        else:
                            global toogledurum
                            toogledurum = False
                            # self.StartStop()
                            sayac = 0
                            print(f'bulundu ')
                            # print(ogrenciCevapSikleri)
                            # print(ogrenciDogruYanlis)

                            b = ''
                            for i in range(len(ogrenciCevapSikleri)):
                                b += (ogrenciCevapSikleri[i])
                            self.ui.lblogrencicevaplar.setPlainText(b)

                            imgResultSag = cv2.resize(imgResultSag,
                                                      (genislik, yukseklik))
                            # print(f'yeni gen-yuk {genislik} {yukseklik}')
                            height, width, channel = imgResultSag.shape
                            step = channel * width
                            # create QImage from image
                            qImg = QImage(imgResultSag.data, width, height,
                                          step, QImage.Format_BGR888)
                            # show image in img_label
                            # self.ui.imgBulunan.setVisible(True)
                            self.ui.imgBulunan.setPixmap(
                                QPixmap.fromImage(qImg))

            except Exception as Hata:
                print('Bulma hatası oluştu :', Hata)
        except Exception as Hata:
            print('Hata oluştu :', Hata)
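Example No. 5 crops the left answer column, the right answer column, and the student-number block, then splits each crop into one cell per bubble before counting non-zero pixels. `utlis.splitBoxes` is not shown; a minimal sketch, assuming a crop that divides evenly into 10 rows of `secimSayisi` (4) choices (the student-number variant would use 4 rows of 10 digit columns instead):

import numpy as np

def splitBoxes(img, rows=10, cols=4):
    # Cut a thresholded crop into rows x cols equally sized cells, row by row.
    # The crop's height and width must be divisible by rows and cols respectively.
    boxes = []
    for band in np.vsplit(img, rows):
        boxes.extend(np.hsplit(band, cols))
    return boxes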
Example No. 6
def omr(imgpath,field,answer):

    pathImage = imgpath
    heightImg = 700
    widthImg = 700
    questions=5
    choices=5
    ans= answer


    img = cv2.imread(pathImage)
    img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE
    imgFinal = img.copy()
    imgBlank = np.zeros((heightImg,widthImg, 3), np.uint8) # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT IMAGE TO GRAY SCALE
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR
    imgCanny = cv2.Canny(imgBlur,10,70) # APPLY CANNY


    ## FIND ALL CONTOURS
    imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
    imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
    contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # FIND ALL CONTOURS
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10) # DRAW ALL DETECTED CONTOURS
    rectCon = utlis.rectContour(contours) # FILTER FOR RECTANGLE CONTOURS
    biggestPoints= utlis.getCornerPoints(rectCon[0]) # GET CORNER POINTS OF THE BIGGEST RECTANGLE
    # gradePoints = utlis.getCornerPoints(rectCon[1]) # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE



    myIndex = []  # default in case no answer-sheet rectangle is detected
    if biggestPoints.size != 0:

        # BIGGEST RECTANGLE WARPING
        biggestPoints=utlis.reorder(biggestPoints) # REORDER FOR WARPING
        cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR
        pts1 = np.float32(biggestPoints) # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2) # GET TRANSFORMATION MATRIX
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg)) # APPLY WARP PERSPECTIVE


        # APPLY THRESHOLD
        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY) # CONVERT TO GRAYSCALE
        imgThresh = cv2.threshold(imgWarpGray, 170, 255,cv2.THRESH_BINARY_INV )[1] # APPLY THRESHOLD AND INVERSE

        boxes = utlis.splitBoxes(imgThresh) # GET INDIVIDUAL BOXES
        # cv2.imshow("Split Test ", boxes[3])
        countR=0
        countC=0
        myPixelVal = np.zeros((questions,choices)) # TO STORE THE NON ZERO VALUES OF EACH BOX
        nonPixelVal = np.zeros((1,1))
        nonPixelVal[0][0] = 5
        for image in boxes:
            #cv2.imshow(str(countR)+str(countC),image)
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC]= totalPixels
            countC += 1
            if (countC==choices):countC=0;countR +=1

        # FIND THE USER ANSWERS AND PUT THEM IN A LIST
        myIndex=[]
        for x in range(0, questions):
            arr = myPixelVal[x]
            print("pixel counts:", arr)
            max1 = np.amax(arr)
            myIndexVal = np.where(arr == max1)
            print("highest count:", max1)
            temp = np.delete(arr, myIndexVal)
            print("counts without the highest:", temp)
            max2 = np.amax(temp)
            myIndexVal1 = np.where(arr == max2)
            if (max1 - max2) / 1000 < 2:  # top two counts too close together -> treat as ambiguous / unanswered
                print("ambiguous marking:", max1, max2)
                myIndexVal = nonPixelVal
            myIndex.append(myIndexVal[0][0])
        print("USER ANSWERS",myIndex)

            # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS


        if field == 1:
            grading=[]
            for x in range(0,questions):
                if ans[x] == myIndex[x]:
                    grading.append(1)
                else:grading.append(0)
            #print("GRADING",grading)
            score = (sum(grading)/questions)*50 # FINAL GRADE
            print("SCORE",score)
            
        
            imageArray = ([img,imgGray,imgBlur, imgCanny],
                          [imgContours,imgBigContour,imgThresh,imgBlank])
            lables = [["Original","Gray","Blur","Edges"],
                      ["Contours","Biggest Contour","Threshold"," "]]

            stackedImage = utlis.stackImages(imageArray,0.5,lables)
            cv2.imshow('Result', stackedImage)
            # cv2.imwrite('r.jpg', stackedImage)
            cv2.waitKey()
            return(str(score))

    return(myIndex)
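`utlis.stackImages` only builds the debug mosaic passed to cv2.imshow. A compact sketch of the usual helper, assuming every image in the grid has the same size and that the label list is optional:

import cv2
import numpy as np

def stackImages(imgArray, scale, labels=None):
    # Resize each image, promote grayscale to BGR so it can be tiled with colour
    # images, optionally stamp a label, then hstack each row and vstack the rows.
    rows = []
    for r, row in enumerate(imgArray):
        cells = []
        for c, im in enumerate(row):
            im = cv2.resize(im, (0, 0), fx=scale, fy=scale)
            if len(im.shape) == 2:
                im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
            if labels:
                cv2.putText(im, labels[r][c], (10, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            cells.append(im)
        rows.append(np.hstack(cells))
    return np.vstack(rows)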
Example No. 7
    imgCanny = cv2.Canny(imgBlue, 10, 70)

    try:
        # Find all contours
        imgContours = img.copy()
        imgBigContour = img.copy()
        contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)
        rectCon = utlis.rectContour(contours)  # Filter for rectangle contours
        biggestPoints = utlis.getCornerPoints(rectCon[0])
        gradePoints = utlis.getCornerPoints(rectCon[1])

        if biggestPoints.size != 0 and gradePoints.size != 0:
            # Biggest rectangle warping
            biggestPoints = utlis.reorder(biggestPoints)
            cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20)
            pts1 = np.float32(biggestPoints)
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                               [widthImg, heightImg]])
            matrix = cv2.getPerspectiveTransform(pts1, pts2)
            imgWarpColored = cv2.warpPerspective(img, matrix,
                                                 (widthImg, heightImg))

            # Second biggest rectangle
            cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0), 20)
            gradePoints = utlis.reorder(gradePoints)
            ptsG1 = np.float32(gradePoints)
            ptsG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])
            matrixG = cv2.getPerspectiveTransform(ptsG1, ptsG2)
            imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))
Example No. 8
    imgCont, conts = utlis.getContours(img, minArea=5000, filter=4)

    if len(conts) != 0:
        biggest = conts[0][2]
        imgWarp = utlis.warpImg(img, biggest, wP, hP)

        imgCont2, conts2 = utlis.getContours(imgWarp,
                                             minArea=2000,
                                             filter=4,
                                             cThr=[50, 50],
                                             draw=False)
        if len(conts2) != 0:
            for obj in conts2:
                cv2.polylines(imgCont2, [obj[2]], True, (0, 255, 0), 2)
                nPoints = utlis.reorder(obj[2])
                nW = round(
                    utlis.findDis(nPoints[0][0] // scale,
                                  nPoints[1][0] // scale), 1)
                nH = round(
                    utlis.findDis(nPoints[0][0] // scale,
                                  nPoints[2][0] // scale), 1)
                cv2.arrowedLine(imgCont2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[1][0][0], nPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgCont2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgCont2, '{}mm'.format(nW), (x + 30, y - 10),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255),
                            2)  # thickness argument assumed; the original snippet is cut off here
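`utlis.findDis` just measures the pixel distance between two warped corner points, which is then converted with `scale`. A one-line sketch, assuming each argument is an (x, y) pair:

import math

def findDis(pts1, pts2):
    # Euclidean distance between two (x, y) points.
    return math.hypot(pts2[0] - pts1[0], pts2[1] - pts1[1])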
Example No. 9
    imgDial = cv2.dilate(imgThreshold, kernel, iterations=2)  # APPLY DILATION
    imgThreshold = cv2.erode(imgDial, kernel, iterations=1)  # APPLY EROSION

    ## FIND ALL CONTOURS
    imgConts = img.copy()
    imgBigConts = img.copy()
    contours, hierarchy = cv2.findContours(
        imgThreshold, cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)  # FIND ALL CONTOURS
    cv2.drawContours(imgConts, contours, -1, (0, 255, 0),
                     10)  # DRAW ALL DETECTED CONTOURS

    # FIND THE LARGEST CONTOUR IN THE FRAME
    big, maxArea = utlis.biggestContour(contours)  # FIND THE BIGGEST CONTOUR
    if big.size != 0:
        big = utlis.reorder(big)
        cv2.drawContours(imgBigConts, big, -1, (0, 255, 0),
                         20)  # DRAW THE BIGGEST CONTOUR
        imgBigConts = utlis.drawRectangle(imgBigConts, big, 2)
        pts1 = np.float32(big)  # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                           [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        imgWarpColored = cv2.warpPerspective(img, matrix,
                                             (widthImg, heightImg))

        #REMOVE EXTRA UNWANTED PIXELS FROM THE SIDES
        imgWarpColored = imgWarpColored[20:imgWarpColored.shape[0] - 20,
                                        20:imgWarpColored.shape[1] - 20]
        imgWarpColored = cv2.resize(imgWarpColored, (widthImg, heightImg))
Example No. 10
def readfile(path):
    #===============================
    #path = "scantron-100.jpg"
    widthImg = 1245
    heightImg=3000
    question =50
    choices = 5
    #===============================
    img = cv2.imread(path)

    #PREPROCESSING
    img = cv2.resize(img,(widthImg,heightImg))
    imgContours = img.copy()
    imgBiggestContours = img.copy()
    imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
    imgCanny = cv2.Canny(imgBlur,10,50)

    # FINDING ALL CONTOURS
    countours, hierarchy = cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours,countours,-1,(0,255,0),10)

    #FIND RECTANGLES
    rectCon = utlis.rectContour(countours)
    biggestContour = utlis.getCornerPoints(rectCon[0])
    gradePoints = utlis.getCornerPoints(rectCon[1])
    test = biggestContour.copy()
    test[0][0]=[333,2617]
    test[1][0]=[331,437]
    test[2][0]=[775,437]
    test[3][0]=[778,2617]
    #print("ttt:",test)
    #print("\n for contour\n",biggestContour )
    #print("\n for grade\n",gradePoints)
    biggestContour=test

    if biggestContour.size != 0 and gradePoints.size != 0:
        cv2.drawContours(imgBiggestContours,biggestContour,-1,(0,255,0),20)
        cv2.drawContours(imgBiggestContours,gradePoints,-1,(255,0,0),20)

        biggestContour= utlis.reorder(biggestContour)
        gradePoints = utlis.reorder(gradePoints)

        pt1 = np.float32(biggestContour)
        pt2= np.float32([[0,0],[widthImg,0],[0,heightImg],[widthImg,heightImg]])
        matrix = cv2.getPerspectiveTransform(pt1,pt2)
        imgWarpColored = cv2.warpPerspective(img,matrix,(widthImg,heightImg))

        ptG1 = np.float32(gradePoints)
        ptG2 = np.float32([[0,0],[325,0],[0,150],[325,150]])
        matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
        imgGradeDisplay = cv2.warpPerspective(img, matrixG,(325, 150))
        #cv2.imshow("Grade", imgGradeDisplay)

        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)
        imgThresh = cv2.threshold(imgWarpGray,150,255,cv2.THRESH_BINARY_INV)[1]
        #cv2.imshow("Grade", imgThresh)
        
        boxes = utlis.splitBoxes(imgThresh)
        #cv2.imshow("test", boxes[4])
        #print(cv2.countNonZero(boxes[2]),cv2.countNonZero(boxes[0]))


        # GET THE NON-ZERO PIXEL COUNT OF EACH BOX
        myPixelVal = np.zeros((question,choices))
        countC = 0
        countR = 0

        for image in boxes:
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC] = totalPixels
            countC +=1
            if (countC == choices):countR+=1; countC=0
        #print(myPixelVal)

        global myIndex
        localmyIndex = []
        for x in range(0, question):
            arrline = myPixelVal[x]
            arrmed= np.median(arrline)
            localmyIndex.append(-1)
            for y in range(0,choices):
                if(myPixelVal[x][y]/arrmed > 2):
                    localmyIndex[x]=y
        myIndex = localmyIndex




    imgBlank = np.zeros_like(img)
    imageArray = ([img,imgGray,imgBlur,imgCanny],
    [imgContours,imgBiggestContours,imgWarpColored,imgThresh])
    imgStacked = utlis.stackImages(imageArray,0.5)


    #cv2.imshow("stacked images",imgStacked)
    cv2.waitKey(0)
Example No. 11
def process (path):

    file_path=path
    file=open(file_path,'r')
    lines=file.readlines()
    img_path=lines[0]
    img_path=img_path.strip()
    questions = int(lines[1])
    choices = 5
    if(questions==15):
        heightImg=690
    elif(questions==20):
        heightImg=700
    elif(questions==25):
        heightImg=700
    else:
        # unsupported question count: write the error marker and stop (heightImg would be undefined below)
        fileName = path.split('\\')[-1]
        fileName = fileName.split('.')[0]
        with open('E:\\FinalProject\\Outputs\\' + fileName + '.txt', "w+") as f:
            f.write("-1")
        return
    print(img_path, questions)
    img = cv2.imread(img_path)

    img = cv2.resize(img, (widthImg, heightImg))  # RESIZE IMAGE

    imgFinal = img.copy()
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # CONVERT IMAGE TO GRAY SCALE
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)  # ADD GAUSSIAN BLUR
    imgCanny = cv2.Canny(imgBlur, 10, 70)  # APPLY CANNY

    try:
        ## FIND ALL CONTOURS
        imgContours = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # FIND ALL CONTOURS
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)  # DRAW ALL DETECTED CONTOURS
        rectCon = utlis.rectContour(contours)  # FILTER FOR RECTANGLE CONTOURS
        biggestPoints = utlis.getCornerPoints(rectCon[0])  # GET CORNER POINTS OF THE BIGGEST RECTANGLE
        #gradePoints = utlis.getCornerPoints(rectCon[1])  # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE

        if biggestPoints.size != 0 :

            # BIGGEST RECTANGLE WARPING
            biggestPoints = utlis.reorder(biggestPoints)  # REORDER FOR WARPING
            cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20)  # DRAW THE BIGGEST CONTOUR
            pts1 = np.float32(biggestPoints)  # PREPARE POINTS FOR WARP
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
            matrix = cv2.getPerspectiveTransform(pts1, pts2)  # GET TRANSFORMATION MATRIX
            imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))  # APPLY WARP PERSPECTIVE


            # APPLY THRESHOLD
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)  # CONVERT TO GRAYSCALE
            imgAdaptiveTh = cv2.adaptiveThreshold(imgWarpGray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
            imgThresh = cv2.threshold(imgAdaptiveTh, 170, 255, cv2.THRESH_BINARY_INV)[1]  # APPLY THRESHOLD AND INVERSE


            boxes = utlis.splitBoxes(imgThresh,questions)  # GET INDIVIDUAL BOXES
            countR = 0
            countC = 0
            myPixelVal = np.zeros((questions, choices))  # TO STORE THE NON ZERO VALUES OF EACH BOX
            for image in boxes:
                #cv2.imshow(str(countR)+str(countC),image)
                totalPixels = cv2.countNonZero(image)
                myPixelVal[countR][countC] = totalPixels
                #cv2.imshow("Split Test ", image)
                #print("hi",myPixelVal[countR][countC])
                #cv2.waitKey(0)
                countC += 1
                if (countC == choices): countC = 0;countR += 1

            # FIND THE USER ANSWERS AND PUT THEM IN A LIST
            myIndex = []
            for x in range(0, questions):
                arr = myPixelVal[x]
                myIndexVal = np.where(arr == np.amax(arr))
                myIndex.append(myIndexVal[0][0])


            # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS
            answers=''
            #grading = []
            #print("hi")
            for x in range(0, questions):
                answers+=str(myIndex[x])
                #if ans[x] == myIndex[x]:
                    #grading.append(1)
                #else:
                    #grading.append(0)
            # print("GRADING",grading)
            #score = (sum(grading) / questions) * 100  # FINAL GRADE
            #print("SCORE",score)

            #result =str(myIndex)
            print("USER ANSWERS", answers)
            fileName = path.split('\\')[-1]
            fileName = fileName.split('.')[0]
            with open('E:\\FinalProject\\Outputs\\' + fileName + '.txt', "w+") as f:
                f.write(answers)
    except:
        imageArray = ([img, imgGray, imgCanny, imgContours],
                      [imgBlank, imgBlank, imgBlank, imgBlank])
        print("Exception")
        fileName = path.split('\\')[-1]
        fileName = fileName.split('.')[0]
        with open('E:\\FinalProject\\Outputs\\' + fileName + '.txt', "w+") as f:
            f.write("-1")
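In Example No. 11, `splitBoxes` takes the question count as a second argument, so the warped sheet is cut into `questions` rows of `choices` cells; that is presumably why heightImg is set to 690 or 700 above, values that divide evenly by 15, 20 and 25. A minimal sketch of that variant, under the same even-division assumption:

import numpy as np

def splitBoxes(img, questions, choices=5):
    # One band of cells per question, one cell per choice.
    boxes = []
    for band in np.vsplit(img, questions):
        boxes.extend(np.hsplit(band, choices))
    return boxes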
Example No. 12
imgContours, conts = utlis.getContours(
    img, filter=4)  #stores image and contour coordinates
if len(conts) != 0:

    biggest = conts[0][2]  #stores largest dimension (A4 PAPER)

    imgWarp = utlis.warpImg(img, biggest, wP, hP)  #crop
    cv2.imshow('A4', imgWarp)  #shows warped image
    imgContours2, conts2 = utlis.getContours(
        imgWarp, minArea=2000, filter=4, cThr=[50, 50],
        draw=False)  #detect contours of the actual object
    if len(conts2) != 0:
        for obj in conts2:
            cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0),
                          2)  #draw green contour line around object (width=2)
            nPoints = utlis.reorder(
                obj[2])  #rearranges contour points in ascending order
            nW = round((utlis.findDis(nPoints[0][0] // scale,
                                      nPoints[1][0] // scale) / 10),
                       1)  #stores width
            nH = round((utlis.findDis(nPoints[0][0] // scale,
                                      nPoints[2][0] // scale) / 10),
                       1)  #stores height of object
            cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                            (nPoints[1][0][0], nPoints[1][0][1]),
                            (255, 0, 255), 3, 8, 0, 0.05)  #width arrow
            cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                            (nPoints[2][0][0], nPoints[2][0][1]),
                            (255, 0, 255), 3, 8, 0, 0.05)  #height arrow
            x, y, w, h = obj[3]  #stores origin, width and height
            cv2.putText(imgContours2, '{}cm'.format(nW), (x + w // 2, y + 50),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255),
                        2)  # thickness argument assumed; the original snippet is cut off here
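`utlis.warpImg`, used in Examples No. 8 and No. 12, performs the same corner-ordering plus perspective warp that the OMR examples spell out inline, and usually trims a small border afterwards. A self-contained sketch, assuming a 4-point contour and an assumed 20-pixel pad (Example No. 12 divides the measured distances by 10 to report centimetres, while Example No. 8 reports millimetres directly):

import cv2
import numpy as np

def warpImg(img, points, w, h, pad=20):
    # Order the 4 corners, warp to a w x h top-down view, then trim a small border.
    pts = points.reshape((4, 2))
    ordered = np.zeros((4, 2), dtype=np.float32)
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1).ravel()
    ordered[0] = pts[np.argmin(s)]  # top-left
    ordered[1] = pts[np.argmin(d)]  # top-right
    ordered[2] = pts[np.argmax(d)]  # bottom-left
    ordered[3] = pts[np.argmax(s)]  # bottom-right
    target = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(ordered, target)
    imgWarp = cv2.warpPerspective(img, matrix, (w, h))
    return imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad]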
Example No. 13
    try:
        # FIND ALL CONTOURS
        countours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(imgCountours, countours, -1, (0, 255, 0), 10)

        #FIND RECTANGLES
        rectCon = utlis.rectContour(countours)
        biggestContour = utlis.getCornerPoints(rectCon[0])
        #print(biggestContour)

        if biggestContour.size != 0:
            cv2.drawContours(imgBiggestCountours, biggestContour, -1,
                             (0, 255, 0), 20)
            biggestContour = utlis.reorder(biggestContour)
            pts1 = np.float32(biggestContour)  # PREPARE POINTS FOR WARP
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg],
                               [widthImg,
                                heightImg]])  # PREPARE POINTS FOR WARP
            matrix = cv2.getPerspectiveTransform(
                pts1, pts2)  # GET TRANSFORMATION MATRIX
            imgWarpColored = cv2.warpPerspective(
                img, matrix, (widthImg, heightImg))  # APPLY WARP PERSPECTIVE

            # APPLY THRESHOLD

            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
            imgThresh = cv2.threshold(imgWarpGray, 100, 255,
                                      cv2.THRESH_BINARY_INV)[1]
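Example No. 13 stops at the thresholded warp. As the other OMR examples above show, the usual next step is to split that image into one cell per bubble, count the non-zero pixels in each cell, and take the strongest cell per row as the marked answer. A minimal sketch of that step, assuming a 5-question, 5-choice sheet whose warp divides evenly:

import cv2
import numpy as np

def readAnswers(imgThresh, questions=5, choices=5):
    # Count marked (non-zero) pixels per cell and pick the strongest bubble in each row.
    pixelCounts = np.zeros((questions, choices))
    for r, band in enumerate(np.vsplit(imgThresh, questions)):
        for c, box in enumerate(np.hsplit(band, choices)):
            pixelCounts[r][c] = cv2.countNonZero(box)
    return [int(np.argmax(pixelCounts[r])) for r in range(questions)]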