Beispiel #1
0
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main application window.

    Captures frames from a webcam or a video file, lets the user register up
    to three object images (loaded from disk or cropped from the live frame)
    and continuously matches them against the current frame using ORB
    descriptors with a brute-force Hamming matcher.  When enough matches
    survive, a homography is estimated and the object outline is drawn on
    the frame; the matches are rendered in a third viewer.
    """

    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/mainwindow.ui',
            self)
        #uic.loadUi('mainwindow.ui', self)

        # Dialog used to name a newly cropped object.
        self.addObject = QtWidgets.QDialog()
        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/objectName.ui',
            self.addObject)
        #uic.loadUi('objectName.ui', self.addObject)
        self.addObject.okButton.clicked.connect(self.addOkAction)

        # Dialog used to rename an existing object.
        self.renameObject = QtWidgets.QDialog()
        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/objectRename.ui',
            self.renameObject)
        #uic.loadUi('objectRename.ui', self.renameObject)
        self.renameObject.okButton.clicked.connect(self.renameOkAction)

        ##########################################################

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()

        # Rectangle selected by the user on the source viewer.
        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False

        # Timer that drives the capture loop (16 ms ~ 60 fps for webcam).
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        ##################      Image arrays and viewer objects     ##################

        # OpenCV arrays are (rows, cols) == (height, width); the QImage
        # constructors take (width, height).
        self.grayImage = np.zeros((240, 320), np.uint8)
        self.imgS = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        self.grayImageDest = np.zeros((240, 320), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        # Placeholder shown in the matches viewer until real matches exist.
        # FIX: cv2.imread returns None when the file is missing; fall back
        # to a black image instead of propagating None.
        placeholder = cv2.imread("Practica 3/noMatches.jpg")
        if placeholder is None:
            placeholder = np.zeros((240, 700, 3), np.uint8)
        self.colorImageM = placeholder
        self.imgM = QImage(700, 240, QImage.Format_RGB888)
        self.visorM = ImgViewer(700, 240, self.imgM, self.imageFrameS_2)

        ##############################################################################

        ##################      Buttons     ##################

        self.captureButton.clicked.connect(self.captureButtonAction)
        self.addButton.clicked.connect(self.addAction)
        self.renameButton.clicked.connect(self.renameAction)
        self.removeButton.clicked.connect(self.removeAction)
        self.loadButton.clicked.connect(self.loadAction)
        self.loadButton_Video.clicked.connect(self.loadVideoAction)

        ######################################################

        ##################      Image matching      ##################

        # ImageObject instances representing the registered objects.
        self.imageList = []
        # Maps object names shown in the comboBox to the objects above.
        self.mapObjects = {}
        # Keypoints per object scale: entries 0..2 belong to the first
        # object, 3..5 to the second, 6..8 to the third (3 scales each).
        self.ObjectKeyPointList = []
        # Keypoints of the currently captured frame.
        self.imageKeypointList = []
        # ORB detector and brute-force matcher using Hamming distance.
        self.orb = cv2.ORB_create()
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING)

        ##############################################################

        ##################      Signals     ##################

        self.visorS.windowSelected.connect(self.selectWindow)
        self.visorS.pressEvent.connect(self.deSelectWindow)

        ######################################################

    def loadAction(self):
        """Load an object image from disk and register it for matching.

        The image is resized to 180 px height (keeping aspect ratio),
        converted to grayscale and wrapped in an ImageObject; its per-scale
        descriptors go into the matcher and its keypoints into
        ObjectKeyPointList.
        """
        # FIX: QComboBox has no __len__; the original len(self.objectList)
        # raised TypeError.  Use count() to enforce the 3-object limit.
        if self.objectList.count() < 3:
            imgPath, _ = QFileDialog.getOpenFileName()
            if imgPath != "":
                self.grayImageLoad = cv2.imread(imgPath)
                # FIX: imread returns None for unreadable files.
                if self.grayImageLoad is None:
                    message = QtWidgets.QMessageBox()
                    message.about(
                        None, 'Error',
                        'Error loading image: File could not be read.')
                    return
                y, x, _ = self.grayImageLoad.shape
                scaleFactor = x / y
                dim = (int(180 * scaleFactor), 180)
                self.grayImageLoad = cv2.resize(self.grayImageLoad, dim)
                self.grayImageLoad = cv2.cvtColor(self.grayImageLoad,
                                                  cv2.COLOR_BGR2GRAY)

                imgName = imgPath
                image = ImageObject(imgName, self.grayImageLoad, self.orb)
                kp, desc, valid = image.returnKpDes()

                if valid is True:
                    self.imageList.append(image)
                    self.mapObjects[imgName] = self.imageList[-1]
                    self.objectList.addItem(imgName)
                    # One descriptor set / keypoint list per scale, so the
                    # matcher's train-image index is object*3 + scale.
                    for d in desc:
                        self.bf.add([d])
                    for k in kp:
                        self.ObjectKeyPointList.append([k])
                else:
                    message = QtWidgets.QMessageBox()
                    message.about(
                        None, 'Error',
                        'Error adding object: The selected object is not descriptive enough.'
                    )
        else:
            message = QtWidgets.QMessageBox()
            message.about(
                None, 'Error',
                'Error loading image: Maximum number of objects reached.')

    def loadVideoAction(self):
        """Open a video file and retime the capture loop to its frame rate."""
        imgPath, _ = QFileDialog.getOpenFileName()
        if imgPath != "":
            self.captureState = True
            self.capture = VideoCapture(imgPath)
            self.timer.stop()
            fps = self.capture.get(cv2.CAP_PROP_FPS)
            # FIX: QTimer.start() needs an int interval, and CAP_PROP_FPS
            # can be 0 for some containers (the original divided by it).
            interval = int(1000 / fps) if fps and fps > 0 else 16
            self.timer.start(interval)

    def calculateMatches(self):
        """Match the current frame against every registered object.

        For each object the scale with the most good matches (Hamming
        distance <= 50) is chosen; with more than 50 surviving matches a
        homography is estimated (RANSAC; the notes suggest LMEDS as an
        alternative) and the object's outline is drawn on the frame, then
        the matches viewer is refreshed via showMatAction().
        """
        if len(self.bf.getTrainDescriptors()) == 0:
            return
        self.imageKeypointList, des = self.orb.detectAndCompute(
            self.grayImage, None)
        # FIX: a frame with no keypoints yields des == None, which crashed
        # knnMatch in the original.
        if des is None:
            return
        obtainedMatches = self.bf.knnMatch(des, k=3)

        # Bucket matches by train image (3 scales per object).
        orderedMatches = [[] for _ in range(len(self.imageList) * 3)]
        for matchGroup in obtainedMatches:
            for m in matchGroup:
                # Guard against out-of-range imgIdx values occasionally
                # returned by the matcher (observed in practice).
                if m.imgIdx < len(self.imageList) * 3:
                    orderedMatches[m.imgIdx].append(m)

        # Keep only strong matches (distance <= 50), in place.
        for bucket in orderedMatches:
            bucket[:] = [m for m in bucket if m.distance <= 50]

        for objId, image in enumerate(self.imageList):
            index = objId * 3
            imageScales = orderedMatches[index:index + 3]
            # Scale with the most surviving matches (first wins on ties,
            # matching the original loop's behavior).
            mostMatchesId = max(range(len(imageScales)),
                                key=lambda s: len(imageScales[s]))
            mostMatches = imageScales[mostMatchesId]

            # Reset the matches viewer to the placeholder image.
            # FIX: guard the imread result (relative path may not resolve).
            placeholder = cv2.imread("Practica 3/noMatches.jpg")
            if placeholder is None:
                placeholder = np.zeros((240, 700, 3), np.uint8)
            self.colorImageM = placeholder
            self.visorM.set_open_cv_imageColor(self.colorImageM)
            self.visorM.update()

            if len(mostMatches) > 50:
                points1 = []
                points2 = []
                imageKp, _, _ = image.returnKpDes()
                imageKp = imageKp[mostMatchesId]
                for m in mostMatches:
                    points1.append(self.imageKeypointList[m.queryIdx].pt)
                    points2.append(imageKp[m.trainIdx].pt)

                h, mask = cv2.findHomography(np.array(points2),
                                             np.array(points1), cv2.RANSAC)

                if h is not None:
                    # Corners of the object image at the matched scale,
                    # in (x, y) order: TL, TR, BR, BL.
                    objH, objW = image.getScales()[mostMatchesId].shape[:2]
                    corners = np.array(
                        [[0, 0], [objW, 0], [objW, objH], [0, objH]],
                        dtype=np.float32)

                    M = cv2.perspectiveTransform(np.array([corners]), h)

                    # FIX: cv2.line requires integer pixel coordinates;
                    # the original passed floats from the transform.
                    pts = [(int(p[0]), int(p[1])) for p in M[0]]
                    for a in range(4):
                        cv2.line(self.grayImage, pts[a], pts[(a + 1) % 4],
                                 (255, 255, 255), 4)

                    # NOTE(review): draws the object currently selected in
                    # the comboBox, not necessarily the matched one —
                    # preserved from the original; confirm intent.
                    imageAux = self.mapObjects[self.objectList.currentText()]
                    imageAux = np.array(imageAux.getScales()[0],
                                        dtype=np.uint8)

                    # FIX: keypoints for this object's matched scale live
                    # at index + mostMatchesId; the original indexed with
                    # the scale id alone, mixing up objects.
                    self.showMatAction(
                        self.grayImage, self.imageKeypointList, imageAux,
                        self.ObjectKeyPointList[index + mostMatchesId],
                        orderedMatches)

    def showMatAction(self, img1, kp1, img2, kp2, matches):
        """Render the matches between the frame and an object image.

        Draws the matches side by side, resizes the result to the matches
        viewer size (700x240), converts BGR -> RGB and updates the viewer.
        """
        # BGR (142, 255, 132) light blue; (255, 102, 51) light green.
        self.colorImageM = cv2.drawMatchesKnn(img1,
                                              kp1,
                                              img2,
                                              kp2[0],
                                              matches[0:1],
                                              None,
                                              flags=2,
                                              matchColor=(142, 255, 132))
        self.colorImageM = cv2.resize(self.colorImageM, (700, 240))
        self.colorImageM = cv2.cvtColor(self.colorImageM, cv2.COLOR_BGR2RGB)
        self.visorM.set_open_cv_imageColor(self.colorImageM)
        self.visorM.update()

    def addAction(self):
        """Open the add-object dialog (limit of three objects)."""
        # FIX: QComboBox has no __len__; use count().
        if self.objectList.count() < 3:
            self.addObject.show()
        else:
            message = QtWidgets.QMessageBox()
            message.about(
                None, 'Error',
                'Error adding object: Maximum number of objects reached.')

    def addOkAction(self):
        """Confirm the add-object dialog: crop the selected frame region
        and register it as a new object."""
        self.addObject.hide()

        if self.actionReady is True:
            # Selected rectangle in frame coordinates.
            y_OffSet = self.imageWindow.y()
            x_OffSet = self.imageWindow.x()
            height = self.imageWindow.height()
            width = self.imageWindow.width()

            # Copy the slice so later frames don't mutate the stored crop.
            crop_img = copy.copy(self.grayImage[y_OffSet:y_OffSet + height,
                                                x_OffSet:x_OffSet + width])

            imgName = self.addObject.lineEdit.text()
            image = ImageObject(imgName, crop_img, self.orb)
            kp, desc, valid = image.returnKpDes()

            if valid is True:
                self.imageList.append(image)
                self.mapObjects[imgName] = self.imageList[-1]
                self.objectList.addItem(imgName)
                # Same per-scale layout as loadAction.
                for d in desc:
                    self.bf.add([d])
                for k in kp:
                    self.ObjectKeyPointList.append([k])
            else:
                message = QtWidgets.QMessageBox()
                message.about(
                    None, 'Error',
                    'Error adding object: The selected object is not descriptive enough.'
                )

    def renameAction(self):
        """Open the rename-object dialog."""
        self.renameObject.show()

    def renameOkAction(self):
        """Close the rename-object dialog."""
        self.renameObject.hide()

    def removeAction(self):
        """Remove the currently selected object and rebuild matcher state.

        Rebuilds ObjectKeyPointList and the matcher's train descriptors
        from the remaining objects so the object*3 + scale indexing stays
        consistent.
        """
        # FIX: the original used `is not -1` (identity, not equality) and
        # then deleted entries from imageKeypointList — the *frame*
        # keypoints — leaving the matcher and the per-object keypoint list
        # out of sync (its own TODO acknowledged this).
        index = self.objectList.currentIndex()
        if index == -1:
            return
        del self.imageList[index]
        name = self.objectList.itemText(index)
        self.mapObjects.pop(name, None)
        self.objectList.removeItem(index)

        self.ObjectKeyPointList = []
        self.bf.clear()
        for image in self.imageList:
            kp, des, _ = image.returnKpDes()
            for d in des:
                self.bf.add([d])
            for k in kp:
                self.ObjectKeyPointList.append([k])

    def captureButtonAction(self):
        """Toggle webcam capture on/off and update the button state."""
        if self.captureState is False:
            self.capture = VideoCapture(0)
            self.captureButton.setChecked(True)
            self.captureButton.setText("Stop Capture")
            self.captureState = True
        else:
            self.captureState = False
            self.captureButton.setChecked(False)
            self.captureButton.setText("Start Capture")

    def selectWindow(self, p, w, h):
        """Store a w x h selection rectangle centered on point p,
        clamped to the 320x240 frame."""
        if w > 0 and h > 0:
            pEnd = QPointF()
            self.imageWindow.setX(p.x() - w / 2)
            if self.imageWindow.x() < 0:
                self.imageWindow.setX(0)
            self.imageWindow.setY(p.y() - h / 2)
            if self.imageWindow.y() < 0:
                self.imageWindow.setY(0)
            pEnd.setX(p.x() + w / 2)
            if pEnd.x() >= 320:
                pEnd.setX(319)
            pEnd.setY(p.y() + h / 2)
            if pEnd.y() >= 240:
                pEnd.setY(239)
            self.imageWindow.setWidth(pEnd.x() - self.imageWindow.x())
            self.imageWindow.setHeight(pEnd.y() - self.imageWindow.y())
            self.winSelected = True

    def deSelectWindow(self):
        """Finish the selection: the rectangle is ready to be cropped."""
        self.winSelected = False
        self.actionReady = True

    def timerLoop(self):
        """Capture loop: grab a frame, detect ORB keypoints, run matching
        and refresh every viewer."""
        if self.captureState and self.capture.isOpened():
            ret, self.grayImage = self.capture.read()
            if ret is False:
                # End of video / camera lost: reset images and fall back to
                # the webcam interval.
                self.capture.release()
                self.captureState = False
                self.grayImage = np.zeros((240, 320), np.uint8)
                self.grayImageDest = np.zeros((240, 320), np.uint8)
                self.timer.stop()
                self.timer.start(16)
                return
            self.grayImage = cv2.resize(self.grayImage, (320, 240))
            self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
            kp = self.orb.detect(self.grayImage, None)
            kp, des = self.orb.compute(self.grayImage, kp)
            self.grayImageDest = copy.copy(self.grayImage)
            self.grayImageDest = cv2.drawKeypoints(
                self.grayImage,
                kp,
                self.grayImageDest,
                color=(255, 255, 255),
                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG)
            self.calculateMatches()
        if self.winSelected:
            self.visorS.drawSquare(self.imageWindow, Qt.green)

        self.visorS.set_open_cv_image(self.grayImage)
        self.visorD.set_open_cv_image(self.grayImageDest)
        self.visorS.update()
        self.visorD.update()

        self.visorM.set_open_cv_imageColor(self.colorImageM)
        self.visorM.update()
Beispiel #2
0
class Ui_MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        """Set up the segmentation UI: load the .ui file, start webcam
        capture on a 16 ms timer, create the source/destination viewers and
        initialize the region-labeling buffers."""
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        #uic.loadUi('mainwindow.ui', self)
        uic.loadUi('Practica 4/mainwindow.ui', self)

        ##########################################################

        # Webcam capture; captureButtonAction() toggles it off immediately
        # because captureState starts as True.
        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()
        # Whether the current processing mode is color (vs. grayscale).
        self.colorState = False
        # Rectangle selected by the user on the source viewer.
        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False

        # Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: Opencv images where created with wrong width height values (switched) so the copy failed
        # FIXED: original removed 2 of the 3 chanels with the np.zeros
        # OpenCV arrays are (rows, cols[, channels]); QImage takes
        # (width, height).
        self.grayImage = np.zeros((240, 320), np.uint8)
        self.colorImage = np.zeros((240, 320, 3), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_RGB888)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: original removed 2 of the 3 chanels with the np.zeros

        self.grayImageDest = np.zeros((240, 320), np.uint8)
        self.colorImageDest = np.zeros((240, 320, 3), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_RGB888)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        ##############################################################################

        ##################      Buttons     ##################

        self.colorButton.clicked.connect(self.colorButtonAction)
        self.captureButton.clicked.connect(self.captureButtonAction)
        self.loadButton.clicked.connect(self.loadAction)
        self.spinBoxDifference.valueChanged.connect(self.spinBoxAction)

        ######################################################

        ##############################################################

        # Canny edge map of the current frame.
        self.edges = np.zeros((240, 320), np.int8)
        # Per-pixel region label; -1 marks pixels not yet assigned.
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)
        self.listRegions = []

        ##############################################################

    def spinBoxAction(self):
        """Re-run region filling whenever the difference spinbox changes.

        NOTE(review): the dispatch looks inverted (colorState True selects
        the grayscale fill) — preserved exactly as written; confirm against
        colorButtonAction's semantics.
        """
        handler = (self.fillImgRegions
                   if self.colorState is True else self.fillImgRegionsColor)
        handler()

    '''
    What we have to do is fill each region with a value.
    Iterate over the whole image. If we find a point that doesn't have a region we call floodfill
    Floodfill will fill the region using the mask with a value and give us a rectangle. Using those,
    We iterate over that rectangle and add the points with that value to imgRegions, 
    so we don't iterate multiple times over the same region. After we have done that, we regenerate the mask
    to avoid having different regions with the same value.  
    '''

    def fillImgRegions(self):
        """Segment the grayscale frame into regions and paint the result.

        Canny edges seed a floodFill pass that labels every non-edge pixel
        in self.imgRegions; leftover (edge) pixels are assimilated into a
        neighboring region, small regions are optionally merged, each pixel
        is painted with its region's average grey level and region borders
        are optionally drawn in white.  Finally the destination viewer is
        refreshed and self.imgRegions is reset to -1 for the next run.
        """

        #print("principio" + str(self.imgRegions))

        #np.set_printoptions(threshold = np.inf)

        # Region labels start at 1; -1 marks unassigned pixels.
        regionID = 1
        regionList = []
        #print("imagen: " + str(self.grayImage.shape))
        # self.printNumpyArray(self.grayImage)
        self.edges = cv2.Canny(self.grayImage, 40, 120)

        # print("---")
        #print("bordes: " + str(self.edges))
        # print("Stop1")
        # self.printNumpyArray(self.edges)
        # floodFill needs a mask two pixels larger than the image; the
        # Canny edges become barriers and the added 255 border keeps the
        # fill inside the frame.
        self.mask = cv2.copyMakeBorder(self.edges,
                                       1,
                                       1,
                                       1,
                                       1,
                                       cv2.BORDER_CONSTANT,
                                       value=255)
        # print(self.mask.shape)
        # print("Stop")
        # self.printNumpyArray(self.mask)
        #print("borders shape: " + str(self.mask.shape))
        # print("---")
        # print(self.mask)
        '''
        print("Edge size:" + str(self.edges.shape))
        print("Image shape" + str(self.grayImage.shape))
        print("Regions shape" + str(self.imgRegions.shape))
        print("We got here")
        #plt.subplot(121),plt.imshow(self.edges,cmap = 'gray')
        #plt.show()
        '''
        dialogValue = self.spinBoxDifference.value()
        print(dialogValue)
        # MASK_ONLY: the image itself is not modified; 4-connectivity;
        # 1 << 8 writes the value 1 into the mask for filled pixels.
        # NOTE(review): the branches look swapped — FIXED_RANGE is applied
        # when the checkbox is *unchecked*; confirm intended semantics.
        if self.checkBoxRange.isChecked() == True:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | 1 << 8
        else:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | cv2.FLOODFILL_FIXED_RANGE | 1 << 8

        # Pass 1: flood-fill every unlabeled non-edge pixel, then label the
        # filled pixels inside the returned bounding rect.
        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                # We found a new region:

                # Optimize this, it's the part that makes it stupid slow
                if self.imgRegions[i][j] == -1:
                    if self.edges[i][j] == 0:

                        # Seed point is (x, y) == (j, i).
                        _, _, newMask, rect = cv2.floodFill(self.grayImage,
                                                            self.mask, (j, i),
                                                            1,
                                                            loDiff=dialogValue,
                                                            upDiff=dialogValue,
                                                            flags=floodFlags)
                        print(rect)
                        newRegion = region(regionID, rect)

                        # rect is (x, y, w, h); the mask is offset by one
                        # pixel because of its extra border.
                        for k in range(rect[0], rect[0] + rect[2], 1):
                            for l in range(rect[1], rect[1] + rect[3], 1):
                                if newMask[l + 1][k +
                                                  1] == 1 and self.imgRegions[
                                                      l][k] == -1:
                                    self.imgRegions[l][k] = regionID
                                    newRegion.addPoint(self.grayImage[l][k])
                        newRegion.calcAverage()
                        regionList.append(copy.deepcopy(newRegion))

                        regionID += 1
                    #self.mask = cv2.copyMakeBorder(self.edges, 1,1,1,1, cv2.BORDER_CONSTANT, value = 255)

        # Pass 2: assimilate still-unlabeled (edge) pixels into any labeled
        # 8-neighbor.
        for i in range(1, 239, 1):
            for j in range(1, 319, 1):
                if self.imgRegions[i][j] == -1:
                    for k in range(-1, 2, 1):
                        for l in range(-1, 2, 1):
                            if self.imgRegions[i + k][
                                    j +
                                    l] != -1 and self.imgRegions[i][j] == -1:
                                self.imgRegions[i][j] = self.imgRegions[i +
                                                                        k][j +
                                                                           l]

        # Optional merge pass: collect frontier points between regions,
        # then merge the smaller region into the bigger one when enough of
        # its frontier is shared and not backed by a Canny edge.
        if self.checkBoxMerge.isChecked() is True:
            print("Merging")
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    found = False
                    for k in range(-1, 2, 1):
                        if found is True:
                            break
                        for l in range(-1, 2, 1):
                            if found is True:
                                break
                            if self.imgRegions[i][j] != self.imgRegions[i +
                                                                        k][j +
                                                                           l]:
                                regionList[self.imgRegions[i][j] -
                                           1].addFrontierPoint([
                                               i, j,
                                               self.imgRegions[i + k][j + l]
                                           ])
                                #print("Point coords: ", i, " ", j, " Region ID: ", self.imgRegions[i][j])
            for i in regionList:
                if i.deleted == False:
                    borderRegions = i.regionsInBorder()
                    for j in borderRegions:
                        otherRegion = regionList[j - 1]
                        if i.regionSize() < otherRegion.regionSize():
                            smallestRegion = i.id
                            biggest = j
                        else:
                            smallestRegion = j
                            biggest = i.id
                        percentageOfBorder = regionList[smallestRegion -
                                                        1].percentageOfBorder(
                                                            self.edges,
                                                            biggest)
                        percentageOfFrontier = regionList[
                            smallestRegion - 1].percentageOfFrontier(biggest)
                        if percentageOfBorder > 0.4 and percentageOfFrontier > 0.4:
                            # Relabel every pixel of the smaller region.
                            for k in range(240):
                                for l in range(320):
                                    if self.imgRegions[k][l] == smallestRegion:
                                        self.imgRegions[k][l] = biggest
                            regionList[biggest - 1].mergeRegion(
                                regionList[smallestRegion - 1])
                            regionList[smallestRegion - 1].deleted = True
                        #regionList.pop(smallestRegion-1)
        ''' 
            Lo que tengo que hacer:
            Para cada región, mirar su frontera. Para cada valor distinto que haya, mirar cual de las dos es más pequeña.
            Para la más pequeña, mirar si el número de puntos de ese valor es mayor de un porcentaje y si no muchos de esos puntos
            pertenecen a un borde de canny. Si es así, recorrer el rectángulo de esa región y poner todos los puntos al otro valor.
        '''

        # Paint every pixel with its region's average grey level.
        for i in range(240):
            for j in range(320):
                regionIndex = self.imgRegions[i][j] - 1
                region2 = regionList[regionIndex]
                avgGrey = region2.returnAverage()
                self.grayImageDest[i][j] = int(avgGrey)

        print("Number of regions after: ", len(regionList))

        # Optionally overdraw region borders in white.
        checkBreak = False
        if self.checkBoxBorders.isChecked() == True:
            #We skip the first to avoid out of bounds. Can be done manually, or adding an if check that makes everything slow as f**k.
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    checkBreak = False
                    for k in range(1, -2, -1):
                        if checkBreak == True:
                            break
                        for l in range(1, -2, -1):
                            if self.imgRegions[i][j] != self.imgRegions[i +
                                                                        k][j +
                                                                           l]:
                                self.grayImageDest[i][j] = 255
                                checkBreak = True
                                break
        '''
        #Set borders to black.
        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                if self.imgRegions[i][j] == -1:
                    self.imgRegions[i][j] = 0       
        '''
        #print("Resultado: " + str(self.imgRegions))
        # print(self.imgRegions.shape)
        # print(np.unique(self.imgRegions))

        #plt.subplot(121),plt.imshow(self.imgRegions,cmap = 'gray')
        # plt.show()

        #cv2.imwrite("result.png", self.imgRegions)
        #self.grayImageDest = cv2.resize(self.grayImageDest, (320, 240))
        #self.grayImageDest = cv2.cvtColor(self.grayImageDest, cv2.COLOR_BGR2GRAY)
        # Show the result and reset the label buffer for the next run.
        self.visorD.set_open_cv_image(self.grayImageDest)
        self.visorD.update()
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)

    def fillImgRegionsColor(self):
        """Segment the 320x240 color frame into flat-colored regions.

        Flood-fills from every pixel that is not on a Canny edge,
        labelling each filled pixel in ``self.imgRegions``; leftover edge
        pixels inherit a neighbour's label.  Small regions are optionally
        merged into their neighbours (checkBoxMerge) and region borders
        painted white (checkBoxBorders).  Every pixel of
        ``self.colorImageDest`` is set to its region's average color and
        the result is displayed in ``self.visorD``.
        """
        regionID = 1
        self.edges = cv2.Canny(self.colorImage, 40, 120)
        # floodFill requires a mask 2 px larger than the image; edge pixels
        # (255) act as barriers so fills stop at object contours.
        self.mask = cv2.copyMakeBorder(self.edges,
                                       1,
                                       1,
                                       1,
                                       1,
                                       cv2.BORDER_CONSTANT,
                                       value=255)
        regionList = []
        dialogValue = self.spinBoxDifference.value()
        # (1 << 8) puts fill value 1 in the mask's high byte of the flags;
        # filled pixels therefore appear as 1 in the returned mask.
        if self.checkBoxRange.isChecked() == True:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | 1 << 8
        else:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | cv2.FLOODFILL_FIXED_RANGE | 1 << 8

        # Pass 1: grow a new region from every still-unlabelled, non-edge
        # pixel.  (Known hotspot: this is the slow part.)
        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                if self.imgRegions[i][j] == -1:
                    if self.edges[i][j] == 0:

                        dialogValueArray = [
                            dialogValue, dialogValue, dialogValue
                        ]

                        # Note: floodFill takes (x, y) seed order, hence (j, i).
                        _, _, newMask, rect = cv2.floodFill(
                            self.colorImage,
                            self.mask, (j, i),
                            1,
                            loDiff=dialogValueArray,
                            upDiff=dialogValueArray,
                            flags=floodFlags)

                        newRegion = regionColor(regionID, rect)

                        # Label the newly filled pixels inside the fill's
                        # bounding rect (x, y, w, h) and accumulate colors.
                        # newMask has a 1 px border, hence the +1 offsets.
                        for k in range(rect[0], rect[0] + rect[2], 1):
                            for l in range(rect[1], rect[1] + rect[3], 1):
                                if newMask[l + 1][k +
                                                  1] == 1 and self.imgRegions[
                                                      l][k] == -1:
                                    self.imgRegions[l][k] = regionID
                                    newRegion.addPoint(self.colorImage[l][k])

                        newRegion.calcAverage()
                        regionList.append(copy.deepcopy(newRegion))
                        regionID += 1
        checkBreak = False
        print("Number of regions: ", len(regionList))

        # Pass 2: orphan pixels (Canny edges, still -1) adopt the label of
        # any already-labelled 8-neighbour.
        for i in range(1, 239, 1):
            for j in range(1, 319, 1):
                if self.imgRegions[i][j] == -1:
                    for k in range(-1, 2, 1):
                        for l in range(-1, 2, 1):
                            if self.imgRegions[i + k][
                                    j +
                                    l] != -1 and self.imgRegions[i][j] == -1:
                                self.imgRegions[i][j] = self.imgRegions[i +
                                                                        k][j +
                                                                           l]

        if self.checkBoxMerge.isChecked() is True:
            print("Merging")
            # Collect frontier points: pixels whose 3x3 neighbourhood
            # contains a different region id.
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    found = False
                    for k in range(-1, 2, 1):
                        if found is True:
                            break
                        for l in range(-1, 2, 1):
                            if found is True:
                                break
                            if self.imgRegions[i][j] != self.imgRegions[i +
                                                                        k][j +
                                                                           l]:
                                regionList[self.imgRegions[i][j] -
                                           1].addFrontierPoint([
                                               i, j,
                                               self.imgRegions[i + k][j + l]
                                           ])
            # Merge the smaller of each adjacent pair into the bigger one
            # when enough of its frontier is shared and not a Canny edge.
            for i in regionList:
                if i.deleted == False:
                    borderRegions = i.regionsInBorder()
                    for j in borderRegions:
                        otherRegion = regionList[j - 1]
                        if i.regionSize() < otherRegion.regionSize():
                            smallestRegion = i.id
                            biggest = j
                        else:
                            smallestRegion = j
                            biggest = i.id
                        percentageOfBorder = regionList[smallestRegion -
                                                        1].percentageOfBorder(
                                                            self.edges,
                                                            biggest)
                        percentageOfFrontier = regionList[
                            smallestRegion - 1].percentageOfFrontier(biggest)
                        if percentageOfBorder > 0.4 and percentageOfFrontier > 0.4:
                            for k in range(240):
                                for l in range(320):
                                    if self.imgRegions[k][l] == smallestRegion:
                                        self.imgRegions[k][l] = biggest
                            regionList[biggest - 1].mergeRegion(
                                regionList[smallestRegion - 1])
                            regionList[smallestRegion - 1].deleted = True

        # Paint every pixel with its region's average color.
        for i in range(240):
            for j in range(320):
                regionIndex = self.imgRegions[i][j] - 1
                region2 = regionList[regionIndex]
                avgColor = region2.returnAverage()
                self.colorImageDest[i][j] = avgColor

        if self.checkBoxBorders.isChecked() == True:
            # Skip the outer one-pixel frame so the 3x3 neighbourhood stays
            # in bounds.  FIX: these ranges previously ran to 240/320,
            # which indexed row 240 / column 320 (out of bounds) whenever
            # k or l was +1; the gray version already used 239/319.
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    checkBreak = False
                    for k in range(1, -2, -1):
                        if checkBreak == True:
                            break
                        for l in range(1, -2, -1):
                            if self.imgRegions[i][j] != self.imgRegions[i +
                                                                        k][j +
                                                                           l]:
                                self.colorImageDest[i][j] = [255, 255, 255]
                                checkBreak = True
                                break

        self.visorD.set_open_cv_imageColor(self.colorImageDest)
        self.visorD.update()
        # Reset labels so the next frame starts from scratch.
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)

    def colorButtonAction(self):
        """Toggle between gray and color processing modes.

        NOTE: in this app ``colorState`` is True while processing GRAY
        frames; the button label always names the mode it switches to.
        """
        switching_to_gray = not self.colorState
        if switching_to_gray:
            self.colorButton.setText("Gray Image")
            self.colorButton.setChecked(True)
            print("Swapping to Gray")
        else:
            self.colorButton.setText("Color Image")
            self.colorButton.setChecked(False)
            print("Swapping to color")
        self.colorState = switching_to_gray

    def loadAction(self):
        """Load an image from disk, segment it, and show the source.

        Opens a file dialog; if the user picks a file it is read with
        OpenCV, resized to 320x240, converted to the active color space
        (gray or RGB depending on ``colorState``) and run through the
        region segmentation.  A cancelled dialog or an undecodable file
        leaves the current state untouched.
        """
        imgPath, _ = QFileDialog.getOpenFileName()

        if imgPath != "":
            # FIX: guard against cv2.imread returning None (unreadable or
            # unsupported file), which previously crashed in cv2.resize.
            image = cv2.imread(imgPath)
            if image is not None:
                image = cv2.resize(image, (320, 240))
                if self.colorState == True:
                    # Gray mode: only the destination buffer needs
                    # pre-allocating (the source is replaced outright).
                    self.grayImageDest = np.zeros((240, 320), np.uint8)
                    self.grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    self.fillImgRegions()
                    self.visorS.set_open_cv_image(self.grayImage)
                else:
                    self.colorImageDest = np.zeros((240, 320, 3), np.uint8)
                    self.colorImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    self.fillImgRegionsColor()
                    self.visorS.set_open_cv_imageColor(self.colorImage)
        self.visorS.update()

    def captureButtonAction(self):
        """Toggle webcam capture on/off and sync the button's state.

        FIX: the previous version never released the camera when capture
        was stopped, so every stop/start cycle opened a new
        ``VideoCapture(0)`` while the old handle stayed open (resource
        leak; on some platforms the device stays busy).
        """
        if self.captureState == False:
            self.capture = VideoCapture(0)
            self.captureButton.setChecked(True)
            self.captureButton.setText("Stop Capture")
            self.captureState = True
        else:
            self.captureState = False
            # Free the camera device while capture is stopped.
            if self.capture.isOpened():
                self.capture.release()
            self.captureButton.setChecked(False)
            self.captureButton.setText("Start Capture")

    def timerLoop(self):
        """Periodic capture callback (fires every 16 ms).

        While capturing, reads a frame from the webcam, converts it to the
        active color space (gray or RGB per ``colorState``), segments it
        and displays the source frame in ``self.visorS``.  A failed read
        releases the camera, resets the buffers and restarts the timer.
        """
        if (self.captureState == True and self.capture.isOpened() == True):
            if self.colorState == True:
                # Gray-processing mode.
                ret, self.grayImage = self.capture.read()
                if ret == False:
                    # Camera failure: release it and clear the gray buffers.
                    self.capture.release()
                    self.captureState = False
                    self.grayImage = np.zeros((240, 320), np.uint8)
                    self.grayImageDest = np.zeros((240, 320), np.uint8)
                    self.timer.stop()
                    self.timer.start(16)
                    return
                self.grayImage = cv2.resize(self.grayImage, (320, 240))
                self.grayImage = cv2.cvtColor(self.grayImage,
                                              cv2.COLOR_BGR2GRAY)
                self.fillImgRegions()
                self.visorS.set_open_cv_image(self.grayImage)
            else:
                print("Should be here")
                ret, self.colorImage = self.capture.read()
                if ret == False:
                    self.capture.release()
                    self.captureState = False
                    # FIX: use uint8 like the gray branch and loadAction;
                    # bare np.zeros defaults to float64, which is not what
                    # the viewers expect for image buffers.
                    self.colorImage = np.zeros((240, 320, 3), np.uint8)
                    self.colorImageDest = np.zeros((240, 320, 3), np.uint8)
                    self.timer.stop()
                    self.timer.start(16)
                    return
                self.colorImage = cv2.resize(self.colorImage, (320, 240))
                self.colorImage = cv2.cvtColor(self.colorImage,
                                               cv2.COLOR_BGR2RGB)
                self.fillImgRegionsColor()
                self.visorS.set_open_cv_imageColor(self.colorImage)

        self.visorS.update()