Example #1
    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        #uic.loadUi('mainwindow.ui', self)
        uic.loadUi('Practica 4/mainwindow.ui', self)

        ##########################################################

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()
        self.colorState = False
        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False

        # Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        self.grayImage = np.zeros((240, 320), np.uint8)
        self.colorImage = np.zeros((240, 320, 3), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_RGB888)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: the original np.zeros dropped 2 of the 3 channels

        self.grayImageDest = np.zeros((240, 320), np.uint8)
        self.colorImageDest = np.zeros((240, 320, 3), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_RGB888)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        ##############################################################################

        ##################      Buttons     ##################

        self.colorButton.clicked.connect(self.colorButtonAction)
        self.captureButton.clicked.connect(self.captureButtonAction)
        self.loadButton.clicked.connect(self.loadAction)
        self.spinBoxDifference.valueChanged.connect(self.spinBoxAction)

        ######################################################

        ##############################################################

        self.edges = np.zeros((240, 320), np.int8)
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)
        self.listRegions = []
Example #2
 def _imgView(self, openfile_list):
     if openfile_list:
         self.imgViewer = ImgViewer(self)
         frameSize_Width = int(self.YUVviewerConfigFile.config_dict['frameSize_Width'])
         frameSize_Height = int(self.YUVviewerConfigFile.config_dict['frameSize_Height'])
         # Multithreaded approach
         ret = self.imgViewer.setFileList_multithreading(openfile_list,
                     self.YUVviewerConfigFile.config_dict['YUVFormat'],
                     frameSize_Width,
                     frameSize_Height,
                     int(self.YUVviewerConfigFile.config_dict['startFrame']),
                     int(self.YUVviewerConfigFile.config_dict['endFrame']) - int(self.YUVviewerConfigFile.config_dict['startFrame']) + 1,
                     )
         if not ret:
             QMessageBox.critical(self, 'Error', 'unsupported YUVFormat!', QMessageBox.Ok)
             self.show()
             return False
         # Single-threaded approach (kept for reference)
         #try:
         #    ret = self.imgViewer.setFileList(openfile_list,
         #                    self.YUVviewerConfigFile.config_dict['YUVFormat'],
         #                    frameSize_Width,
         #                    frameSize_Height,
         #                    int(self.YUVviewerConfigFile.config_dict['startFrame']),
         #                    int(self.YUVviewerConfigFile.config_dict['endFrame']) - int(self.YUVviewerConfigFile.config_dict['startFrame']) + 1,
         #                    )
         #    if not ret:
         #        QMessageBox.critical(self, 'Error', 'unsupport YUVFormat!!', QMessageBox.Ok)
         #        self.show()
         #        return False
         #
         #except Exception as e:
         #    QMessageBox.critical(self, 'Error', 'unknow error!!', QMessageBox.Ok)
         #    self.show()
         #    return False
         if frameSize_Width > frameSize_Height:
             self.imgViewer.resize(800, int(frameSize_Height / frameSize_Width * 800))
         else:
             self.imgViewer.resize(int(frameSize_Width / frameSize_Height * 400), 400)
         screen = QGuiApplication.screenAt(self.mapToGlobal(QPoint(self.width()//2,0))).geometry()
         size = self.imgViewer.geometry()
         self.imgViewer.move((screen.width() - size.width()) // 2, (screen.height() - size.height()) // 2)
         self.hide()
         self.imgViewer.show()
         return True
Example #3
    def __init__(self):
        super(Ui_MainWindow, self).__init__()
        uic.loadUi('mainwindow.ui', self)
        print("Trying to connect")

        self.PixelTF = QtWidgets.QDialog()
        uic.loadUi('pixelTForm.ui', self.PixelTF)
        self.PixelTF.okButton.clicked.connect(self.closePixelTransformAction)

        self.Filter = QtWidgets.QDialog()
        uic.loadUi('lFilterForm.ui', self.Filter)
        self.Filter.okButton.clicked.connect(self.closeFilterFormAction)

        self.OrderForm = QtWidgets.QDialog()
        uic.loadUi('operOrderForm.ui', self.OrderForm)
        self.OrderForm.okButton.clicked.connect(self.closeOrderFormAction)

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()

        #Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)
        
        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # self.colorImage = np.zeros((320,240))
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        # self.colorImage = np.zeros((320,240))
        #self.colorImage = np.zeros((240,320,3))
        self.grayImage = np.zeros((240, 320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)
        
        #self.visorS.set_open_cv_image(self.grayImage)
        

        #TODO: Delete label, set as attribute of imgViewer
        #Isn't it the same? TODO later, it works *for now*        
    
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        #self.colorImageDest = np.zeros((240,320))
        #self.colorImageDest = np.zeros((240,320,3))
        self.grayImageDest = np.zeros((240,320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImageDest, cv2.COLOR_BGR2GRAY)
        self.imgD = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)
        
        #self.visorS.set_open_cv_image(self.grayImageDest)


        self.visorHistoS = ImgViewer(256, self.histoFrameS.height(), None, self.histoFrameS)
        self.visorHistoD = ImgViewer(256, self.histoFrameD.height(), None, self.histoFrameD)


        self.captureButton.clicked.connect(self.captureButtonAction)
        self.loadButton.clicked.connect(self.loadImageAction)
        self.pixelTButton.clicked.connect(self.setPixelTransfAction)
        self.kernelButton.clicked.connect(self.setKernelAction)
        self.operOrderButton.clicked.connect(self.setOperationOrderAction)

        #self.retranslateUi(MainWindow)
        #QtCore.QMetaObject.connectSlotsByName(MainWindow)

        
        self.dictionary = {
            'Transform pixel': self.transformPixelAction,
            'Thresholding': self.thresholdingAction,
            'Equalize': self.equalizeAction,
            'Gaussian Blur': self.gaussianBlurAction,
            'Median Blur': self.medianBlurAction,
            'Linear Filter': self.linearFilterAction,
            'Dilate': self.dilateAction,
            'Erode': self.erodeAction,
            'Apply several...': self.applySeveralAction,
        }
        
        self.operationDictionary = {
            'Negative': [255, 170, 85, 0],
            'Brighten': [0, 140, 220, 255],
            'Darken': [0,40, 85, 120],
            'Increase Contrast': [0, 50, 200, 255],
            'Decrease Contrast': [40, 100, 155, 210]
        }
Example #4
class Ui_MainWindow(QtWidgets.QMainWindow):

    def __init__(self):
        super(Ui_MainWindow, self).__init__()
        uic.loadUi('mainwindow.ui', self)
        print("Trying to connect")

        self.PixelTF = QtWidgets.QDialog()
        uic.loadUi('pixelTForm.ui', self.PixelTF)
        self.PixelTF.okButton.clicked.connect(self.closePixelTransformAction)

        self.Filter = QtWidgets.QDialog()
        uic.loadUi('lFilterForm.ui', self.Filter)
        self.Filter.okButton.clicked.connect(self.closeFilterFormAction)

        self.OrderForm = QtWidgets.QDialog()
        uic.loadUi('operOrderForm.ui', self.OrderForm)
        self.OrderForm.okButton.clicked.connect(self.closeOrderFormAction)

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()

        #Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)
        
        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # self.colorImage = np.zeros((320,240))
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        # self.colorImage = np.zeros((320,240))
        #self.colorImage = np.zeros((240,320,3))
        self.grayImage = np.zeros((240, 320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)
        
        #self.visorS.set_open_cv_image(self.grayImage)
        

        #TODO: Delete label, set as attribute of imgViewer
        #Isn't it the same? TODO later, it works *for now*        
    
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        #self.colorImageDest = np.zeros((240,320))
        #self.colorImageDest = np.zeros((240,320,3))
        self.grayImageDest = np.zeros((240,320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImageDest, cv2.COLOR_BGR2GRAY)
        self.imgD = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)
        
        #self.visorS.set_open_cv_image(self.grayImageDest)


        self.visorHistoS = ImgViewer(256, self.histoFrameS.height(), None, self.histoFrameS)
        self.visorHistoD = ImgViewer(256, self.histoFrameD.height(), None, self.histoFrameD)


        self.captureButton.clicked.connect(self.captureButtonAction)
        self.loadButton.clicked.connect(self.loadImageAction)
        self.pixelTButton.clicked.connect(self.setPixelTransfAction)
        self.kernelButton.clicked.connect(self.setKernelAction)
        self.operOrderButton.clicked.connect(self.setOperationOrderAction)

        #self.retranslateUi(MainWindow)
        #QtCore.QMetaObject.connectSlotsByName(MainWindow)

        
        self.dictionary = {
            'Transform pixel': self.transformPixelAction,
            'Thresholding': self.thresholdingAction,
            'Equalize': self.equalizeAction,
            'Gaussian Blur': self.gaussianBlurAction,
            'Median Blur': self.medianBlurAction,
            'Linear Filter': self.linearFilterAction,
            'Dilate': self.dilateAction,
            'Erode': self.erodeAction,
            'Apply several...': self.applySeveralAction,
        }
        
        self.operationDictionary = {
            'Negative': [255, 170, 85, 0],
            'Brighten': [0, 140, 220, 255],
            'Darken': [0,40, 85, 120],
            'Increase Contrast': [0, 50, 200, 255],
            'Decrease Contrast': [40, 100, 155, 210]
        }
        
        # Get the function from switcher dictionary
        # TODO
        # func = dictionary.get(valorDesplegable, lambda: "Invalid month")
        # Execute the function
        # print(func())

    def transformPixelAction(self, startImage):

        lutTable = np.ones((256), np.uint8)

        src_1 = self.PixelTF.origPixelBox1.value()
        src_2 = self.PixelTF.origPixelBox2.value()
        src_3 = self.PixelTF.origPixelBox3.value()
        src_4 = self.PixelTF.origPixelBox4.value()
        if self.PixelTF.operationComboBox1.currentText() == 'User Defined':
            dst_1 = self.PixelTF.newPixelBox1.value()
            dst_2 = self.PixelTF.newPixelBox2.value()
            dst_3 = self.PixelTF.newPixelBox3.value()
            dst_4 = self.PixelTF.newPixelBox4.value()

        else:
            OpList = self.operationDictionary.get(self.PixelTF.operationComboBox1.currentText())
            dst_1 = OpList[0]
            dst_2 = OpList[1]
            dst_3 = OpList[2]
            dst_4 = OpList[3]

        self.applyTransformPixel(src_1, src_2, dst_1, dst_2, lutTable)
        self.applyTransformPixel(src_2, src_3, dst_2, dst_3, lutTable)
        self.applyTransformPixel(src_3, src_4 + 1, dst_3, dst_4 + 1, lutTable)

        returnImage = cv2.LUT(startImage, lutTable)
        return returnImage

    def applyTransformPixel(self, src1, src2, dst1, dst2, lut):
        for src in range(src1,src2):
            s = (dst2 - dst1) / (src2 - src1) * (src - src1) + dst1
            lut[src] = s
        
    def thresholdingAction(self, startImage):
        _, returnImage = cv2.threshold(startImage, self.thresholdSpinBox.value(), 255, cv2.THRESH_BINARY)
        return returnImage

    def equalizeAction(self, startImage):
        returnImage = cv2.equalizeHist(startImage)
        return returnImage

    def gaussianBlurAction(self, startImage):
        size = (int(self.gaussWidthBox.cleanText()), int(self.gaussWidthBox.cleanText()))
        returnImage = cv2.GaussianBlur(startImage,ksize = size, sigmaX = 0, sigmaY = 0)
        return returnImage

    def medianBlurAction(self, startImage):
        returnImage = cv2.medianBlur(startImage, ksize = 3)
        return returnImage

    def linearFilterAction(self, startImage):
        kernel = np.zeros((3,3), dtype = np.double)
        for i in range (1,4):
            for j in range (1,4):
                result = 'kernelBox' + str(i) + str(j)
                kernel[i-1,j-1] = getattr(self.Filter, result).value()
        
        returnImage = cv2.filter2D(startImage, ddepth = cv2.CV_8U, kernel = kernel, delta = self.Filter.addedVBox.value())
        return returnImage

    def dilateAction(self, startImage):
        kernel = np.ones((3,3), np.uint8)
        _, returnImage = cv2.threshold(startImage, self.thresholdSpinBox.value(), 255, cv2.THRESH_BINARY)
        returnImage = cv2.dilate(returnImage, kernel, iterations=1)
        return returnImage

    def erodeAction(self, startImage):
        kernel = np.ones((3,3), np.uint8)
        _, returnImage = cv2.threshold(startImage, self.thresholdSpinBox.value(), 255, cv2.THRESH_BINARY)
        returnImage = cv2.erode(returnImage, kernel, iterations=1)
        return returnImage

    def applySeveralAction(self, startImage):

        returnImage = startImage

        if self.OrderForm.firstOperCheckBox.isChecked() is True:
            func = self.dictionary.get(self.OrderForm.operationComboBox1.currentText())
            returnImage = func(returnImage)
        

        if self.OrderForm.secondOperCheckBox.isChecked() is True:
            func = self.dictionary.get(self.OrderForm.operationComboBox2.currentText())
            returnImage = func(returnImage)

        if self.OrderForm.thirdOperCheckBox.isChecked() is True:
            func = self.dictionary.get(self.OrderForm.operationComboBox3.currentText())
            returnImage = func(returnImage)

        if self.OrderForm.fourthOperCheckBox.isChecked() is True:
            func = self.dictionary.get(self.OrderForm.operationComboBox4.currentText())
            returnImage = func(returnImage)
        return returnImage

    def closeOrderFormAction(self):
        self.OrderForm.hide()

    def closePixelTransformAction(self):
        self.PixelTF.hide()

    def closeFilterFormAction(self):
        self.Filter.hide()
        
    def captureButtonAction(self):
        if self.captureState == False:
            self.captureButton.setText("Stop Capture")
            self.captureButton.setChecked(True)
            print("Started")
            self.captureState = True
        else: 
            self.captureButton.setText("Start Capture")
            self.captureButton.setChecked(False)
            print("Stopped")
            self.captureState = False

    def timerLoop(self):
        if (self.captureState == True and self.capture.isOpened() == True):
            ret, self.grayImage = self.capture.read()
            self.grayImage = cv2.resize(self.grayImage, (320, 240))
            self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)

            print(self.operationComboBox.currentText())

            



            # self.label_S.setPixmap(QPixmap.fromImage(self.visorS.qimg))
            # self.label_D.setPixmap(QPixmap.fromImage(self.imgVisorD.qimg))
            # self.visorS.repaint()
            # self.visorS.update()
        func = self.dictionary.get(self.operationComboBox.currentText())
        self.grayImageDest = func(self.grayImage)
        self.updateHistograms(self.grayImage, self.visorHistoS)
        self.updateHistograms(self.grayImageDest, self.visorHistoD)
        # FIXED: astype is needed to convert the cv type to the qt expected one
        self.visorS.set_open_cv_image(self.grayImage)
        # FIXED: astype is needed to convert the cv type to the qt expected one
        self.visorD.set_open_cv_image(self.grayImageDest)
        self.visorS.update()
        self.visorD.update()

    def colorImageAction(self):
        pass

    def loadImageAction(self):
        print("Load")
        self.imgPath, _ = QFileDialog.getOpenFileName()
        if self.captureState == True:
            self.captureButtonAction()
                
        self.grayImage = cv2.imread(self.imgPath)
        self.grayImage = cv2.resize(self.grayImage, (320, 240))
        self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        
        print(self.imgPath)

    def saveImageAction(self):
        saveImage = self.grayImage
        filename, _ = QFileDialog.getSaveFileName()
        cv2.imwrite(filename, saveImage)
        print("Save")

    def setPixelTransfAction(self):
        self.PixelTF.exec()

    def setKernelAction(self):
        self.Filter.exec()

    def setOperationOrderAction(self):
        self.OrderForm.exec()

    def updateHistograms(self, image, visor):
        histoSize = 256
        ranges = [0, 256]

        # cv2.calcHist(image, 1, channels, None, histogram, 1, histoSize, ranges, True, False)
        histogram = cv2.calcHist(images=[image.astype(np.uint8)], channels=[0], mask=None, histSize=[histoSize], ranges=ranges, accumulate=False)
        minH, maxH,_,_ = cv2.minMaxLoc(histogram)

        maxY = visor.height()

        for i, hVal in enumerate(histogram):
            minY = maxY - float(hVal[0]) * maxY / maxH
            visor.drawLine(QLineF(i, minY, i, maxY), Qt.red)
        visor.update()
Example #5
    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/mainwindow.ui',
            self)
        #uic.loadUi('mainwindow.ui', self)

        self.addObject = QtWidgets.QDialog()
        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/objectName.ui',
            self.addObject)
        #uic.loadUi('objectName.ui', self.addObject)
        self.addObject.okButton.clicked.connect(self.addOkAction)

        self.renameObject = QtWidgets.QDialog()
        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/objectRename.ui',
            self.renameObject)
        #uic.loadUi('objectRename.ui', self.renameObject)
        self.renameObject.okButton.clicked.connect(self.renameOkAction)

        ##########################################################

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()

        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False

        #Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        self.grayImage = np.zeros((240, 320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: the original np.zeros dropped 2 of the 3 channels

        self.grayImageDest = np.zeros((240, 320), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        self.colorImageM = np.zeros((240, 700, 3))
        self.colorImageM = cv2.imread("Practica 3/noMatches.jpg")
        self.imgM = QImage(700, 240, QImage.Format_RGB888)
        self.visorM = ImgViewer(700, 240, self.imgM, self.imageFrameS_2)
        #self.visorS.set_open_cv_image(self.grayImageDest)

        ##############################################################################

        ##################      Buttons     ##################

        self.captureButton.clicked.connect(self.captureButtonAction)
        self.addButton.clicked.connect(self.addAction)
        self.renameButton.clicked.connect(self.renameAction)
        self.removeButton.clicked.connect(self.removeAction)
        self.loadButton.clicked.connect(self.loadAction)
        self.loadButton_Video.clicked.connect(self.loadVideoAction)

        ######################################################

        ##################      Image matching      ##################

        #Actual imageObject objects that represent the images. Their keypoints and descriptors can also be obtained from these directly
        self.imageList = []
        #A dictionary mapping object names in the comboBox to the actual objects
        self.mapObjects = {}
        #In these, 0:2 are the first object, 3:5 the second and 6:8 the third. The last are the keypoints of the actual image.
        #They are all a list of lists.
        self.ObjectKeyPointList = []
        #Keypoints of the captured image.
        self.imageKeypointList = []
        #ORB and BFMatcher, using Hamming distance.
        self.orb = cv2.ORB_create()
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING)

        ##############################################################

        #self.retranslateUi(MainWindow)
        #QtCore.QMetaObject.connectSlotsByName(MainWindow)

        ##################      Signals     ##################

        self.visorS.windowSelected.connect(self.selectWindow)
        self.visorS.pressEvent.connect(self.deSelectWindow)

        ######################################################
        '''
Example #6
class Ui_MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/mainwindow.ui',
            self)
        #uic.loadUi('mainwindow.ui', self)

        self.addObject = QtWidgets.QDialog()
        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/objectName.ui',
            self.addObject)
        #uic.loadUi('objectName.ui', self.addObject)
        self.addObject.okButton.clicked.connect(self.addOkAction)

        self.renameObject = QtWidgets.QDialog()
        uic.loadUi(
            '/Users/dakolas/Documents/GitHub/ArtificialVision/Practica 3/objectRename.ui',
            self.renameObject)
        #uic.loadUi('objectRename.ui', self.renameObject)
        self.renameObject.okButton.clicked.connect(self.renameOkAction)

        ##########################################################

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()

        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False

        #Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        self.grayImage = np.zeros((240, 320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: the original np.zeros dropped 2 of the 3 channels

        self.grayImageDest = np.zeros((240, 320), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_Grayscale8)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        self.colorImageM = np.zeros((240, 700, 3))
        self.colorImageM = cv2.imread("Practica 3/noMatches.jpg")
        self.imgM = QImage(700, 240, QImage.Format_RGB888)
        self.visorM = ImgViewer(700, 240, self.imgM, self.imageFrameS_2)
        #self.visorS.set_open_cv_image(self.grayImageDest)

        ##############################################################################

        ##################      Buttons     ##################

        self.captureButton.clicked.connect(self.captureButtonAction)
        self.addButton.clicked.connect(self.addAction)
        self.renameButton.clicked.connect(self.renameAction)
        self.removeButton.clicked.connect(self.removeAction)
        self.loadButton.clicked.connect(self.loadAction)
        self.loadButton_Video.clicked.connect(self.loadVideoAction)

        ######################################################

        ##################      Image matching      ##################

        #Actual imageObject objects that represent the images. Their keypoints and descriptors can also be obtained from these directly
        self.imageList = []
        #A dictionary mapping object names in the comboBox to the actual objects
        self.mapObjects = {}
        #In these, 0:2 are the first object, 3:5 the second and 6:8 the third. The last are the keypoints of the actual image.
        #They are all a list of lists.
        self.ObjectKeyPointList = []
        #Keypoints of the captured image.
        self.imageKeypointList = []
        #ORB and BFMatcher, using Hamming distance.
        self.orb = cv2.ORB_create()
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING)
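        # For example (hypothetical index k), the three scale variants of object k sit at
        # positions 3*k .. 3*k+2 of the flat lists above:
        #   kpScales = self.ObjectKeyPointList[3 * k : 3 * k + 3]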

        ##############################################################

        #self.retranslateUi(MainWindow)
        #QtCore.QMetaObject.connectSlotsByName(MainWindow)

        ##################      Signals     ##################

        self.visorS.windowSelected.connect(self.selectWindow)
        self.visorS.pressEvent.connect(self.deSelectWindow)

        ######################################################
        '''
            To use: findHomography(), with LMEDS.
            To transform the vectors, use perspectiveTransform().
            Start from the keypoint lists: the image's and the object's at the
            selected scale. queryIdx refers to the image keypoints, trainIdx to the object's.
            After selecting the knnMatch with the most
        
        
        '''
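
    # A minimal sketch (hypothetical helper, not part of the original class) of the plan
    # described in the note above: estimate a homography with LMEDS from a list of
    # matches (queryIdx = captured image, trainIdx = stored object) and project the
    # object's corners into the captured image with perspectiveTransform().
    def _homographySketch(self, matches, imageKp, objectKp, objectShape):
        if len(matches) < 4:
            return None
        srcPts = np.float32([objectKp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
        dstPts = np.float32([imageKp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        H, _ = cv2.findHomography(srcPts, dstPts, cv2.LMEDS)
        if H is None:
            return None
        h, w = objectShape[:2]
        corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
        return cv2.perspectiveTransform(corners, H)  # projected corners, shape (4, 1, 2)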

    def loadAction(self):
        if self.objectList.count() != 3:
            imgPath, _ = QFileDialog.getOpenFileName()
            if imgPath != "":
                self.grayImageLoad = cv2.imread(imgPath)
                #print("escala: " + str(self.grayImageLoad.shape))
                y, x, a = self.grayImageLoad.shape
                scaleFactor = x / y
                #print("scaleFactor: " + str(scaleFactor))
                width = int(180 * scaleFactor)
                height = int(180)
                dim = (width, height)
                self.grayImageLoad = cv2.resize(self.grayImageLoad, dim)
                self.grayImageLoad = cv2.cvtColor(self.grayImageLoad,
                                                  cv2.COLOR_BGR2GRAY)

                imgName = imgPath
                image = ImageObject(imgName, self.grayImageLoad, self.orb)
                kp, desc, valid = image.returnKpDes()

                if valid is True:
                    self.imageList.append(image)
                    self.mapObjects[imgName] = self.imageList[-1]
                    self.objectList.addItem(imgName)
                    #Get the image descriptors and add them to the descriptor collection

                    print("DESC:")
                    for i in desc:
                        print(len(i))
                        self.bf.add([i])
                    print("KP:")
                    for i in kp:
                        print(len(i))
                        self.ObjectKeyPointList.append([i])
                else:
                    message = QtWidgets.QMessageBox()
                    message.about(
                        None, 'Error',
                        'Error adding object: The selected object is not descriptive enough.'
                    )

        else:
            message = QtWidgets.QMessageBox()
            message.about(
                None, 'Error',
                'Error loading image: Maximum number of objects reached.')

    def loadVideoAction(self):
        imgPath, _ = QFileDialog.getOpenFileName()
        if imgPath != "":
            self.captureState = True
            self.capture = VideoCapture(imgPath)
            self.timer.stop()
            fps = self.capture.get(cv2.CAP_PROP_FPS)
            self.timer.start(int(1000 / fps))

    #Calculates the matches between the image captured by the webcam/video and the objects stored. Stores them in obtainedMatches().
    #Returns a list containing, for each of the three (or two, or however many there are), the scale with the most matches.
    def calculateMatches(self):
        if len(self.bf.getTrainDescriptors()) != 0:
            self.imageKeypointList, des = self.orb.detectAndCompute(
                self.grayImage, None)
            obtainedMatches = self.bf.knnMatch(des, k=3)

            #print("obtainedMatches" + str([len(z) for z in obtainedMatches]))

            orderedMatches = [[] for z in range(len(self.imageList) * 3)]
            for l in obtainedMatches:
                for m in l:
                    #print("match id: " + str(m.imgIdx))
                    if (
                            m.imgIdx < len(self.imageList) * 3
                    ):  # Workaround: imgIdx sometimes came back as 1056, which makes no sense
                        orderedMatches[m.imgIdx].append(m)

            #print("before" + str(len(orderedMatches[1])))
            #print("obtainedMatches length" + str(len(obtainedMatches)))

            #print("keypoints antes 1: " + str(len(self.imageList[0].returnKpDes()[0][0])))
            #print("keypoints antes 2: " + str(len(self.imageList[0].returnKpDes()[0][1])))
            #print("keypoints antes 3: " + str(len(self.imageList[0].returnKpDes()[0][2])))
            GoodOrderedMatches = []
            #Iterate over the collection of matches
            '''
            for i in orderedMatches:
                newOrderedMatches = []
                for id in range (len(i)):
                    #Tells us that the match is valid, and inserts it in the appropriate list
                    if id < len(i) - 1:
                        #print("id: " + str(id) + "len i: " + str(len(i)))
                        if i[id].distance < i[id + 1].distance * 0.8:
                            newOrderedMatches.append(i[id])
                    GoodOrderedMatches.append(newOrderedMatches)
            
            orderedMatches = GoodOrderedMatches
            '''

            #print("antes " + str(len(orderedMatches[0])))
            '''
            aux = copy.copy(orderedMatches)

            for i in aux:
                for j in i:
                    if j.distance > 0:
                        i.remove(j)
                        
            orderedMatches = copy.copy(aux)
            '''

            for i in orderedMatches:
                j = 0
                while j < len(i):
                    if i[j].distance > 50:
                        i.pop(j)
                    else:
                        j += 1

            #print("despues" + str(len(orderedMatches[0])))

            #print("after" + str(len(orderedMatches[1])))
            #print("orderedMatches" + str([len(z) for z in orderedMatches]))

            #Iterate over the list of objects, and an id from 0 to number of objects
            for id, image in enumerate(self.imageList, 0):
                index = id * 3
                # Sorts the orderedMatches by the number of matches of each scale, picks the one with most matches and
                # assigns it to scaleWithMostMatches, also returns the position on x
                scaleWithMostMatches = sorted(
                    [[x, y]
                     for x, y in enumerate(orderedMatches[index:index + 3], 0)
                     ],
                    key=lambda x: len(x[1]),
                    reverse=True)[0]

                imageScales = orderedMatches[index:index + 3]
                mostMatchesId = -1
                mostMatchesNum = -1
                mostMatches = []
                for i in range(len(imageScales)):
                    #print("Matches for scale " + str(i) + ": " + str(len(imageScales[i])))
                    if len(imageScales[i]) > mostMatchesNum:
                        mostMatches = imageScales[i]
                        mostMatchesNum = len(imageScales[i])
                        mostMatchesId = i

                self.colorImageM = np.zeros((700, 240, 3))
                self.colorImageM = cv2.imread("Practica 3/noMatches.jpg")
                self.visorM.set_open_cv_imageColor(self.colorImageM)
                #self.noMatchesImg = cv2.resize(self.noMatchesImg, (700, 240))
                #self.visorM.set_open_cv_imageColor(self.noMatchesImg)
                self.visorM.update()

                #print(len(scaleWithMostMatches[1]))
                if (len(mostMatches) > 50):
                    #if (len(scaleWithMostMatches[1]) > 10):
                    points1 = []
                    points2 = []
                    for j in mostMatches:
                        #for j in scaleWithMostMatches[1]:
                        points1.append(self.imageKeypointList[j.queryIdx].pt)
                        #print("..." + str(len(image.returnKpDes()[0][scaleWithMostMatches[0]])))
                        #print("trainidx" + str(j.trainIdx))
                        imageKp, _, _ = image.returnKpDes()
                        imageKp = imageKp[mostMatchesId]
                        #print("Should be a number: " + str(len(imageKp)) + " The one that crashes it: " + str(j.trainIdx))
                        points2.append(imageKp[j.trainIdx].pt)

                    #print("Points1: " + str(len(points1)) + " Points2: " + str(len(points2)))
                    h, mask = cv2.findHomography(np.array(points2),
                                                 np.array(points1), cv2.RANSAC)

                    if h is not None:
                        if len(mostMatches) > 50:

                            corners = np.zeros((4, 2), dtype=np.float32)

                            corners[
                                1,
                                0] = image.getScales()[mostMatchesId].shape[1]
                            corners[
                                2,
                                0] = image.getScales()[mostMatchesId].shape[1]
                            corners[
                                2,
                                1] = image.getScales()[mostMatchesId].shape[0]
                            corners[
                                3,
                                1] = image.getScales()[mostMatchesId].shape[0]

                            #for id, i in enumerate(corners, 0):
                            #    print("Corner " + str(id) + " : " + str(i))

                            #print("corners: " + str(corners))

                            M = cv2.perspectiveTransform(
                                np.array([corners]), h)

                            #print("M: " + str(M))

                            # cv2.line expects integer pixel coordinates
                            cv2.line(self.grayImage,
                                     (int(M[0][0][0]), int(M[0][0][1])),
                                     (int(M[0][1][0]), int(M[0][1][1])),
                                     (255, 255, 255), 4)
                            cv2.line(self.grayImage,
                                     (int(M[0][1][0]), int(M[0][1][1])),
                                     (int(M[0][2][0]), int(M[0][2][1])),
                                     (255, 255, 255), 4)
                            cv2.line(self.grayImage,
                                     (int(M[0][2][0]), int(M[0][2][1])),
                                     (int(M[0][3][0]), int(M[0][3][1])),
                                     (255, 255, 255), 4)
                            cv2.line(self.grayImage,
                                     (int(M[0][3][0]), int(M[0][3][1])),
                                     (int(M[0][0][0]), int(M[0][0][1])),
                                     (255, 255, 255), 4)

                            #imageAux = np.zeros((240, 320), np.uint8)
                            imageAux = self.mapObjects[
                                self.objectList.currentText()]
                            imageAux = np.array(imageAux.getScales()[0],
                                                dtype=np.uint8)

                            self.showMatAction(
                                self.grayImage, self.imageKeypointList,
                                imageAux,
                                self.ObjectKeyPointList[mostMatchesId],
                                orderedMatches)

    def showMatAction(self, img1, kp1, img2, kp2, matches):

        # BGR (142, 255, 132) light blue
        # (255, 102, 51) light green
        self.colorImageM = cv2.drawMatchesKnn(img1,
                                              kp1,
                                              img2,
                                              kp2[0],
                                              matches[0:1],
                                              None,
                                              flags=2,
                                              matchColor=(142, 255, 132))
        #cv2.imwrite('prueba.png', self.colorImageM)
        self.colorImageM = cv2.resize(self.colorImageM, (700, 240))
        self.colorImageM = cv2.cvtColor(self.colorImageM, cv2.COLOR_BGR2RGB)
        self.visorM.set_open_cv_imageColor(self.colorImageM)
        self.visorM.update()

    def addAction(self):
        if self.objectList.count() != 3:
            self.addObject.show()
        else:
            message = QtWidgets.QMessageBox()
            message.about(
                None, 'Error',
                'Error adding object: Maximum number of objects reached.')

    def addOkAction(self):
        self.addObject.hide()

        if self.actionReady is True:
            #Get coordinates and size of the selected rectangle
            y_OffSet = self.imageWindow.y()
            x_OffSet = self.imageWindow.x()
            height = self.imageWindow.height()
            width = self.imageWindow.width()

            #Get the relevant slice of the source image
            crop_img = copy.copy(self.grayImage[y_OffSet:y_OffSet + height,
                                                x_OffSet:x_OffSet + width])

            #Add the image to the comboBox and the list
            imgName = self.addObject.lineEdit.text()
            image = ImageObject(imgName, crop_img, self.orb)
            kp, desc, valid = image.returnKpDes()

            if valid is True:
                self.imageList.append(image)
                self.mapObjects[imgName] = self.imageList[-1]
                self.objectList.addItem(imgName)
                #Get the image descriptors and add them to the descriptor collection

                print("DESC:")
                auxList = []
                for i in desc:
                    print(len(i))
                    auxList.append(i)
                self.bf.add(auxList)
                print("KP:")
                for i in kp:
                    print(len(i))
                    self.ObjectKeyPointList.append([i])
            else:
                message = QtWidgets.QMessageBox()
                message.about(
                    None, 'Error',
                    'Error adding object: The selected object is not descriptive enough.'
                )

    def renameAction(self):
        self.renameObject.show()

    def renameOkAction(self):
        self.renameObject.hide()

    def removeAction(self):
        if self.objectList.currentIndex() != -1:
            del self.imageList[self.objectList.currentIndex()]
        self.objectList.removeItem(self.objectList.currentIndex())
        for i in range(self.objectList.currentIndex(),
                       self.objectList.currentIndex() + 2, 1):
            if i is not None:
                del self.imageKeypointList[i]
        #TODO: Properly regenerate the descriptor and keypoint lists
        self.bf.clear()
        for image in self.imageList:
            _, des, _ = image.returnKpDes()
            for d in des:
                self.bf.add([d])

    def captureButtonAction(self):
        if self.captureState is False:
            self.capture = VideoCapture(0)
            self.captureButton.setChecked(True)
            self.captureButton.setText("Stop Capture")
            self.captureState = True

        else:
            self.captureState = False
            self.captureButton.setChecked(False)
            self.captureButton.setText("Start Capture")

    def selectWindow(self, p, w, h):
        if w > 0 and h > 0:
            pEnd = QPointF()
            self.imageWindow.setX(p.x() - w / 2)
            if self.imageWindow.x() < 0:
                self.imageWindow.setX(0)
            self.imageWindow.setY(p.y() - h / 2)
            if self.imageWindow.y() < 0:
                self.imageWindow.setY(0)
            pEnd.setX(p.x() + w / 2)
            if pEnd.x() >= 320:
                pEnd.setX(319)
            pEnd.setY(p.y() + h / 2)
            if pEnd.y() >= 240:
                pEnd.setY(239)
            self.imageWindow.setWidth(pEnd.x() - self.imageWindow.x())
            self.imageWindow.setHeight(pEnd.y() - self.imageWindow.y())
            self.winSelected = True

    def deSelectWindow(self):
        self.winSelected = False
        self.actionReady = True

    def timerLoop(self):
        if (self.captureState == True and self.capture.isOpened() == True):
            ret, self.grayImage = self.capture.read()
            if ret is False:
                self.capture.release()
                self.captureState = False
                self.grayImage = np.zeros((240, 320), np.uint8)
                self.grayImageDest = np.zeros((240, 320), np.uint8)
                self.timer.stop()
                self.timer.start(16)
                return
            self.grayImage = cv2.resize(self.grayImage, (320, 240))
            self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
            #print(self.grayImage.shape)
            kp = self.orb.detect(self.grayImage, None)
            kp, des = self.orb.compute(self.grayImage, kp)
            self.grayImageDest = copy.copy(self.grayImage)
            self.grayImageDest = cv2.drawKeypoints(
                self.grayImage,
                kp,
                self.grayImageDest,
                color=(255, 255, 255),
                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG)
            self.calculateMatches()
            #print(matches)
            #print(keypoints)
        if self.winSelected:
            self.visorS.drawSquare(self.imageWindow, Qt.green)

        # FIXED: astype is needed to convert the cv type to the qt expected one
        self.visorS.set_open_cv_image(self.grayImage)
        # FIXED: astype is needed to convert the cv type to the qt expected one

        self.visorD.set_open_cv_image(self.grayImageDest)
        self.visorS.update()
        self.visorD.update()

        self.visorM.set_open_cv_imageColor(self.colorImageM)
        self.visorM.update()
Example #7
    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        uic.loadUi('mainwindow.ui', self)
        #uic.loadUi('Practica 5/mainwindow.ui', self)

        ##########################################################

        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False
        self.bothImg = False

        self.disparity = np.zeros((240, 320), np.float32)
        #self.timer = QTimer()
        #self.timer.timeout.connect(self.timerExpired)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        self.grayImage = np.zeros((240, 320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_RGB888)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: the original np.zeros dropped 2 of the 3 channels

        self.grayImageDest = np.zeros((240,320), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_RGB888)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        self.estimDispImg = np.zeros((240, 320), np.uint8)
        self.imgS_2 = QImage(320, 240, QImage.Format_RGB888)
        self.visorS_2 = ImgViewer(320, 240, self.imgS_2, self.imageFrameS_2)
        
        self.realDispImg = np.zeros((240,320), np.uint8)
        self.imgD_2 = QImage(320, 240, QImage.Format_RGB888)
        self.visorD_2 = ImgViewer(320, 240, self.imgD_2, self.imageFrameD_2)

        ##############################################################################

        ##################      Buttons     ##################

        self.loadButton_1.clicked.connect(self.loadAction)
        self.loadButton_2.clicked.connect(self.loadAction2)
        self.loadTruth_Button.clicked.connect(self.loadGroundTruth)
        self.initDisparity_button.clicked.connect(self.initializeDisparity)
        self.propDisparity_button.clicked.connect(self.propDispAction)
        self.visorS_2.windowSelected.connect(self.dispClick)
        #self.spinBoxDifference.valueChanged.connect(self.fillImgRegions)
        self.checkBoxRange.stateChanged.connect(self.checkBoxAction)
        self.kernelSpinBox.valueChanged.connect(self.kernelAction)
        self.iterationSpinBox.valueChanged.connect(self.iterationAction)
        self.kernel = 0
        self.iterations = 0
        self.goodCorners = []
        self.notSoGoodCorners = []

        ######################################################
        
        ##############################################################

        self.edges = np.zeros((240, 320), np.int8)
        self.imgRegions = np.full((240, 320),-1, dtype = np.int32)
        self.listRegions = []
        self.origWidth = 0
        self.fixedPoints = np.zeros((240, 320), dtype = bool)
        self.shiftedPoints = np.zeros((240, 320), dtype = bool)
Example #8
class Ui_MainWindow(QtWidgets.QMainWindow):

    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        uic.loadUi('mainwindow.ui', self)
        #uic.loadUi('Practica 5/mainwindow.ui', self)

        ##########################################################

        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False
        self.bothImg = False

        self.disparity = np.zeros((240, 320), np.float32)
        #self.timer = QTimer()
        #self.timer.timeout.connect(self.timerExpired)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: OpenCV images were created with the width/height values switched, so the copy failed
        # FIXED: the original np.zeros dropped 2 of the 3 channels
        self.grayImage = np.zeros((240, 320), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_RGB888)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: the original np.zeros dropped 2 of the 3 channels

        self.grayImageDest = np.zeros((240,320), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_RGB888)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        self.estimDispImg = np.zeros((240, 320), np.uint8)
        self.imgS_2 = QImage(320, 240, QImage.Format_RGB888)
        self.visorS_2 = ImgViewer(320, 240, self.imgS_2, self.imageFrameS_2)
        
        self.realDispImg = np.zeros((240,320), np.uint8)
        self.imgD_2 = QImage(320, 240, QImage.Format_RGB888)
        self.visorD_2 = ImgViewer(320, 240, self.imgD_2, self.imageFrameD_2)

        ##############################################################################

        ##################      Buttons     ##################

        self.loadButton_1.clicked.connect(self.loadAction)
        self.loadButton_2.clicked.connect(self.loadAction2)
        self.loadTruth_Button.clicked.connect(self.loadGroundTruth)
        self.initDisparity_button.clicked.connect(self.initializeDisparity)
        self.propDisparity_button.clicked.connect(self.propDispAction)
        self.visorS_2.windowSelected.connect(self.dispClick)
        #self.spinBoxDifference.valueChanged.connect(self.fillImgRegions)
        self.checkBoxRange.stateChanged.connect(self.checkBoxAction)
        self.kernelSpinBox.valueChanged.connect(self.kernelAction)
        self.iterationSpinBox.valueChanged.connect(self.iterationAction)
        self.kernel = 0
        self.iterations = 0
        self.goodCorners = []
        self.notSoGoodCorners = []

        ######################################################
        
        ##############################################################

        self.edges = np.zeros((240, 320), np.int8)
        self.imgRegions = np.full((240, 320),-1, dtype = np.int32)
        self.listRegions = []
        self.origWidth = 0
        self.fixedPoints = np.zeros((240, 320), dtype = bool)
        self.shiftedPoints = np.zeros((240, 320), dtype = bool)

        ##############################################################

    '''
    What we have to do is fill each region with a value.
    Iterate over the whole image. If we find a point that doesn't have a region yet, we call floodFill.
    floodFill fills the region in the mask with a value and gives us a bounding rectangle. Using those,
    we iterate over that rectangle and add the points with that value to imgRegions,
    so we don't iterate multiple times over the same region. After that, we regenerate the mask
    to avoid having different regions share the same value. A sketch of this idea follows.
    '''
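
    # A minimal sketch (hypothetical helper, not in the original excerpt) of the region
    # filling described above, assuming self.grayImage is the 240x320 source and
    # self.imgRegions / self.listRegions were initialised in __init__.
    def fillImgRegionsSketch(self, threshold=10):
        regionId = 0
        for i in range(240):
            for j in range(320):
                if self.imgRegions[i][j] == -1:
                    # Regenerate the mask for every region (it must be 2 px larger than the image).
                    mask = np.zeros((242, 322), np.uint8)
                    _, _, mask, rect = cv2.floodFill(
                        self.grayImage, mask, (j, i), 0,
                        loDiff=threshold, upDiff=threshold,
                        flags=4 | cv2.FLOODFILL_MASK_ONLY | (1 << 8))
                    x, y, w, h = rect
                    # Only visit the bounding rectangle returned by floodFill.
                    for r in range(y, y + h):
                        for c in range(x, x + w):
                            if mask[r + 1][c + 1] and self.imgRegions[r][c] == -1:
                                self.imgRegions[r][c] = regionId
                    self.listRegions.append(regionId)
                    regionId += 1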
    
    def kernelAction(self):
        self.kernel = self.kernelSpinBox.value()
        print("kernel: " , self.kernel)
    def iterationAction(self):
        self.iterations = self.iterationSpinBox.value()
        print("iterations: " , self.iterations)

    def checkBoxAction(self):
        if self.checkBoxRange.isChecked():
            self.showCorners()

    def showCorners(self):
        auxImg = cv2.cvtColor(self.grayImage, cv2.COLOR_GRAY2RGB)
        auxImg2 = cv2.cvtColor(self.grayImageDest, cv2.COLOR_GRAY2RGB)
        
        auxCorners = self.calculateCorners(5)
        unMatchedCorners = np.zeros((240, 320), np.uint8)

        green = [39, 225, 20]
        red = [207, 4, 44]

        print("shape"+ str(auxCorners.shape))

        for i in range(1,239,1):
            for j in range(1,319,1):
                if auxCorners[i][j] == True and self.fixedPoints[i][j] == False:
                    unMatchedCorners[i][j] = True
                else:
                    unMatchedCorners[i][j] = False

        for i in range(1,237,1):
            for j in range(1,317,1):
                if self.fixedPoints[i][j] == True:
                    auxImg[i][j] = green
                    for k in range(0,4):
                        auxImg[i-k][j-k] = green
                        auxImg[i+k][j+k] = green
                        auxImg[i-k][j+k] = green
                        auxImg[i+k][j-k] = green
                if unMatchedCorners[i][j] == True:
                    auxImg[i][j] = red
                    for k in range(0,4):
                        auxImg[i-k][j-k] = red
                        auxImg[i+k][j+k] = red
                        auxImg[i-k][j+k] = red
                        auxImg[i+k][j-k] = red

        for i in range(1,237,1):
            for j in range(1,317,1):
                if self.shiftedPoints[i][j] == True:
                    auxImg2[i][j] = green
                    for k in range(0,4):
                        auxImg2[i-k][j-k] = green
                        auxImg2[i+k][j+k] = green
                        auxImg2[i-k][j+k] = green
                        auxImg2[i+k][j-k] = green
                        
        #cv2.imshow('img', auxImg)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()

        #plt.subplot(121),plt.imshow(auxCorners,cmap = 'gray')
        #plt.show()

        self.visorS.set_open_cv_image(auxImg)
        self.visorS.update()
        self.visorD.set_open_cv_image(auxImg2)
        self.visorD.update()


    def calculateCorners(self, w):
        
        dst = cv2.cornerHarris(self.grayImage, 3, 3, 0.04)
        
        self.notSoGoodCorners = dst

        threshArr = (dst > 1e-6)
        #threshArr = (dst > 1e-6)
        
        #List of good corners. Contains HarrisValue, i, j, Deleted.
        cornerList = [] 
        for i in range(240):
            for j in range(320):
                if threshArr[i][j] == True:
                    cornerList.append([dst[i][j], i, j, False])
        #cornerList.sort(key = lambda x: x[0], reverse = True)
        cornerList = sorted(cornerList, reverse = True, key=lambda x: x[0])
        for i in range(len(cornerList)): 
            if cornerList[i][3]==False:
                for j in range(i+1, len(cornerList), 1):
                    if cornerList[j][3]==False:
                        XdistSq = (cornerList[i][1]-cornerList[j][1]) ** 2
                        YdistSq = (cornerList[i][2]-cornerList[j][2]) ** 2
                        dist = math.sqrt(XdistSq+YdistSq)
                        if dist < 3:
                            cornerList[j][3] = True

        for corner in cornerList:
            if corner[3]==True:
                threshArr[corner[1]][corner[2]] = False
        #self.goodCorners = copy.deepcopy(dst)
        self.calculateDisparityCorners(threshArr, w)
        
        #Convert the boolean array into a black and white one
        #self.cornersImg = threshArr * np.full((240,320), 255 ,np.uint8)
        #self.visorS_2.set_open_cv_image(self.cornersImg)
        #self.visorS_2.update()

        #plt.subplot(121),plt.imshow(auxMatrix,cmap = 'gray')
        #plt.show()
        return threshArr


    def calculateDisparityCorners(self, threshArr, w):
        """
        Calculates the disparity value for the fixed points.
        For each of them, it tries to match a region of w points around the
        corner with the horizontal line at that height. When it finds them,
        it calculates the disparity value for the most likely point
        and sets it in the disparity array.
        """
        cornerSquare = np.zeros((2*w+1,2*w+1), dtype=np.uint8)
        method = cv2.TM_CCOEFF_NORMED
        
        for i in range(5,235,1):
            for j in range(5,315,1):
                if threshArr[i][j] == True:
                    yl = i 
                    xl = j
                    for k in range (-w, w+1, 1):
                        if(i+k >= 0 and i+k <240):
                            for l in range(-w, w+1, 1):
                                if (j+l >= 0 and j+l < 320):
                                    cornerSquare[w+k][w+l] = self.grayImage[i+k][j+l]
                    line, heightDiff = self.getEpipolarLine(w, yl)
                    #print("shapes: " , line.shape, cornerSquare.shape)
                    if heightDiff >= 0:
                        res = cv2.matchTemplate(line, cornerSquare, method)
                        
                        #TODO: Check if max_val is good
                        min_val, max_val, minLoc , maxLoc = cv2.minMaxLoc(res)
                        #print("TEST", maxLoc)
                        if (max_val > 0.95):
                            self.fixedPoints[i][j] = True
                            self.disparity[i][j] = xl - (maxLoc[0] + w)

        for i in range(1,239,1):
            for j in range(1,319,1):
                if self.fixedPoints[i][j] == True:
                    shift = self.disparity[i][j]
                    # Guard against the shifted column falling outside the image.
                    if 0 <= j - int(shift) < 320:
                        self.shiftedPoints[i][j-int(shift)] = self.fixedPoints[i][j]
                        
                    '''
                    print("Minimum value: ", str(min_val))
                    print("Maximum value: ", str(max_val))
                    print("Minimum location: ", str(minLoc))
                    print("Maximum location: ", str(maxLoc))
                    '''
        if self.checkBoxCorners.isChecked() == True:
            rightCorners = self.calculateGoodCornersRight(w)
            self.shiftedPoints = np.logical_and(self.shiftedPoints, rightCorners)
            
                    
        
        return threshArr

    def calculateGoodCornersRight(self, w):

        dst = cv2.cornerHarris(self.grayImageDest, 3, 3, 0.04)


        threshArr = (dst > 1e-6)
        
        #List of good corners. Contains HarrisValue, i, j, Deleted.
        cornerList = [] 
        for i in range(240):
            for j in range(320):
                if threshArr[i][j] == True:
                    cornerList.append([dst[i][j], i, j, False])
        #cornerList.sort(key = lambda x: x[0], reverse = True)
        cornerList = sorted(cornerList, reverse = True, key=lambda x: x[0])
        for i in range(len(cornerList)): 
            if cornerList[i][3]==False:
                for j in range(i+1, len(cornerList), 1):
                    if cornerList[j][3]==False:
                        XdistSq = (cornerList[i][1]-cornerList[j][1]) ** 2
                        YdistSq = (cornerList[i][2]-cornerList[j][2]) ** 2
                        dist = math.sqrt(XdistSq+YdistSq)
                        if dist < 3:
                            cornerList[j][3] = True

        for corner in cornerList:
            if corner[3]==True:
                threshArr[corner[1]][corner[2]] = False
        #self.goodCorners = copy.deepcopy(dst)

        cornerSquare = np.zeros((2*w+1,2*w+1), dtype=np.uint8)
        method = cv2.TM_CCOEFF_NORMED
        rightCorners = np.full((240,320), False, dtype = bool)
        rightDisp = np.full((240, 320), 0.0, dtype = np.float32)
        for i in range(5,235,1):
            for j in range(5,315,1):
                if threshArr[i][j] == True:
                    yl = i 
                    xl = j
                    for k in range (-w, w+1, 1):
                        if(i+k >= 0 and i+k <240):
                            for l in range(-w, w+1, 1):
                                if (j+l >= 0 and j+l < 320):
                                    cornerSquare[w+k][w+l] = self.grayImageDest[i+k][j+l]
                    line, heightDiff = self.getEpipolarLine(w, yl)
                    #print("shapes: " , line.shape, cornerSquare.shape)
                    if heightDiff >= 0:
                        res = cv2.matchTemplate(line, cornerSquare, method)
                        
                        #TODO: Check if max_val is good
                        min_val, max_val, minLoc , maxLoc = cv2.minMaxLoc(res)
                        #print("TEST", maxLoc)
                        if (max_val > 0.95):
                            rightCorners[i][j] = True
                            rightDisp[i][j] = xl - (maxLoc[0] + w)
        

        return rightCorners
        

    def getEpipolarLine(self, w, yl):
        # Return the horizontal strip of the right image centred on row yl (clipped at
        # the image borders) together with its top offset; a negative offset tells the
        # caller the strip was clipped at the top and the corner should be skipped.
        if yl-w < 0:
            return self.grayImageDest[0:yl+w + 1], yl-w
        elif yl+w >= 240:
            return self.grayImageDest[yl-w:240], yl-w
        else:
            return self.grayImageDest[yl-w:yl+w + 1], yl-w

    def calculateDiff(self):

        count = 0
        percentDiff = 0.0
        for i in range(240):
            for j in range(320):
                if self.estimDispImg[i][j] != 0:
                      count += 1
                      percentDiff += abs(self.realDispImg[i][j]-self.estimDispImg[i][j])/255.0

        if count > 0:
            self.dispPerc.display(percentDiff/count)

    def fillImgRegions(self):

        regionID = 1
        
        self.edges = cv2.Canny(self.grayImage,40,120)
        
        self.mask = cv2.copyMakeBorder(self.edges, 1,1,1,1, cv2.BORDER_CONSTANT, value = 255)
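        # FLOODFILL_MASK_ONLY: only the mask is updated, the image stays untouched;
        # 4 selects 4-connectivity; (1 << 8) makes the fill write the value 1 into the mask.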
        floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | 1 << 8
        
        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                #We found a new region:
                
                if self.imgRegions[i][j] == -1: #Optimize this, it's the part that makes it stupid slow
                    if self.edges[i][j] == 0:
                    
                        _, _, newMask, rect = cv2.floodFill(self.grayImage, self.mask, (j,i), 1, loDiff = 10, 
                        upDiff = 10, flags = floodFlags)
                    
                        newRegion = region(regionID, rect)

                        for k in range (rect[0], rect[0] + rect[2], 1):
                            for l in range(rect[1], rect[1] + rect[3], 1):
                                if newMask[l+1][k+1] == 1 and self.imgRegions[l][k] == -1:
                                    self.imgRegions[l][k] = regionID
                        
                        self.listRegions.append(copy.deepcopy(newRegion))

                        regionID += 1
                    #self.mask = cv2.copyMakeBorder(self.edges, 1,1,1,1, cv2.BORDER_CONSTANT, value = 255)
        
        for i in range(1,239,1):
            for j in range(1,319,1):
                if self.imgRegions[i][j] == -1:
                    for k in range(-1,2,1):
                        for l in range(-1,2,1):
                            if self.imgRegions[i+k][j+l] != -1 and self.imgRegions[i][j] == -1:
                                self.imgRegions[i][j] = self.imgRegions[i+k][j+l]
        
        #plt.subplot(121),plt.imshow(self.imgRegions,cmap = 'gray')
        #plt.show()   
        #self.visorD.set_open_cv_image(self.grayImageDest)
        #self.visorD.update()
        #self.imgRegions = np.full((240, 320), -1, dtype=np.int32)

    def initializeDisparity(self):

        self.calculateCorners(5)
        
        self.fillImgRegions()   
        #print(len(self.listRegions))
        for i in range(240):
            for j in range(320):
                if self.disparity[i][j] != 0:
                    #print(self.imgRegions[i][j]-1)
                    region = self.listRegions[self.imgRegions[i][j]-1]
                    region.addPoint(self.disparity[i][j])
                    #self.listRegions[self.imgRegions[i][j]-1].addPoint(self.disparity[i][j])
        
        for i in self.listRegions:
            i.calcAverage()
        #print(np.amax(self.imgRegions))
        for i in range(240):
            for j in range(320):
                if self.disparity[i][j] == 0:
                    #print("max reg = ", len(self.listRegions), " index= ", self.imgRegions[i][j])
                    self.disparity[i][j] = self.listRegions[self.imgRegions[i][j]-1].returnAverage()
        
        self.showDisparity()
        self.calculateDiff()

    def propDispAction(self):

        for i in range(self.iterations):
            self.propagateDisparity(self.kernel)


        self.showDisparity()
        self.calculateDiff()
    '''
    def timerExpired(self):
        self.propagateDisparity(1)
    '''

    def propagateDisparity(self, envWidth):
        
        '''
        contTrue = 0
        contFalse = 0

        for i in range(1,240,1):
            for j in range(1,320,1):
                if self.fixedPoints[i][j] == True:
                    contTrue += 1
                else:
                    contFalse += 1

        print("contTrue: " , contTrue)
        print("contFalse: " , contFalse)
        '''
        
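        # Propagation step: every non-fixed pixel takes the average disparity of the
        # neighbours (within envWidth) that belong to its own region.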
        for i in range(envWidth, 240-envWidth):
            for j in range(envWidth, 320-envWidth):
                if self.fixedPoints[i][j] == False: #To avoid changing fixed points
                    avgDisp = 0.0
                    count = 0
                    origRegion = self.imgRegions[i][j]
                    # TODO: change this
                    for k in range(-envWidth,envWidth+1,1):
                        # TODO: change this
                        for l in range(-envWidth,envWidth+1,1):
                            #To avoid taking into account the point itself or adding points from other regions
                            if self.imgRegions[i+k][j+l] == origRegion: 
                                #print("origRegion")
                                avgDisp += self.disparity[i+k][j+l]
                                count += 1
                    if count != 0:
                        self.disparity[i][j] = float(avgDisp/count)
                        #print("disparity i,j: " , i , j , self.disparity[i][j])
        

    def showDisparity(self):
        for i in range(240):
            for j in range(320):
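                # Scale the disparity back to the original image width (it was computed on a
                # 320-pixel-wide resize); the factor 3 appears to be a display gain only.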
                value = 3*self.disparity[i][j]*self.origWidth/320
                if value > 255:
                    value = 255
                self.estimDispImg[i][j] = value
        
        self.visorS_2.set_open_cv_image(cv2.cvtColor(self.estimDispImg, cv2.COLOR_GRAY2RGB))
        self.visorS_2.update()

    def dispClick(self, point, posX, posY):
        X = int(point.x() - posX/2)
        if X < 0: 
            X = 0
        Y = int(point.y()-posY/2)
        if Y < 0:
            Y = 0
        #print(self.disparity[Y][X])
        #print(self.estimDispImg[Y][X])
        self.estimatedDisp.display(self.estimDispImg[Y][X])
        self.trueDisp.display(self.realDispImg[Y][X])

    def loadAction(self):
        imgPath, _ = QFileDialog.getOpenFileName()
        
        if imgPath != "":
            self.grayImage = cv2.imread(imgPath)
            self.origWidth = self.grayImage.shape[1]
            self.grayImage = cv2.resize(self.grayImage, (320, 240))
            self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
            
                
            
            self.visorS.set_open_cv_image(cv2.cvtColor(self.grayImage, cv2.COLOR_GRAY2RGB))
            self.visorS.update()

    def loadAction2(self):
        imgPath, _ = QFileDialog.getOpenFileName()
        
        if imgPath != "":
            self.grayImageDest = cv2.imread(imgPath)
            self.grayImageDest = cv2.resize(self.grayImageDest, (320, 240))
            self.grayImageDest = cv2.cvtColor(self.grayImageDest, cv2.COLOR_BGR2GRAY)
            
            self.visorD.set_open_cv_image(cv2.cvtColor(self.grayImageDest, cv2.COLOR_GRAY2RGB))
            
            self.visorD.update()
    
    def loadGroundTruth(self):
        imgPath, _ = QFileDialog.getOpenFileName()
    
        if imgPath != "":
            self.realDispImg = cv2.imread(imgPath)
            self.realDispImg = cv2.resize(self.realDispImg, (320, 240))
            self.realDispImg = cv2.cvtColor(self.realDispImg, cv2.COLOR_BGR2GRAY)
            self.visorD_2.set_open_cv_image(cv2.cvtColor(self.realDispImg, cv2.COLOR_GRAY2RGB))
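
# A minimal standalone sketch of the idea used above: detect Harris corners in the
# left image and match a small window around each corner against the same rows of
# the right image (the epipolar line of a rectified pair) with cv2.matchTemplate.
# The window half-width, the corner threshold and the 0.95 acceptance value are
# illustrative assumptions, and the 3-pixel non-maximum suppression done by the
# class above is omitted here for brevity.
import cv2
import numpy as np


def sparse_disparity(left_gray, right_gray, w=5, accept=0.95):
    """Return a float32 disparity map that is non-zero only at matched corners."""
    h, width = left_gray.shape
    disparity = np.zeros((h, width), np.float32)
    harris = cv2.cornerHarris(left_gray, 3, 3, 0.04)
    for y, x in np.argwhere(harris > 0.01 * harris.max()):
        # Skip corners whose window would leave the image.
        if y < w or y >= h - w or x < w or x >= width - w:
            continue
        template = left_gray[y - w:y + w + 1, x - w:x + w + 1]
        strip = right_gray[y - w:y + w + 1, :]        # same rows in the right image
        res = cv2.matchTemplate(strip, template, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(res)
        if max_val > accept:
            # max_loc[0] is the left edge of the best match; +w gives its centre.
            disparity[y, x] = x - (max_loc[0] + w)
    return disparity

# Usage (paths are placeholders):
#   left = cv2.resize(cv2.imread("left.png", cv2.IMREAD_GRAYSCALE), (320, 240))
#   right = cv2.resize(cv2.imread("right.png", cv2.IMREAD_GRAYSCALE), (320, 240))
#   disp = sparse_disparity(left, right)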
Beispiel #9
0
class Ui_MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super(Ui_MainWindow, self).__init__()

        ##################      UI loading      ##################

        #uic.loadUi('mainwindow.ui', self)
        uic.loadUi('Practica 4/mainwindow.ui', self)

        ##########################################################

        self.capture = VideoCapture(0)
        self.captureState = True
        self.captureButtonAction()
        self.colorState = False
        self.imageWindow = QRect()

        self.winSelected = False
        self.actionReady = False
        self.openVideo = False

        # Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        ##################      Image arrays and viewer objects     ##################

        # FIXED: Opencv images where created with wrong width height values (switched) so the copy failed
        # FIXED: original removed 2 of the 3 chanels with the np.zeros
        self.grayImage = np.zeros((240, 320), np.uint8)
        self.colorImage = np.zeros((240, 320, 3), np.uint8)
        # self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)
        self.imgS = QImage(320, 240, QImage.Format_RGB888)
        self.visorS = ImgViewer(320, 240, self.imgS, self.imageFrameS)

        # FIXED: original removed 2 of the 3 chanels with the np.zeros

        self.grayImageDest = np.zeros((240, 320), np.uint8)
        self.colorImageDest = np.zeros((240, 320, 3), np.uint8)
        self.imgD = QImage(320, 240, QImage.Format_RGB888)
        self.visorD = ImgViewer(320, 240, self.imgD, self.imageFrameD)

        ##############################################################################

        ##################      Buttons     ##################

        self.colorButton.clicked.connect(self.colorButtonAction)
        self.captureButton.clicked.connect(self.captureButtonAction)
        self.loadButton.clicked.connect(self.loadAction)
        self.spinBoxDifference.valueChanged.connect(self.spinBoxAction)

        ######################################################

        ##############################################################

        self.edges = np.zeros((240, 320), np.int8)
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)
        self.listRegions = []

        ##############################################################

    def spinBoxAction(self):
        if self.colorState is True:
            self.fillImgRegions()
        else:
            self.fillImgRegionsColor()

    '''
    What we have to do is fill each region with a value.
    Iterate over the whole image. If we find a point that does not belong to a region yet, call floodFill.
    floodFill fills the region in the mask with a value and returns a bounding rectangle. Using those,
    we iterate over that rectangle and add the points carrying that value to imgRegions,
    so we never iterate over the same region twice. Afterwards the mask is regenerated
    to avoid giving different regions the same value.
    (A standalone flood-fill sketch illustrating these steps follows this class.)
    '''

    def fillImgRegions(self):

        #print("principio" + str(self.imgRegions))

        #np.set_printoptions(threshold = np.inf)

        regionID = 1
        regionList = []
        #print("imagen: " + str(self.grayImage.shape))
        # self.printNumpyArray(self.grayImage)
        self.edges = cv2.Canny(self.grayImage, 40, 120)

        # print("---")
        #print("bordes: " + str(self.edges))
        # print("Stop1")
        # self.printNumpyArray(self.edges)
        self.mask = cv2.copyMakeBorder(self.edges,
                                       1,
                                       1,
                                       1,
                                       1,
                                       cv2.BORDER_CONSTANT,
                                       value=255)
        # print(self.mask.shape)
        # print("Stop")
        # self.printNumpyArray(self.mask)
        #print("borders shape: " + str(self.mask.shape))
        # print("---")
        # print(self.mask)
        '''
        print("Edge size:" + str(self.edges.shape))
        print("Image shape" + str(self.grayImage.shape))
        print("Regions shape" + str(self.imgRegions.shape))
        print("We got here")
        #plt.subplot(121),plt.imshow(self.edges,cmap = 'gray')
        #plt.show()
        '''
        dialogValue = self.spinBoxDifference.value()
        print(dialogValue)
        if self.checkBoxRange.isChecked() == True:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | 1 << 8
        else:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | cv2.FLOODFILL_FIXED_RANGE | 1 << 8

        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                # We found a new region:

                # Optimize this, it's the part that makes it stupid slow
                if self.imgRegions[i][j] == -1:
                    if self.edges[i][j] == 0:

                        _, _, newMask, rect = cv2.floodFill(self.grayImage,
                                                            self.mask, (j, i),
                                                            1,
                                                            loDiff=dialogValue,
                                                            upDiff=dialogValue,
                                                            flags=floodFlags)
                        print(rect)
                        newRegion = region(regionID, rect)

                        for k in range(rect[0], rect[0] + rect[2], 1):
                            for l in range(rect[1], rect[1] + rect[3], 1):
                                if newMask[l + 1][k + 1] == 1 and self.imgRegions[l][k] == -1:
                                    self.imgRegions[l][k] = regionID
                                    newRegion.addPoint(self.grayImage[l][k])
                        newRegion.calcAverage()
                        regionList.append(copy.deepcopy(newRegion))

                        regionID += 1
                    #self.mask = cv2.copyMakeBorder(self.edges, 1,1,1,1, cv2.BORDER_CONSTANT, value = 255)

        for i in range(1, 239, 1):
            for j in range(1, 319, 1):
                if self.imgRegions[i][j] == -1:
                    for k in range(-1, 2, 1):
                        for l in range(-1, 2, 1):
                            if self.imgRegions[i + k][j + l] != -1 and self.imgRegions[i][j] == -1:
                                self.imgRegions[i][j] = self.imgRegions[i + k][j + l]

        if self.checkBoxMerge.isChecked() is True:
            print("Merging")
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    found = False
                    for k in range(-1, 2, 1):
                        if found is True:
                            break
                        for l in range(-1, 2, 1):
                            if found is True:
                                break
                            if self.imgRegions[i][j] != self.imgRegions[i + k][j + l]:
                                regionList[self.imgRegions[i][j] - 1].addFrontierPoint(
                                    [i, j, self.imgRegions[i + k][j + l]])
                                #print("Point coords: ", i, " ", j, " Region ID: ", self.imgRegions[i][j])
            for i in regionList:
                if i.deleted == False:
                    borderRegions = i.regionsInBorder()
                    for j in borderRegions:
                        otherRegion = regionList[j - 1]
                        if i.regionSize() < otherRegion.regionSize():
                            smallestRegion = i.id
                            biggest = j
                        else:
                            smallestRegion = j
                            biggest = i.id
                        percentageOfBorder = regionList[smallestRegion - 1].percentageOfBorder(self.edges, biggest)
                        percentageOfFrontier = regionList[smallestRegion - 1].percentageOfFrontier(biggest)
                        if percentageOfBorder > 0.4 and percentageOfFrontier > 0.4:
                            for k in range(240):
                                for l in range(320):
                                    if self.imgRegions[k][l] == smallestRegion:
                                        self.imgRegions[k][l] = biggest
                            regionList[biggest - 1].mergeRegion(
                                regionList[smallestRegion - 1])
                            regionList[smallestRegion - 1].deleted = True
                        #regionList.pop(smallestRegion-1)
        '''
            What I have to do:
            For each region, look at its frontier. For each distinct neighbouring value, check which of the two regions is smaller.
            For the smaller one, check whether the number of frontier points with that value exceeds a percentage and whether
            not many of those points lie on a Canny edge. If so, walk that region's rectangle and set all its points to the other value.
        '''

        for i in range(240):
            for j in range(320):
                regionIndex = self.imgRegions[i][j] - 1
                region2 = regionList[regionIndex]
                avgGrey = region2.returnAverage()
                self.grayImageDest[i][j] = int(avgGrey)

        print("Number of regions after: ", len(regionList))

        checkBreak = False
        if self.checkBoxBorders.isChecked() == True:
            # We skip the first and last row/column to avoid going out of bounds;
            # a per-pixel bounds check here would be noticeably slower.
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    checkBreak = False
                    for k in range(1, -2, -1):
                        if checkBreak == True:
                            break
                        for l in range(1, -2, -1):
                            if self.imgRegions[i][j] != self.imgRegions[i + k][j + l]:
                                self.grayImageDest[i][j] = 255
                                checkBreak = True
                                break
        '''
        #Set borders to black.
        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                if self.imgRegions[i][j] == -1:
                    self.imgRegions[i][j] = 0       
        '''
        #print("Resultado: " + str(self.imgRegions))
        # print(self.imgRegions.shape)
        # print(np.unique(self.imgRegions))

        #plt.subplot(121),plt.imshow(self.imgRegions,cmap = 'gray')
        # plt.show()

        #cv2.imwrite("result.png", self.imgRegions)
        #self.grayImageDest = cv2.resize(self.grayImageDest, (320, 240))
        #self.grayImageDest = cv2.cvtColor(self.grayImageDest, cv2.COLOR_BGR2GRAY)
        self.visorD.set_open_cv_image(self.grayImageDest)
        self.visorD.update()
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)

    def fillImgRegionsColor(self):

        regionID = 1
        self.edges = cv2.Canny(self.colorImage, 40, 120)
        self.mask = cv2.copyMakeBorder(self.edges,
                                       1,
                                       1,
                                       1,
                                       1,
                                       cv2.BORDER_CONSTANT,
                                       value=255)
        '''
        #plt.subplot(121),plt.imshow(self.edges,cmap = 'gray')
        #plt.show()
        '''
        regionList = []
        dialogValue = self.spinBoxDifference.value()
        if self.checkBoxRange.isChecked() == True:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | 1 << 8
        else:
            floodFlags = cv2.FLOODFILL_MASK_ONLY | 4 | cv2.FLOODFILL_FIXED_RANGE | 1 << 8

        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                # We found a new region:

                # Optimize this, it's the part that makes it stupid slow
                if self.imgRegions[i][j] == -1:
                    if self.edges[i][j] == 0:

                        dialogValueArray = [
                            dialogValue, dialogValue, dialogValue
                        ]
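                        # floodFill on a 3-channel image expects per-channel tolerances,
                        # hence the same spin-box value repeated for each channel.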

                        _, _, newMask, rect = cv2.floodFill(
                            self.colorImage,
                            self.mask, (j, i),
                            1,
                            loDiff=dialogValueArray,
                            upDiff=dialogValueArray,
                            flags=floodFlags)

                        newRegion = regionColor(regionID, rect)

                        for k in range(rect[0], rect[0] + rect[2], 1):
                            for l in range(rect[1], rect[1] + rect[3], 1):
                                if newMask[l + 1][k + 1] == 1 and self.imgRegions[l][k] == -1:
                                    self.imgRegions[l][k] = regionID
                                    newRegion.addPoint(self.colorImage[l][k])

                    #This should set the piece of grayImageDest to the correct value. Maybe move outside to increase efficiency.
                    #Use imgRegions and the regionID to set each point to the correct value, that way it's only one big loop instead
                    #of many smaller overlapping ones
                        newRegion.calcAverage()
                        regionList.append(copy.deepcopy(newRegion))
                        #print(regionID)
                        regionID += 1
                    #self.mask = cv2.copyMakeBorder(self.edges, 1,1,1,1, cv2.BORDER_CONSTANT, value = 255)
        checkBreak = False
        print("Number of regions: ", len(regionList))

        for i in range(1, 239, 1):
            for j in range(1, 319, 1):
                if self.imgRegions[i][j] == -1:
                    for k in range(-1, 2, 1):
                        for l in range(-1, 2, 1):
                            if self.imgRegions[i + k][j + l] != -1 and self.imgRegions[i][j] == -1:
                                self.imgRegions[i][j] = self.imgRegions[i + k][j + l]

        if self.checkBoxMerge.isChecked() is True:
            print("Merging")
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    found = False
                    for k in range(-1, 2, 1):
                        if found is True:
                            break
                        for l in range(-1, 2, 1):
                            if found is True:
                                break
                            if self.imgRegions[i][j] != self.imgRegions[i + k][j + l]:
                                regionList[self.imgRegions[i][j] - 1].addFrontierPoint(
                                    [i, j, self.imgRegions[i + k][j + l]])
                                #print("Point coords: ", i, " ", j, " Region ID: ", self.imgRegions[i][j])
            for i in regionList:
                if i.deleted == False:
                    borderRegions = i.regionsInBorder()
                    for j in borderRegions:
                        otherRegion = regionList[j - 1]
                        if i.regionSize() < otherRegion.regionSize():
                            smallestRegion = i.id
                            biggest = j
                        else:
                            smallestRegion = j
                            biggest = i.id
                        percentageOfBorder = regionList[smallestRegion - 1].percentageOfBorder(self.edges, biggest)
                        percentageOfFrontier = regionList[smallestRegion - 1].percentageOfFrontier(biggest)
                        if percentageOfBorder > 0.4 and percentageOfFrontier > 0.4:
                            for k in range(240):
                                for l in range(320):
                                    if self.imgRegions[k][l] == smallestRegion:
                                        self.imgRegions[k][l] = biggest
                            regionList[biggest - 1].mergeRegion(
                                regionList[smallestRegion - 1])
                            regionList[smallestRegion - 1].deleted = True

        for i in range(240):
            for j in range(320):
                regionIndex = self.imgRegions[i][j] - 1
                region2 = regionList[regionIndex]
                avgColor = region2.returnAverage()
                self.colorImageDest[i][j] = avgColor

        if self.checkBoxBorders.isChecked() == True:
            # We skip the first and last row/column to avoid going out of bounds;
            # a per-pixel bounds check here would be noticeably slower.
            for i in range(1, 239, 1):
                for j in range(1, 319, 1):
                    checkBreak = False
                    for k in range(1, -2, -1):
                        if checkBreak == True:
                            break
                        for l in range(1, -2, -1):
                            if self.imgRegions[i][j] != self.imgRegions[i + k][j + l]:
                                self.colorImageDest[i][j] = [255, 255, 255]
                                checkBreak = True
                                break

        # TODO: When it finds a new region, add it to a list as a region object, with the rectangle for efficiency. When it iterates over the region to set the imgRegions,
        # it adds the value of the respective point in grayImage (or colorImage, whatever) to the region object. When it finishes adding the region, it returns the average value.
        # After we're done, we iterate through the list of regions, using the rectangle to be more efficient, and we set each pixel in grayImageDest that is inside that region
        # to the average value of the region. It should give us a nice image. The only thing left to do is to do *something* with the borders.
        '''
        #Set borders to black.
        for i in range(0, 240, 1):
            for j in range(0, 320, 1):
                if self.imgRegions[i][j] == -1:
                    self.imgRegions[i][j] = 0       
        '''
        #print("Resultado: " + str(self.imgRegions))
        # print(self.imgRegions.shape)
        # print(np.unique(self.imgRegions))

        #plt.subplot(121),plt.imshow(self.imgRegions,cmap = 'gray')
        # plt.show()

        #cv2.imwrite("result.png", self.imgRegions)
        #self.grayImageDest = cv2.resize(self.grayImageDest, (320, 240))
        #self.grayImageDest = cv2.cvtColor(self.grayImageDest, cv2.COLOR_BGR2GRAY)
        self.visorD.set_open_cv_imageColor(self.colorImageDest)
        self.visorD.update()
        self.imgRegions = np.full((240, 320), -1, dtype=np.int32)

    def colorButtonAction(self):
        if self.colorState == False:
            self.colorButton.setText("Gray Image")
            self.colorButton.setChecked(True)
            print("Swapping to Gray")
            self.colorState = True
        else:
            self.colorButton.setText("Color Image")
            self.colorButton.setChecked(False)
            print("Swapping to color")
            self.colorState = False

    def loadAction(self):
        imgPath, _ = QFileDialog.getOpenFileName()

        if imgPath != "":
            if self.colorState == True:
                self.grayImage = np.zeros((240, 320), np.uint8)
                self.grayImageDest = np.zeros((240, 320), np.uint8)
                self.grayImage = cv2.imread(imgPath)
                self.grayImage = cv2.resize(self.grayImage, (320, 240))
                self.grayImage = cv2.cvtColor(self.grayImage,
                                              cv2.COLOR_BGR2GRAY)
                self.fillImgRegions()
                self.visorS.set_open_cv_image(self.grayImage)

            else:
                self.colorImage = np.zeros((240, 320, 3), np.uint8)
                self.colorImageDest = np.zeros((240, 320, 3), np.uint8)
                self.colorImage = cv2.imread(imgPath)
                self.colorImage = cv2.resize(self.colorImage, (320, 240))
                self.colorImage = cv2.cvtColor(self.colorImage,
                                               cv2.COLOR_BGR2RGB)
                self.fillImgRegionsColor()
                self.visorS.set_open_cv_imageColor(self.colorImage)
        self.visorS.update()
        # self.test()

    def captureButtonAction(self):
        if self.captureState == False:
            self.capture = VideoCapture(0)
            self.captureButton.setChecked(True)
            self.captureButton.setText("Stop Capture")
            self.captureState = True

        else:
            self.captureState = False
            self.captureButton.setChecked(False)
            self.captureButton.setText("Start Capture")

    def timerLoop(self):
        if (self.captureState == True and self.capture.isOpened() == True):
            if self.colorState == True:
                ret, self.grayImage = self.capture.read()
                if ret == False:
                    self.capture.release()
                    self.captureState = False
                    self.grayImage = np.zeros((240, 320), np.uint8)
                    self.grayImageDest = np.zeros((240, 320), np.uint8)
                    self.timer.stop()
                    self.timer.start(16)
                    return
                self.grayImage = cv2.resize(self.grayImage, (320, 240))
                self.grayImage = cv2.cvtColor(self.grayImage,
                                              cv2.COLOR_BGR2GRAY)
                self.fillImgRegions()
                self.visorS.set_open_cv_image(self.grayImage)
            else:
                print("Should be here")
                ret, self.colorImage = self.capture.read()
                if ret == False:
                    self.capture.release()
                    self.captureState = False
                    self.colorImage = np.zeros((240, 320, 3))
                    self.colorImageDest = np.zeros((240, 320, 3))
                    self.timer.stop()
                    self.timer.start(16)
                    return
                self.colorImage = cv2.resize(self.colorImage, (320, 240))
                self.colorImage = cv2.cvtColor(self.colorImage,
                                               cv2.COLOR_BGR2RGB)
                self.fillImgRegionsColor()
                self.visorS.set_open_cv_imageColor(self.colorImage)

        self.visorS.update()
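
# A minimal standalone sketch of the flood-fill labelling performed by
# fillImgRegions above: the Canny edges are copied into the flood-fill mask (with
# the mandatory 1-pixel border) so the fill cannot cross them, FLOODFILL_MASK_ONLY
# keeps the image untouched, and every fill writes the value 1 into the mask.
# The Canny thresholds and the loDiff/upDiff tolerance are illustrative, and the
# original's optimisation of restricting the labelling loop to the rectangle
# returned by floodFill is skipped here for clarity.
import cv2
import numpy as np


def label_regions(gray, canny_lo=40, canny_hi=120, tol=10):
    """Return an int32 label image; -1 marks pixels left unlabelled (edges)."""
    h, w = gray.shape
    labels = np.full((h, w), -1, np.int32)
    edges = cv2.Canny(gray, canny_lo, canny_hi)
    mask = cv2.copyMakeBorder(edges, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=255)
    flags = 4 | (1 << 8) | cv2.FLOODFILL_MASK_ONLY   # 4-connectivity, mask value 1
    region_id = 1
    for y in range(h):
        for x in range(w):
            if labels[y, x] == -1 and edges[y, x] == 0:
                cv2.floodFill(gray, mask, (x, y), 0,
                              loDiff=tol, upDiff=tol, flags=flags)
                # Newly reached pixels are set to 1 in the mask but not labelled yet.
                newly_filled = (mask[1:-1, 1:-1] == 1) & (labels == -1)
                labels[newly_filled] = region_id
                region_id += 1
    return labels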
Beispiel #10
0
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(875, 378)
        #      self.ui = QWidget()
        #      uic.loadUi("mainwindow.ui", self.ui)

        self.imgPath = ""
        self.capture = VideoCapture(0)
        self.captureState = False
        self.colorState = False  #False =  color, true = gray
        self.winSelected = False
        self.warpState = False

        #Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        #Values for window selection
        self.rectHeight = 0
        self.rectWidth = 0
        self.posX = 0
        self.posY = 0

        #Signals for window selection

        #Left image frame. Image prior to transformation
        self.imageFrameS = QtWidgets.QFrame(MainWindow)
        self.imageFrameS.setGeometry(QtCore.QRect(20, 20, 320, 240))
        self.imageFrameS.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.imageFrameS.setFrameShadow(QtWidgets.QFrame.Raised)
        self.imageFrameS.setObjectName("imageFrameS")
        # FIXED: Opencv images where created with wrong width height values (switched) so the copy failed
        # self.colorImage = np.zeros((320,240))
        # FIXED: original removed 2 of the 3 chanels with the np.zeros
        # self.colorImage = np.zeros((320,240))
        self.colorImage = np.zeros((240, 320, 3))
        self.grayImage = np.zeros((240, 320))
        self.imgLeft = QImage(320, 240, QImage.Format_RGB888)
        self.imgVisorS = ImgViewer(320, 240, self.imgLeft, self.imageFrameS)
        self.imgVisorS.windowSelected.connect(self.selectWindow)
        self.label_S = QLabel(self.imgVisorS)
        self.label_S.setObjectName("label_S")
        self.label_S.setGeometry(QRect(0, 0, 320, 240))
        self.label_S.setAttribute(Qt.WA_TransparentForMouseEvents, True)
        #TODO: Delete label, set as attribute of imgViewer
        #Isn't it the same? TODO later, it works *for now*

        #Right image frame. Image after transformation.
        self.imageFrameD = QtWidgets.QFrame(MainWindow)
        self.imageFrameD.setGeometry(QtCore.QRect(390, 20, 320, 240))
        self.imageFrameD.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.imageFrameD.setFrameShadow(QtWidgets.QFrame.Raised)
        self.imageFrameD.setObjectName("imageFrameD")
        # FIXED: original removed 2 of the 3 chanels with the np.zeros
        #self.colorImageDest = np.zeros((240,320))
        self.colorImageDest = np.zeros((240, 320, 3))
        self.grayImageDest = np.zeros((240, 320))
        self.imgRight = QImage(320, 240, QImage.Format_RGB888)
        self.imgVisorD = ImgViewer(320, 240, self.imgRight, self.imageFrameD)

        self.label_D = QLabel(self.imageFrameD)
        self.label_D.setObjectName("label_D")
        self.label_D.setGeometry(QRect(0, 0, 320, 240))

        # self.visorHistoS = ImgViewer(256, self.ui.histoFrameS.height(), self.ui.histoFrameS)
        # self.visorHistoD = ImgViewer(256, self.ui.histoFrameS.height(), self.ui.histoFrameD)

        #Capture button.
        self.captureButton = QtWidgets.QPushButton(MainWindow)
        self.captureButton.setGeometry(QtCore.QRect(740, 20, 101, 31))
        self.captureButton.setCheckable(True)
        self.captureButton.setChecked(False)
        self.captureButton.setObjectName("captureButton")
        self.captureButton.clicked.connect(self.captureButtonAction)

        #Gray/Color button.
        self.colorButton = QtWidgets.QPushButton(MainWindow)
        self.colorButton.setGeometry(QtCore.QRect(740, 60, 101, 31))
        self.colorButton.setCheckable(True)
        self.colorButton.setChecked(False)
        self.colorButton.setObjectName("colorButton")
        self.colorButton.clicked.connect(self.colorButtonAction)

        #Load from file button.
        self.loadButton = QtWidgets.QPushButton(MainWindow)
        self.loadButton.setGeometry(QtCore.QRect(740, 100, 101, 31))
        self.loadButton.setObjectName("loadButton")
        self.loadButton.clicked.connect(self.loadButtonAction)

        #Save to file button.
        self.saveButton = QtWidgets.QPushButton(MainWindow)
        self.saveButton.setGeometry(QtCore.QRect(740, 140, 101, 31))
        self.saveButton.setObjectName("saveButton")
        self.saveButton.clicked.connect(self.saveButtonAction)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
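
# A minimal sketch, assuming PyQt5, of turning an OpenCV frame into a QPixmap for
# the QLabel/ImgViewer widgets set up above. The QImage constructor needs width,
# height, bytes-per-line and a format matching the numpy layout, and the buffer
# must stay alive while the QImage is used (hence the .copy() before wrapping it
# in a QPixmap). The helper name and the label in the usage comment are illustrative.
import cv2
import numpy as np
from PyQt5.QtGui import QImage, QPixmap


def bgr_to_qpixmap(frame_bgr):
    """Convert an OpenCV BGR frame (uint8, HxWx3) into a QPixmap."""
    rgb = np.ascontiguousarray(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
    h, w, _ = rgb.shape
    qimg = QImage(rgb.data, w, h, rgb.strides[0], QImage.Format_RGB888)
    return QPixmap.fromImage(qimg.copy())   # copy() detaches from the numpy buffer

# Usage inside a slot, after a QApplication exists:
#   self.label_S.setPixmap(bgr_to_qpixmap(frame))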
Beispiel #11
0
class MainWindow(object):

    #path to the image, and storage of the origin and transformed image

    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(875, 378)
        #      self.ui = QWidget()
        #      uic.loadUi("mainwindow.ui", self.ui)

        self.imgPath = ""
        self.capture = VideoCapture(0)
        self.captureState = False
        self.colorState = False  #False =  color, true = gray
        self.winSelected = False
        self.warpState = False

        #Timer to control the capture.
        self.timer = QTimer()
        self.timer.timeout.connect(self.timerLoop)
        self.timer.start(16)

        #Values for window selection
        self.rectHeight = 0
        self.rectWidth = 0
        self.posX = 0
        self.posY = 0

        #Signals for window selection

        #Left image frame. Image prior to transformation
        self.imageFrameS = QtWidgets.QFrame(MainWindow)
        self.imageFrameS.setGeometry(QtCore.QRect(20, 20, 320, 240))
        self.imageFrameS.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.imageFrameS.setFrameShadow(QtWidgets.QFrame.Raised)
        self.imageFrameS.setObjectName("imageFrameS")
        # FIXED: Opencv images where created with wrong width height values (switched) so the copy failed
        # self.colorImage = np.zeros((320,240))
        # FIXED: original removed 2 of the 3 chanels with the np.zeros
        # self.colorImage = np.zeros((320,240))
        self.colorImage = np.zeros((240, 320, 3))
        self.grayImage = np.zeros((240, 320))
        self.imgLeft = QImage(320, 240, QImage.Format_RGB888)
        self.imgVisorS = ImgViewer(320, 240, self.imgLeft, self.imageFrameS)
        self.imgVisorS.windowSelected.connect(self.selectWindow)
        self.label_S = QLabel(self.imgVisorS)
        self.label_S.setObjectName("label_S")
        self.label_S.setGeometry(QRect(0, 0, 320, 240))
        self.label_S.setAttribute(Qt.WA_TransparentForMouseEvents, True)
        #TODO: Delete label, set as attribute of imgViewer
        #Isn't it the same? TODO later, it works *for now*

        #Right image frame. Image after transformation.
        self.imageFrameD = QtWidgets.QFrame(MainWindow)
        self.imageFrameD.setGeometry(QtCore.QRect(390, 20, 320, 240))
        self.imageFrameD.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.imageFrameD.setFrameShadow(QtWidgets.QFrame.Raised)
        self.imageFrameD.setObjectName("imageFrameD")
        # FIXED: original removed 2 of the 3 chanels with the np.zeros
        #self.colorImageDest = np.zeros((240,320))
        self.colorImageDest = np.zeros((240, 320, 3))
        self.grayImageDest = np.zeros((240, 320))
        self.imgRight = QImage(320, 240, QImage.Format_RGB888)
        self.imgVisorD = ImgViewer(320, 240, self.imgRight, self.imageFrameD)

        self.label_D = QLabel(self.imageFrameD)
        self.label_D.setObjectName("label_D")
        self.label_D.setGeometry(QRect(0, 0, 320, 240))

        # self.visorHistoS = ImgViewer(256, self.ui.histoFrameS.height(), self.ui.histoFrameS)
        # self.visorHistoD = ImgViewer(256, self.ui.histoFrameS.height(), self.ui.histoFrameD)

        #Capture button.
        self.captureButton = QtWidgets.QPushButton(MainWindow)
        self.captureButton.setGeometry(QtCore.QRect(740, 20, 101, 31))
        self.captureButton.setCheckable(True)
        self.captureButton.setChecked(False)
        self.captureButton.setObjectName("captureButton")
        self.captureButton.clicked.connect(self.captureButtonAction)

        #Gray/Color button.
        self.colorButton = QtWidgets.QPushButton(MainWindow)
        self.colorButton.setGeometry(QtCore.QRect(740, 60, 101, 31))
        self.colorButton.setCheckable(True)
        self.colorButton.setChecked(False)
        self.colorButton.setObjectName("colorButton")
        self.colorButton.clicked.connect(self.colorButtonAction)

        #Load from file button.
        self.loadButton = QtWidgets.QPushButton(MainWindow)
        self.loadButton.setGeometry(QtCore.QRect(740, 100, 101, 31))
        self.loadButton.setObjectName("loadButton")
        self.loadButton.clicked.connect(self.loadButtonAction)

        #Save to file button.
        self.saveButton = QtWidgets.QPushButton(MainWindow)
        self.saveButton.setGeometry(QtCore.QRect(740, 140, 101, 31))
        self.saveButton.setObjectName("saveButton")
        self.saveButton.clicked.connect(self.saveButtonAction)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def selectWindow(self, point, posX, posY):
        pEnd = QtCore.QPointF()
        if posX > 0 and posY > 0:
            self.posX = int(point.x() - posX / 2)
            if self.posX < 0:
                self.posX = 0
            self.posY = int(point.y() - posY / 2)
            if self.posY < 0:
                self.posY = 0
            pEnd.setX(point.x() + posX / 2)
            if pEnd.x() >= 320:
                pEnd.setX(319)
            pEnd.setY(point.y() + posY / 2)
            if pEnd.y() >= 240:
                pEnd.setY(239)
            self.rectWidth = int(pEnd.x() - self.posX + 1)
            self.rectHeight = int(pEnd.y() - self.posY + 1)
            print("Values: " + str(self.posX) + " " + str(self.posY) + " " +
                  str(self.rectWidth) + " " + str(self.rectHeight))
            self.winSelected = True

    def captureButtonAction(self):
        if self.captureState == False:
            self.captureButton.setText("Stop Capture")
            self.captureButton.setChecked(True)
            print("Started")
            self.captureState = True
        else:
            self.captureButton.setText("Start Capture")
            self.captureButton.setChecked(False)
            print("Stopped")
            self.captureState = False

    def timerLoop(self):
        if (self.captureState == True and self.capture.isOpened() == True):

            if self.colorState == False:
                ret, self.colorImage = self.capture.read()
                #print("Captured shape %s"%str(self.colorImage.shape))
                self.colorImage = cv2.resize(self.colorImage, (320, 240))
                #print("Resized shape %s"%str(self.colorImage.shape))
                self.colorImage = cv2.cvtColor(self.colorImage,
                                               cv2.COLOR_BGR2RGB)
                # FIXED: astype is needed to convert the cv type to the qt expected one
                self.imgVisorS.qimg = QImage(self.colorImage.astype(np.int8),
                                             self.colorImage.shape[1],
                                             self.colorImage.shape[0],
                                             QImage.Format_RGB888)
                #self.colorImageDest = self.colorImage
                # FIXED: astype is needed to convert the cv type to the qt expected one
                self.imgVisorD.qimg = QImage(
                    self.colorImageDest.astype(np.int8),
                    self.colorImageDest.shape[1], self.colorImageDest.shape[0],
                    QtGui.QImage.Format_RGB888)

            else:
                ret, self.grayImage = self.capture.read()
                self.grayImage = cv2.resize(self.grayImage, (320, 240))
                self.grayImage = cv2.cvtColor(self.grayImage,
                                              cv2.COLOR_BGR2GRAY)
                # FIXED: astype is needed to convert the cv type to the qt expected one
                self.imgVisorS.qimg = QImage(self.grayImage.astype(np.int8),
                                             self.grayImage.shape[1],
                                             self.grayImage.shape[0],
                                             self.grayImage.strides[0],
                                             QImage.Format_Grayscale8)
                # FIXED: astype is needed to convert the cv type to the qt expected one
                self.imgVisorD.qimg = QImage(
                    self.grayImageDest.astype(np.int8),
                    self.grayImageDest.shape[1], self.grayImageDest.shape[0],
                    QImage.Format_Grayscale8)

            #To update the warping in real time. TODO translation
            if self.warpState == True:
                rotation_matrix = cv2.getRotationMatrix2D(
                    (320 / 2, 240 / 2), -self.angleDial.value(),
                    1 + self.zoomSlider.value() / 3)
                translation_matrix = np.float32(
                    [[1, 0, self.horizontalSlider.value()],
                     [0, 1, self.verticalSlider.value()]])
                if self.colorState == False:
                    rotated_image = cv2.warpAffine(self.colorImage,
                                                   rotation_matrix, (320, 240))
                    rotated_image = cv2.warpAffine(rotated_image,
                                                   translation_matrix,
                                                   (320, 240))
                    self.colorImageDest = rotated_image
                    self.imgVisorD.qimg = QImage(
                        self.colorImageDest.astype(np.int8),
                        self.colorImageDest.shape[1],
                        self.colorImageDest.shape[0],
                        QtGui.QImage.Format_RGB888)
                else:
                    rotated_image = cv2.warpAffine(self.grayImage,
                                                   rotation_matrix, (320, 240))
                    rotated_image = cv2.warpAffine(rotated_image,
                                                   translation_matrix,
                                                   (320, 240))
                    self.grayImageDest = rotated_image
                    self.imgVisorD.qimg = QImage(
                        self.grayImageDest.astype(np.int8),
                        self.grayImageDest.shape[1],
                        self.grayImageDest.shape[0], QImage.Format_Grayscale8)
            if self.winSelected == True:
                self.imgVisorS.drawSquare(self.posX, self.posY, self.rectWidth,
                                          self.rectHeight)
            self.label_S.setPixmap(QPixmap.fromImage(self.imgVisorS.qimg))
            self.label_D.setPixmap(QPixmap.fromImage(self.imgVisorD.qimg))
            self.imgVisorS.repaint()
            self.imgVisorS.update()

    def colorButtonAction(self):
        if self.colorState == False:
            self.colorButton.setText("Gray Image")
            self.colorButton.setChecked(True)
            print("Swapping to Gray")
            self.colorState = True
        else:
            self.colorButton.setText("Color Image")
            self.colorButton.setChecked(False)
            print("Swapping to color")
            self.colorState = False

    def loadButtonAction(self):
        print("Load")
        self.imgPath, _ = QFileDialog.getOpenFileName()
        if self.captureState == True:
            self.captureButtonAction()

        self.colorImage = cv2.imread(self.imgPath)
        self.colorImage = cv2.resize(self.colorImage, (320, 240))
        self.colorImage = cv2.cvtColor(self.colorImage, cv2.COLOR_BGR2RGB)

        self.grayImage = cv2.imread(self.imgPath)
        self.grayImage = cv2.resize(self.grayImage, (320, 240))
        self.grayImage = cv2.cvtColor(self.grayImage, cv2.COLOR_BGR2GRAY)

        # TODO: remove to avoid double setting here and in the loopTimer method
        if self.colorState == False:
            self.imgLeft = QImage(self.colorImage, self.colorImage.shape[1],
                                  self.colorImage.shape[0],
                                  QImage.Format_RGB888)
        else:
            self.imgLeft = QImage(self.grayImage, self.grayImage.shape[1],
                                  self.grayImage.shape[0],
                                  QImage.Format_Grayscale8)

        self.label_S.setPixmap(QPixmap.fromImage(self.imgLeft))

        print(self.imgPath)

    def saveButtonAction(self):
        if self.colorState == False:
            saveImage = self.colorImage
            saveImage = cv2.cvtColor(saveImage, cv2.COLOR_RGB2BGR)
        else:
            saveImage = self.grayImage

        # getSaveFileName returns (path, selected_filter); imwrite needs just the path.
        filename, _ = QFileDialog.getSaveFileName()
        if filename != "":
            cv2.imwrite(filename, saveImage)
        print("Save")

    def resizeButtonAction(self):
        window_pos_y = copy.deepcopy(self.posY)
        window_pos_x = copy.deepcopy(self.posX)

        window_height = copy.deepcopy(self.rectHeight)
        window_width = copy.deepcopy(self.rectWidth)
        if self.colorState == False:
            #TODO: Perhaps remove useless variable?
            imageCopy = self.colorImage[window_pos_y:window_pos_y +
                                        window_height,
                                        window_pos_x:window_pos_x +
                                        window_width].copy()
            imageCopy = cv2.resize(imageCopy, (320, 240))
            self.colorImageDest = imageCopy.copy()
        else:
            resized_image = cv2.resize(
                self.grayImage[window_pos_y:window_pos_y + window_height,
                               window_pos_x:window_pos_x + window_width],
                (320, 240))
            print(resized_image.shape)
            self.grayImageDest = resized_image.copy()

    def updateHistograms(self, image, visor):
        histoSize = 256
        grange = [0, 256]

        # cv2.calcHist(image, 1, channels, nONE, histogram, 1, histoSize, ranges, True, False )
        # Python signature is calcHist(images, channels, mask, histSize, ranges);
        # the old call passed the channel list where the mask belongs.
        histogram = cv2.calcHist([image], [0], None, [histoSize], grange)
        minH, maxH, _, _ = cv2.minMaxLoc(histogram)

        maxY = visor.getHeight()

        for i, hVal in enumerate(histogram):
            minY = maxY - hVal[0] * maxY / maxH
            # The drawing call depends on the ImgViewer API; kept as originally written.
            visor(QLineF(i, minY, i, maxY), Qt.red)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(
            _translate("MainWindow", "Proyecto de Visión Artificial"))
        self.captureButton.setText(_translate("MainWindow", "Start Capture"))
        self.colorButton.setText(_translate("MainWindow", "Color Image"))
        self.loadButton.setText(_translate("MainWindow", "Load from File"))
        self.saveButton.setText(_translate("MainWindow", "Save to File"))
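
# A minimal sketch of the warp used in timerLoop above: a rotation/zoom matrix from
# cv2.getRotationMatrix2D about the image centre, followed by a translation applied
# with a second cv2.warpAffine. The angle, zoom and shift parameters stand in for
# the dial and slider values of the UI.
import cv2
import numpy as np


def rotate_zoom_translate(img, angle_deg=0.0, zoom=1.0, dx=0, dy=0):
    """Rotate about the image centre, scale, then translate."""
    h, w = img.shape[:2]
    rot = cv2.getRotationMatrix2D((w / 2, h / 2), angle_deg, zoom)
    out = cv2.warpAffine(img, rot, (w, h))
    shift = np.float32([[1, 0, dx],
                        [0, 1, dy]])
    return cv2.warpAffine(out, shift, (w, h))

# Example: rotate 15 degrees, zoom 1.2x, shift 20 px to the right.
#   warped = rotate_zoom_translate(frame, angle_deg=15, zoom=1.2, dx=20)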
Beispiel #12
0
class YUVviewer(QtWidgets.QMainWindow, Ui_YUVviewer):
    UI_SINGLE = (0xff000000)
    UI_R      = (0xff0000|UI_SINGLE)
    UI_G      = (0x00ff00|UI_SINGLE)
    UI_B      = (0x0000ff|UI_SINGLE)
    UI_Y      = (0x808080|UI_SINGLE)
    UI_U      = (0xff00ff|UI_SINGLE)
    UI_V      = (0x00ffff|UI_SINGLE)
    UI_RG53   = (((0xff0000|0x5000000)<<32)|(0x00ff00|0x3000000))
    UI_GB35   = (((0x00ff00|0x3000000)<<32)|(0x0000ff|0x5000000))
    UI_BG53   = (((0x0000ff|0x5000000)<<32)|(0x00ff00|0x3000000))
    UI_GR35   = (((0x00ff00|0x3000000)<<32)|(0xff0000|0x5000000))
    UI_RG44   = (((0xff0000|0x4000000)<<32)|(0x00ff00|0x4000000))
    UI_GB44   = (((0x00ff00|0x4000000)<<32)|(0x0000ff|0x4000000))
    UI_BG44   = (((0x0000ff|0x4000000)<<32)|(0x00ff00|0x4000000))
    UI_GR44   = (((0x00ff00|0x4000000)<<32)|(0xff0000|0x4000000))
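    # The packed UI_* constants above drive svgBoxSrc: bits 0-23 hold the second
    # color and bits 24-31 its width weight, bits 32-55 hold the first color and
    # bits 56-63 its width weight; UI_SINGLE (0xff000000) marks a plain
    # single-color cell. UI_RG53, for example, splits one cell 5/3 between red
    # and green, which is how the packed 5-6-5 RGB formats are visualized.
    # frameSizeTypeDict below maps a preset name to [width, height] strings that
    # are copied into the frame size line edits.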
    frameSizeTypeDict = {
        'QQCIF':   ['88', '72'],
        'QQVGA':   ['160','120'],
        'QCIF':    ['176','144'],
        'HQVGA':   ['240','160'],
        'QVGA':    ['320','240'],
        'CIF':     ['352','288'],
        'WQVGA':   ['400','240'],
        'HVGA':    ['480','320'],
        'nHD':     ['640','360'],
        'VGA':     ['640','480'],
        'WVGA':    ['800','480'],
        'SVGA':    ['800','600'],
        'qHD':     ['960','540'],
        'DVGA':    ['960','640'],
        'XGA':     ['1024','768'],
        'HD':      ['1280','720'],
        'UVGA':    ['1280','960'],
        'SXGA':    ['1280','1024'],
        'HD+':     ['1600','900'],
        'UXGA':    ['1600','1200'],
        'FHD':     ['1920','1080'],
        'WUXGA':   ['1920','1200'],
        'FHD+':    ['2160','1440'],
        'QXGA':    ['2048','1536'],
        'QHD':     ['2560','1440'],
        'WQXGA':   ['2560','1600'],
        'QSXGA':   ['2560','2048'],
        'QHD+':    ['3200','1800'],
        'QUXGA':   ['3200','2400'],
        '4K UHD':  ['3840','2160'],
        '8K UHD':  ['7680','4320'],
    }
    YUVFormat_list = {
        "YV12":      [UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,
                      UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V],
        "YU12/I420": [UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,UI_V,
                      UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U,UI_U],
        "NV21":      [UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,
                      UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U],
        "NV12":      [UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,UI_Y,
                      UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,
                      UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V,UI_U,UI_V],
        "YUY2/YUYV": [UI_Y,UI_U,UI_Y,UI_V],
        "YVYU":      [UI_Y,UI_V,UI_Y,UI_U],
        "UYVY":      [UI_U,UI_Y,UI_V,UI_Y],
        "4:4:4":     [UI_Y,UI_U,UI_V],
        "RGB565_L":  [UI_GB35,UI_RG53],
        "RGB565_B":  [UI_RG53,UI_GB35],
        "BGR565_L":  [UI_GR35,UI_BG53],
        "BGR565_B":  [UI_BG53,UI_GR35],
        "RGB888":    [UI_R,UI_G,UI_B],
        "BayerBG":   [UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,
                      UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R],
        "BayerGB":   [UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,
                      UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G],
        "BayerRG":   [UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,
                      UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B],
        "BayerGR":   [UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,UI_G,UI_R,
                      UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G,UI_B,UI_G],
        "BayerBG_RAW12": [UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,
                          UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44],
        "BayerGB_RAW12": [UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,
                          UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44],
        "BayerRG_RAW12": [UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,UI_R,UI_G,UI_RG44,
                          UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44,UI_G,UI_B,UI_GB44],
        "BayerGR_RAW12": [UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,UI_G,UI_R,UI_GR44,
                           UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44,UI_B,UI_G,UI_BG44],
    }
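    # Each YUVFormat_list entry is one repeating tile of that format's byte
    # layout (planar Y/U/V planes for YV12 and YU12/I420, interleaved UV pairs
    # for NV21/NV12, packed macropixels for YUY2/YVYU/UYVY, Bayer mosaics, ...);
    # updateUiSvg renders it as a 24x4 grid of colored boxes, repeating the
    # tile via the modulo in svgBoxArraySrc.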
    def __init__(self):
        super(YUVviewer, self).__init__()
        self.ui = Ui_YUVviewer()
        self.ui.setupUi(self)
        self.setWindowTitle('YUVviewer ' + VERSION)
        screen = QGuiApplication.screenAt(self.mapToGlobal(QPoint(self.width()//2,0))).geometry()
        size = self.geometry()
        self.move((screen.width() - size.width()) // 2, (screen.height() - size.height()) // 2)

        self.ui.frameSizeType_ComboBox.setStyleSheet("combobox-popup: 0;")
        self.ui.YUVFormat_ComboBox.setStyleSheet("combobox-popup: 0;")
        self.ui.frameRate_ComboBox.setStyleSheet("combobox-popup: 0;")

        for key,value in self.frameSizeTypeDict.items():
            self.ui.frameSizeType_ComboBox.insertItem(self.ui.frameSizeType_ComboBox.count(), key)

        if not os.path.exists(os.path.join(os.environ['HOME'],'.YUVViewer')):
            os.mkdir(os.path.join(os.environ['HOME'],'.YUVViewer'))
        self.YUVviewerConfigFile = ConfigFile(os.path.join(os.environ['HOME'],'.YUVViewer','YUVViewer.xml'))
        if self.YUVviewerConfigFile.config_dict['frameSizeType'] == 'Other':
            self.ui.frameSizeType_Other_RadioButton.setChecked(True)
            self.ui.frameSizeType_ComboBox.setEnabled(False)
            self.ui.frameSize_Width_LineEdit.setText(self.YUVviewerConfigFile.config_dict['frameSize_Width'])
            self.ui.frameSize_Height_LineEdit.setText(self.YUVviewerConfigFile.config_dict['frameSize_Height'])
        else:
            self.ui.frameSizeType_Combo_RadioButton.setChecked(True)
            self.ui.frameSizeType_ComboBox.setEnabled(True)
            for key,value in self.frameSizeTypeDict.items():
                if key == self.YUVviewerConfigFile.config_dict['frameSizeType']:
                    self.ui.frameSizeType_ComboBox.setCurrentText(self.YUVviewerConfigFile.config_dict['frameSizeType'])
                    self.ui.frameSize_Width_LineEdit.setText(value[0])
                    self.ui.frameSize_Width_LineEdit.setFocusPolicy(QtCore.Qt.NoFocus)
                    self.YUVviewerConfigFile.config_dict['frameSize_Width'] = value[0]
                    self.ui.frameSize_Height_LineEdit.setText(value[1])
                    self.ui.frameSize_Height_LineEdit.setFocusPolicy(QtCore.Qt.NoFocus)
                    self.YUVviewerConfigFile.config_dict['frameSize_Height'] = value[1]
                    break
        
        currentIndex = 0
        for key,value in self.YUVFormat_list.items():
            if key == self.YUVviewerConfigFile.config_dict['YUVFormat']:
                color_list = value
                break
            currentIndex += 1
        self.ui.YUVFormat_ComboBox.setCurrentIndex(currentIndex)
        self.updateUiSvg(color_list)

        frameRate_list = ['30', '60', '120']
        currentIndex = frameRate_list.index(self.YUVviewerConfigFile.config_dict['frameRate'])
        self.ui.frameRate_ComboBox.setCurrentIndex(currentIndex)

        self.ui.startFrame_LineEdit.setText(self.YUVviewerConfigFile.config_dict['startFrame'])
        self.ui.endFrame_LineEdit.setText(self.YUVviewerConfigFile.config_dict['endFrame'])

        self.ui.YUVFormat_ComboBox.currentTextChanged.connect(self.changeFormat)
        self.ui.frameSizeType_Combo_RadioButton.clicked.connect(self.configComboBox)
        self.ui.frameSizeType_Other_RadioButton.clicked.connect(self.configOther)
        self.ui.frameSizeType_ComboBox.currentTextChanged.connect(self.changeFrameSizeType)

        self.ui.frameSize_Height_LineEdit.textEdited.connect(self.frameSizeHeightValidator)
        self.ui.frameSize_Width_LineEdit.textEdited.connect(self.frameSizeWidthValidator)
        self.ui.startFrame_LineEdit.textEdited.connect(self.startFrameValidator)
        self.ui.endFrame_LineEdit.textEdited.connect(self.endFrameValidator)

        self.ui.exchange_PushButton.clicked.connect(self.exchangeSize)
        self.ui.openFile_PushButton.clicked.connect(self.openFile)
        self.ui.openFolder_PushButton.clicked.connect(self.openFolder)
        self.ui.about_PushButton.clicked.connect(self.about)
        self.ui.help_PushButton.clicked.connect(self.help)

    def svgBoxSrc(self, x, y, w, c):
        if (c&self.UI_SINGLE) == self.UI_SINGLE:
            color = c & 0xffffff
            return '<line class=\"0\" x1=\"%d\" y1=\"%d\" x2=\"%d\" y2=\"%d\" stroke=\"#%06x\" fill=\"none\" stroke-width=\"%d\" />\n' % (x,y+w/2,x+w,y+w/2,color,w)
        color0 = (c>>32) & 0xffffff
        color1 = c & 0xffffff
        fix_w = (w*(c>>56))/((c>>56)+((c & 0xff000000)>>24))
        return '<line class=\"0\" x1=\"%d\" y1=\"%d\" x2=\"%d\" y2=\"%d\" stroke=\"#%06x\" fill=\"none\" stroke-width=\"%d\" />\n' % (x,y+w/2,x+fix_w,y+w/2,color0,w) + '<line class=\"0\" x1=\"%d\" y1=\"%d\" x2=\"%d\" y2=\"%d\" stroke=\"#%06x\" fill=\"none\" stroke-width=\"%d\" />\n' % (x+fix_w,y+w/2,x+w,y+w/2,color1,w)
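    # Example for svgBoxSrc (illustration only): with w=40 and c=self.UI_RG53 the
    # weights are 5 and 3, so fix_w = 40*5/(5+3) = 25 and the cell is drawn as a
    # 25px red segment followed by a 15px green segment, both with stroke-width 40.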

    def svgBoxArraySrc(self, x, y, w, od, xn, yn, ca):
        # Guard against an empty color list before len() is used as a modulus below.
        if not ca:
            return ''
        num = len(ca)
        ret = ''
        for i in range(yn):
            for j in range(xn):
                ret += self.svgBoxSrc(x+(w+od)*j,y+(w+od)*i,w,ca[((i*xn+j)%num)])
        return ret

    def updateUiSvg(self,color_list):
        svgXmlStreamReader = QXmlStreamReader('<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\">\n' +
                            self.svgBoxArraySrc(10,10,40,5,24,4,color_list) +
                            '</svg>\n')
        svgRender = QSvgRenderer()
        svgRender.load(svgXmlStreamReader)
        svgPixmap = QPixmap(480,80)
        svgPixmap.fill(QtCore.Qt.transparent)
        svgPainter = QPainter(svgPixmap)
        svgRender.render(svgPainter)
        self.ui.label_8.setPixmap(svgPixmap)
        self.ui.label_8.setAlignment(QtCore.Qt.AlignCenter)
        svgPainter.end()

    def changeFormat(self,text):
        for key,value in self.YUVFormat_list.items():
            if key == self.ui.YUVFormat_ComboBox.currentText():
                color_list = value
                break
        self.updateUiSvg(color_list)
        self.ui.label_8.repaint()

    def configComboBox(self):
        self.ui.frameSizeType_ComboBox.setEnabled(True)
        for key,value in self.frameSizeTypeDict.items():
            if key == self.ui.frameSizeType_ComboBox.currentText():
                self.ui.frameSize_Width_LineEdit.setText(value[0])
                self.ui.frameSize_Width_LineEdit.setFocusPolicy(QtCore.Qt.NoFocus)
                self.ui.frameSize_Height_LineEdit.setText(value[1])
                self.ui.frameSize_Height_LineEdit.setFocusPolicy(QtCore.Qt.NoFocus)
                break

    def changeFrameSizeType(self, text):
        for key,value in self.frameSizeTypeDict.items():
            if key == text:
                self.ui.frameSize_Width_LineEdit.setText(value[0])
                self.ui.frameSize_Width_LineEdit.setFocusPolicy(QtCore.Qt.NoFocus)
                self.ui.frameSize_Height_LineEdit.setText(value[1])
                self.ui.frameSize_Height_LineEdit.setFocusPolicy(QtCore.Qt.NoFocus)
                break

    def configOther(self):
        self.ui.frameSizeType_ComboBox.setEnabled(False)
        self.ui.frameSize_Width_LineEdit.setText(self.YUVviewerConfigFile.config_dict['frameSize_Width'])
        self.ui.frameSize_Height_LineEdit.setText(self.YUVviewerConfigFile.config_dict['frameSize_Height'])
        self.ui.frameSize_Width_LineEdit.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.ui.frameSize_Height_LineEdit.setFocusPolicy(QtCore.Qt.StrongFocus)

    def frameSizeHeightValidator(self,currentText):
        try:
            currentVale = int(currentText)
            if (currentVale % 2) == 0 and currentVale > 0:
                self.ui.frameSize_Height_LineEdit.setStyleSheet("QLineEdit{border: 1px solid gray;border-radius: 1px;}")
            else:
                QToolTip.showText(self.ui.frameSize_Height_LineEdit.mapToGlobal(QPoint(0, 10)), 'Height must be a positive even number')
                self.ui.frameSize_Height_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
        except Exception as e:
            QToolTip.showText(self.ui.frameSize_Height_LineEdit.mapToGlobal(QPoint(0, 10)), 'Height must be a number')
            self.ui.frameSize_Height_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")

    def frameSizeWidthValidator(self, currentText):
        try:
            currentVale = int(currentText)
            if (currentVale % 2) == 0 and currentVale > 0:
                self.ui.frameSize_Width_LineEdit.setStyleSheet("QLineEdit{border: 1px solid gray;border-radius: 1px;}")
            else:
                QToolTip.showText(self.ui.frameSize_Width_LineEdit.mapToGlobal(QPoint(0, 10)), 'Width must be a positive even number')
                self.ui.frameSize_Width_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
        except Exception as e:
            QToolTip.showText(self.ui.frameSize_Width_LineEdit.mapToGlobal(QPoint(0, 10)), 'Width must be a number')
            self.ui.frameSize_Width_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")

    def startFrameValidator(self,currentText):
        try:
            currentVale = int(currentText)
        except Exception as e:
            QToolTip.showText(self.ui.startFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'startFrame must be a number')
            self.ui.startFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
            return

        if currentVale >= 0:
            self.ui.startFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid gray;border-radius: 1px;}")
            try:
                endFrameVale = int(self.ui.endFrame_LineEdit.text())
            except Exception as e:
                QToolTip.showText(self.ui.endFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'endFrame must be a number')
                self.ui.endFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
                return
            if currentVale <= endFrameVale:
                self.ui.endFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid gray;border-radius: 1px;}")
            else:
                QToolTip.showText(self.ui.endFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'endFrame must be greater than or equal to startFrame')
                self.ui.endFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
        else:
            QToolTip.showText(self.ui.startFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'startFrame must be greater than or equal to 0')
            self.ui.startFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")

    def endFrameValidator(self,currentText):
        try:
            currentVale = int(currentText)
        except Exception as e:
            QToolTip.showText(self.ui.endFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'endFrame must be a number')
            self.ui.endFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
            return

        self.ui.endFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid gray;border-radius: 1px;}")

        try:
            startVale = int(self.ui.startFrame_LineEdit.text())
            if currentVale < startVale:
                QToolTip.showText(self.ui.endFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'endFrame must be greater than or equal to startFrame')
                self.ui.endFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")
        except Exception as e:
            QToolTip.showText(self.ui.startFrame_LineEdit.mapToGlobal(QPoint(0, 10)), 'startFrame must be a number')
            self.ui.startFrame_LineEdit.setStyleSheet("QLineEdit{border: 1px solid red;border-radius: 3px;}")

    def exchangeSize(self):
        self.ui.frameSizeType_Other_RadioButton.setChecked(True)
        self.ui.frameSizeType_ComboBox.setEnabled(False)
        width = self.ui.frameSize_Width_LineEdit.text()
        self.ui.frameSize_Width_LineEdit.setText(self.ui.frameSize_Height_LineEdit.text())
        self.ui.frameSize_Height_LineEdit.setText(width)
        self.frameSizeWidthValidator(self.ui.frameSize_Width_LineEdit.text())
        self.frameSizeHeightValidator(self.ui.frameSize_Height_LineEdit.text())

    def _updateConfig(self):
        try:
            if int(self.ui.startFrame_LineEdit.text()) <= int(self.ui.endFrame_LineEdit.text()):
                temp_Width = int(self.ui.frameSize_Width_LineEdit.text())
                temp_Height = int(self.ui.frameSize_Height_LineEdit.text())
                if (temp_Width % 2) == 0 and (temp_Height % 2) == 0 and temp_Width > 0 and temp_Height > 0:
                    if self.ui.frameSizeType_Combo_RadioButton.isChecked():
                        self.YUVviewerConfigFile.config_dict['frameSizeType'] = self.ui.frameSizeType_ComboBox.currentText()
                    elif self.ui.frameSizeType_Other_RadioButton.isChecked():
                        self.YUVviewerConfigFile.config_dict['frameSizeType'] = self.ui.frameSizeType_Other_RadioButton.text()
                    self.YUVviewerConfigFile.config_dict['YUVFormat'] = self.ui.YUVFormat_ComboBox.currentText()
                    self.YUVviewerConfigFile.config_dict['frameSize_Width'] = self.ui.frameSize_Width_LineEdit.text()
                    self.YUVviewerConfigFile.config_dict['frameSize_Height'] = self.ui.frameSize_Height_LineEdit.text()
                    self.YUVviewerConfigFile.config_dict['frameRate'] = self.ui.frameRate_ComboBox.currentText()
                    self.YUVviewerConfigFile.config_dict['startFrame'] = self.ui.startFrame_LineEdit.text()
                    self.YUVviewerConfigFile.config_dict['endFrame'] = self.ui.endFrame_LineEdit.text()
                    ret = True
                else:
                    QMessageBox.critical(self, 'Error', 'frameSize invalid!!', QMessageBox.Ok)
                    ret = False
            else:
                QMessageBox.critical(self, 'Error', 'startFrame or endFrame invalid!!', QMessageBox.Ok)
                ret = False
        except Exception as e:
            QMessageBox.critical(self, 'Error', 'parameter invalid!!', QMessageBox.Ok)
            ret = False
        return ret
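    # _updateConfig reads the widgets back into config_dict (frameSizeType,
    # YUVFormat, frameSize_Width/Height, frameRate, startFrame, endFrame),
    # rejecting odd or non-positive sizes and start/end frames that are out of
    # order; openFile and openFolder only proceed when it returns True.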

    def _imgView(self, openfile_list):
        if openfile_list:
            self.imgViewer = ImgViewer(self)
            frameSize_Width = int(self.YUVviewerConfigFile.config_dict['frameSize_Width'])
            frameSize_Height = int(self.YUVviewerConfigFile.config_dict['frameSize_Height'])
            # Multithreaded approach
            ret = self.imgViewer.setFileList_multithreading(openfile_list,
                        self.YUVviewerConfigFile.config_dict['YUVFormat'],
                        frameSize_Width,
                        frameSize_Height,
                        int(self.YUVviewerConfigFile.config_dict['startFrame']),
                        int(self.YUVviewerConfigFile.config_dict['endFrame']) - int(self.YUVviewerConfigFile.config_dict['startFrame']) + 1,
                        )
            if not ret:
                QMessageBox.critical(self, 'Error', 'unsupported YUVFormat!!', QMessageBox.Ok)
                self.show()
                return False
            # Single-threaded approach (kept for reference)
            #try:
            #    ret = self.imgViewer.setFileList(openfile_list,
            #                    self.YUVviewerConfigFile.config_dict['YUVFormat'],
            #                    frameSize_Width,
            #                    frameSize_Height,
            #                    int(self.YUVviewerConfigFile.config_dict['startFrame']),
            #                    int(self.YUVviewerConfigFile.config_dict['endFrame']) - int(self.YUVviewerConfigFile.config_dict['startFrame']) + 1,
            #                    )
            #    if not ret:
            #        QMessageBox.critical(self, 'Error', 'unsupported YUVFormat!!', QMessageBox.Ok)
            #        self.show()
            #        return False
            #
            #except Exception as e:
            #    QMessageBox.critical(self, 'Error', 'unknown error!!', QMessageBox.Ok)
            #    self.show()
            #    return False
            if frameSize_Width > frameSize_Height:
                # QWidget.resize expects ints; float arguments raise a TypeError in PyQt5.
                self.imgViewer.resize(800, int(frameSize_Height / frameSize_Width * 800))
            else:
                self.imgViewer.resize(int(frameSize_Width / frameSize_Height * 400), 400)
            screen = QGuiApplication.screenAt(self.mapToGlobal(QPoint(self.width()//2,0))).geometry()
            size = self.imgViewer.geometry()
            self.imgViewer.move((screen.width() - size.width()) // 2, (screen.height() - size.height()) // 2)
            self.hide()
            self.imgViewer.show()
            return True

    def openFile(self):
        if self._updateConfig():
            openDir = ''
            if self.YUVviewerConfigFile.config_dict['lastPath']:
                if os.path.isdir(self.YUVviewerConfigFile.config_dict['lastPath']):
                    openDir = self.YUVviewerConfigFile.config_dict['lastPath']
            openfile_name = QFileDialog.getOpenFileNames(self, 'Select file', openDir, 'YUV files(*.yuv *.data *.raw)')
            openfile_list = openfile_name[0]
            if openfile_list:
                if os.path.exists(openfile_name[0][0]):
                    filepath, filename = os.path.split(openfile_name[0][0])
                    self.YUVviewerConfigFile.config_dict['lastPath'] = filepath
            self._imgView(openfile_list)

    def openFolder(self):
        if self._updateConfig():
            openDir = ''
            if self.YUVviewerConfigFile.config_dict['lastPath']:
                if os.path.isdir(self.YUVviewerConfigFile.config_dict['lastPath']):
                    openDir = self.YUVviewerConfigFile.config_dict['lastPath']
            openfolder_name = QFileDialog.getExistingDirectory(self, 'Select folder', openDir)
            if openfolder_name:
                self.YUVviewerConfigFile.config_dict['lastPath'] = openfolder_name
                openfile_list = []
                for filename in os.listdir(openfolder_name):
                    filepath = os.path.join(openfolder_name, filename)
                    if os.path.isfile(filepath):
                        # Guard against files without an extension before indexing.
                        file_ext = filepath.rsplit('.', maxsplit=1)
                        if len(file_ext) == 2 and file_ext[1] in ('yuv', 'data', 'raw'):
                            openfile_list.append(filepath)
                self._imgView(openfile_list)

    def about(self):
        QMessageBox.about(self, 'About', 'Version \n ' + VERSION + "\nCommit \n " + GIT_TAG + '\nAuthor\n [email protected] \n [email protected] \nWebsite\n https://github.com/QQxiaoming/YUVviewer')

    def help(self):
        QMessageBox.question(self, 'Help', '1. Choose the data parameters on the main window.\n2. Click Open File or Open Folder to parse the image data and display it.\n3. In the image window, use the mouse wheel to zoom, drag with the left button, double-click the left button to save the image as PNG, right-click once to reset the image size and position, double right-click to swap the R and B channels, and middle-click once to show the image at its original size.', QMessageBox.Ok)

    def closeEvent(self, event):
        del self.YUVviewerConfigFile
        event.accept()

    def exit(self):
        self.close()
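
# Hedged usage sketch (not part of the original example): a minimal entry point
# for running YUVviewer standalone, assuming the PyQt5 modules imported above
# and Ui_YUVviewer are available in this module.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    viewer = YUVviewer()
    viewer.show()
    sys.exit(app.exec_())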