Example #1
0
def main(argv):
    """Demo entry point: load an image and run a face-detection pipeline.

    argv[1], when present, is the path of the image to process; otherwise a
    hard-coded sample portrait is loaded.  Written for Python 2 (bare print
    statements).  The commented-out sections record earlier exercises
    (histograms, equalization, OCR, plate recognition) kept for reference.
    NOTE(review): the trailing mask/x0/y0 setup is never used below, which
    suggests this routine continues beyond this excerpt.
    """
    # load image into ndarray
    if len(argv) > 1:
        testimg = mpimg.imread(argv[1])
    else:
        testimg = mpimg.imread('/home/cilsat/Dropbox/kuliah/sem1/pp/portrait.jpeg')

    # calc sorted 2-D representation of image
    #imgs = util.sortimg(util.flattenimg(img))

    # get count of unique colors
    #uniq = util.getunique(imgs)
    #print(uniq)

    #pltshow(img)
    # get and display histogram(s) of color(s)
    #hist = util.gethistogram(img)
    #plthist(hist)

    # equalize image
    #imgeq = util.equalize(img, hist)
    #pltshow(imgeq)

    # get and display background image with dynamic threshold 15
    #background = util.getbackground(img, imgs, 15)
    #pltshow(background)

    # OCR
    # training: only need to run ONCE for each font
    # make sure you have an image containing all lower case letters, upper case letters, and digits in that order
    """
    font = "dejavusans-alphanumeric"
    fontimg = mpimg.imread('train/' + font + '.jpg')
    util.train(fontimg, font)

    # testing: this is only around 50% accurate for text of different font
    print util.test(testimg, font)
    """

    # License Plate recognition
    """
    dataset = "plat"
    util.gnb_train(dataset=dataset)
    """

    # Face detection
    h = testimg.shape[0]  # image height (rows)
    w = testimg.shape[1]  # image width (columns)
    # Crop a fixed window presumed to contain the face; the fractions look
    # tuned for the sample portrait -- TODO confirm for other inputs.
    face = testimg[int(0*h):int(0.7*h), int(0.15*w):int(0.45*w)]
    pltshow(face)
    # Re-map colors sampled from the face crop onto the whole image.
    imgf = util.mapImage(testimg, util.mapColor(face, 60))
    pltshow(imgf)
    imgt = util.thin(imgf, bg='dark')
    pltshow(imgt)
    # Keep only the first group returned by the segmenter.
    obj = util.segment(imgt, minsize=0.5)[0]
    print len(obj)
    util.getobjimg(obj[0])

    # Bounding box of the first segmented object.  NOTE(review): assumes
    # each point is (row, col) -- verify against util.segment's output.
    frame = np.array(obj[0])
    x = frame[...,1]  # presumed column coordinates
    y = frame[...,0]  # presumed row coordinates
    h = y.ptp()   # box height (max - min over rows); rebinds h from above
    w = x.ptp()   # box width (max - min over columns); rebinds w
    x0 = x.min()  # left edge of the box
    y0 = y.min()  # top edge of the box
    mask = np.zeros(testimg.shape, dtype=np.uint8)  # blank image-sized canvas
    print mask.shape
Example #2
0
    def initUI(self):

        """Build the main window: state attributes, layout, menus, and bars.

        Creates the side-by-side before/after panes, wires every menu action
        to the corresponding util.* routine via a lambda, then shows the
        window.  Written for Python 2 (note xrange in the chain-code lambda).
        Lambdas evaluate self.imgNpBefore lazily, so each action operates on
        whatever image is loaded at click time.
        """
        # --- main-window state -------------------------------------------
        self.imgOriginal = np.array([])  # as-loaded copy of the input image
        self.mainWidth = 1280            # central widget width in px
        self.mainHeight = 640            # central widget height in px
        self.main = QLabel()             # container for the before/after grid
        self.imgNpBefore = np.array([])  # left pane ("Before") pixel data
        self.imgNpAfter = np.array([])   # right pane ("After") pixel data
        # Reference skin-tone sample fed to the face-detection actions.
        self.skin = mpimg.imread('res/skin.jpg')

        grid = QGridLayout()
        self.main.setLayout(grid)

        # Left pane: shows the input image (or text output) before processing.
        self.mainBefore = QLabel('Before')
        self.mainBefore.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.mainBefore.setWordWrap(True)
        self.mainBefore.setFont(QFont('Monospace', 10))

        # Right pane: shows the result of the last processing action.
        self.mainAfter = QLabel('After')
        self.mainAfter.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.mainAfter.setWordWrap(True)
        self.mainAfter.setFont(QFont('Monospace', 10))

        grid.addWidget(self.mainBefore, 0, 0)
        grid.addWidget(self.mainAfter, 0, 1)

        """
        Menu Bar
        """

        # FILE MENU
        openFile = QAction('Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.showDialog)

        exitAction = QAction('Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)

        # Copies the "After" result into the "Before" pane so processing
        # steps can be chained.
        moveLeft = QAction('Move Left', self)
        moveLeft.setShortcut('Ctrl+L')
        moveLeft.triggered.connect(lambda: self.updateBefore(self.imgNpAfter))


        # PROCESS MENU
        equalizeMenu = QAction('Equalize', self)
        equalizeMenu.triggered.connect(lambda: self.updateImgAfter(util.equalize(self.imgNpBefore)))

        # NOTE(review): no handler connected -- action is a stub.
        histogramMenu = QAction('Histogram', self)
        #histogramMenu.triggered.connect(lambda: self.updateImgAfter(

        grayscaleMenu = QAction('Grayscale', self)
        grayscaleMenu.triggered.connect(lambda: self.updateImgAfter(util.getgrayscale(self.imgNpBefore)))

        binarizeMenu = QAction('Binarize', self)
        binarizeMenu.triggered.connect(lambda: self.updateImgAfter(util.otsu(self.imgNpBefore)))

        # Gaussian smoothing implemented as an FFT convolution on grayscale.
        gaussianMenu = QAction('Smooth', self)
        gaussianMenu.triggered.connect(lambda: self.updateImgAfter(util.convolvefft(util.gaussian_filt(), util.getgrayscale(self.imgNpBefore))))        

        resizeMenu = QAction('Resize', self)
        resizeMenu.triggered.connect(lambda: self.updateImgAfter(util.downsample(self.imgNpBefore)))

        # Downsample -> Otsu -> thin -> segment, then draw the found objects
        # onto the downsampled image (box=False: outline only, no boxes).
        segmentMenu = QAction('Segment', self)
        segmentMenu.triggered.connect(lambda: self.updateImgAfter(util.showobj(util.downsample(self.imgNpBefore, target_height=480), util.segment(util.thin(util.otsu(util.downsample(self.imgNpBefore, target_height=480), bg='light'))), box=False)))

        # EDGE DETECTION MENU
        # Zero/one/two-degree operators dispatched through util by name.
        averageMenu = QAction('Average', self)
        averageMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="average")))

        differenceMenu = QAction('Difference', self)
        differenceMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="difference")))

        homogenMenu = QAction('Homogen', self)
        homogenMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="homogen")))

        sobelMenu = QAction('Sobel', self)
        sobelMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="sobel")))

        prewittMenu = QAction('Prewitt', self)
        prewittMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="prewitt")))

        freichenMenu = QAction('Frei-Chen', self)
        freichenMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="freichen")))

        kirschMenu = QAction('Kirsch', self)
        kirschMenu.triggered.connect(lambda: self.updateImgAfter(util.degreetwo(self.imgNpBefore, type="kirsch")))


        # FEATURE MENU
        # Flattens the per-object chain codes into one list of directions
        # and displays it as text in the "After" pane.
        chaincodeMenu = QAction('Chain code', self)
        chaincodeMenu.triggered.connect(lambda: self.updateTxtAfter(str([util.getdirection(chain[n][0], chain[n][1]) for chain in util.segment(util.thin(self.imgNpBefore), cc=True) for n in xrange(len(chain))])))

        turncodeMenu = QAction('Turn code', self)
        turncodeMenu.triggered.connect(lambda: self.updateTxtAfter(str([util.getturncode(cc) for cc in util.segment(util.thin(self.imgNpBefore, bg='light'), cc=False)])))

        skeletonMenu = QAction('Zhang-Suen thinning', self)
        skeletonMenu.triggered.connect(lambda:self.updateImgAfter(util.zhangsuen(util.binarize(self.imgNpBefore, bg='light'))))

        skinMenu = QAction('Boundary detection', self)
        skinMenu.triggered.connect(lambda:self.updateImgAfter(util.thin(self.imgNpBefore, bg='light')))

        # NOTE(review): no handler connected -- action is a stub.
        freemanMenu = QAction('Contour profile', self)


        # RECOGNITION MENU
        freemantrainfontMenu = QAction('Train Contour Font', self)
        freemantrainfontMenu.triggered.connect(lambda: util.train(self.imgNpBefore, feats='zs', order='font', setname='font')) 

        freemantrainplatMenu = QAction('Train ZS Plate (GNB)', self)
        freemantrainplatMenu.triggered.connect(lambda: util.train(self.imgNpBefore, feats='zs', order='plat', setname='plat'))

        # NOTE(review): the four CC+TC actions below have no handlers.
        cctctrainfontMenu = QAction('Train CC + TC Font', self)

        cctctrainplatMenu = QAction('Train CC + TC Plate', self)

        freemantestfontMenu = QAction('Predict Contour Font', self)
        freemantestfontMenu.triggered.connect(lambda: self.updateTxtAfter(util.test(self.imgNpBefore, feats='zs', order='font', setname='font')))
        
        freemantestplatMenu = QAction('Predict Contour Plate', self)
        freemantestplatMenu.triggered.connect(lambda:self.updateTxtAfter(util.test(self.imgNpBefore, feats='zs', order='plat', setname='plat')))

        cctctestfontMenu = QAction('Predict CC + TC Font', self)

        cctctestplatMenu = QAction('Predict CC + TC Plate', self)

        facesMenu = QAction('Show faces', self)
        facesMenu.triggered.connect(lambda: self.updateImgAfter(util.getFaces(self.imgNpBefore, self.skin, range=70)))

        faceMenu = QAction('Show facial features', self)
        faceMenu.triggered.connect(lambda: self.updateImgAfter(util.showobj(self.imgNpBefore, util.getFaceFeats(self.imgNpBefore, self.skin, range=100), color=False)))

        # MENU BAR
        # Assemble the actions defined above into top-level menus.
        menubar = self.menuBar()

        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)
        fileMenu.addAction(exitAction)
        fileMenu.addAction(moveLeft)

        processMenu = menubar.addMenu('&Preprocess')
        #processMenu.addAction(histogramMenu)
        processMenu.addAction(equalizeMenu)
        processMenu.addAction(grayscaleMenu)
        processMenu.addAction(binarizeMenu)
        processMenu.addAction(gaussianMenu)
        processMenu.addAction(resizeMenu)
        processMenu.addAction(segmentMenu)

        edgeMenu = menubar.addMenu('&Edge Detection')
        edgeMenu.addAction(averageMenu)
        edgeMenu.addAction(differenceMenu)
        edgeMenu.addAction(homogenMenu)
        edgeMenu.addAction(sobelMenu)
        edgeMenu.addAction(prewittMenu)
        edgeMenu.addAction(freichenMenu)
        edgeMenu.addAction(kirschMenu)

        featureMenu = menubar.addMenu('&Features')
        featureMenu.addAction(chaincodeMenu)
        featureMenu.addAction(turncodeMenu)
        featureMenu.addAction(skeletonMenu)
        featureMenu.addAction(skinMenu)
        featureMenu.addAction(freemanMenu)

        recogMenu = menubar.addMenu('&Recognition')
        recogMenu.addAction(freemantrainfontMenu)
        recogMenu.addAction(freemantrainplatMenu)
        recogMenu.addAction(cctctrainfontMenu)
        recogMenu.addAction(cctctrainplatMenu)
        recogMenu.addAction(freemantestfontMenu)
        recogMenu.addAction(freemantestplatMenu)
        recogMenu.addAction(cctctestfontMenu)
        recogMenu.addAction(cctctestplatMenu)
        recogMenu.addAction(facesMenu)
        recogMenu.addAction(faceMenu)
        #recogMenu.addAction(

        """
        Toolbar, Status Bar, Tooltip
        """
        self.statusBar().showMessage('Ready')

        QToolTip.setFont(QFont('SansSerif', 10))
        #self.setToolTip('This is a <b>QWidget</b> widget')

        """
        Displaying
        """

        # Extra 80px of window height leaves room for the menu/status bars.
        self.setGeometry(12, 30, self.mainWidth, self.mainHeight+80)
        self.setWindowTitle('Pyxel')
        self.setWindowIcon(QIcon('res/web.png'))

        self.setCentralWidget(self.main)

        self.main.setGeometry(QRect(0, 80, self.mainWidth, self.mainHeight))
        #self.mainAfter.setGeometry(QRect(self.mainWidth/2, 80, self.mainWidth/2, self.mainHeight))

        self.show()
Example #3
0
def main(argv):
    """Demo entry point: load an image and run a face-detection pipeline.

    argv[1], when present, is the path of the image to process; otherwise a
    hard-coded sample portrait is loaded.  Written for Python 2 (bare print
    statements).  The commented-out sections record earlier exercises
    (histograms, equalization, OCR, plate recognition) kept for reference.
    NOTE(review): the trailing mask/x0/y0 setup is never used below, which
    suggests this routine continues beyond this excerpt.
    """
    # load image into ndarray
    if len(argv) > 1:
        testimg = mpimg.imread(argv[1])
    else:
        testimg = mpimg.imread(
            '/home/cilsat/Dropbox/kuliah/sem1/pp/portrait.jpeg')

    # calc sorted 2-D representation of image
    #imgs = util.sortimg(util.flattenimg(img))

    # get count of unique colors
    #uniq = util.getunique(imgs)
    #print(uniq)

    #pltshow(img)
    # get and display histogram(s) of color(s)
    #hist = util.gethistogram(img)
    #plthist(hist)

    # equalize image
    #imgeq = util.equalize(img, hist)
    #pltshow(imgeq)

    # get and display background image with dynamic threshold 15
    #background = util.getbackground(img, imgs, 15)
    #pltshow(background)

    # OCR
    # training: only need to run ONCE for each font
    # make sure you have an image containing all lower case letters, upper case letters, and digits in that order
    """
    font = "dejavusans-alphanumeric"
    fontimg = mpimg.imread('train/' + font + '.jpg')
    util.train(fontimg, font)

    # testing: this is only around 50% accurate for text of different font
    print util.test(testimg, font)
    """

    # License Plate recognition
    """
    dataset = "plat"
    util.gnb_train(dataset=dataset)
    """

    # Face detection
    h = testimg.shape[0]  # image height (rows)
    w = testimg.shape[1]  # image width (columns)
    # Crop a fixed window presumed to contain the face; the fractions look
    # tuned for the sample portrait -- TODO confirm for other inputs.
    face = testimg[int(0 * h):int(0.7 * h), int(0.15 * w):int(0.45 * w)]
    pltshow(face)
    # Re-map colors sampled from the face crop onto the whole image.
    imgf = util.mapImage(testimg, util.mapColor(face, 60))
    pltshow(imgf)
    imgt = util.thin(imgf, bg='dark')
    pltshow(imgt)
    # Keep only the first group returned by the segmenter.
    obj = util.segment(imgt, minsize=0.5)[0]
    print len(obj)
    util.getobjimg(obj[0])

    # Bounding box of the first segmented object.  NOTE(review): assumes
    # each point is (row, col) -- verify against util.segment's output.
    frame = np.array(obj[0])
    x = frame[..., 1]  # presumed column coordinates
    y = frame[..., 0]  # presumed row coordinates
    h = y.ptp()   # box height (max - min over rows); rebinds h from above
    w = x.ptp()   # box width (max - min over columns); rebinds w
    x0 = x.min()  # left edge of the box
    y0 = y.min()  # top edge of the box
    mask = np.zeros(testimg.shape, dtype=np.uint8)  # blank image-sized canvas
    print mask.shape