Esempio n. 1
0
    def detectFace(self, fileName, _show=True, _save=False, _saveInfo=False):
        """Run the multi-scale sliding-window face detector on one image.

        :param fileName: path of the image file to scan
        :param _show: if True, display the detection result on screen
        :param _save: if True, write the annotated image to disk
        :param _saveInfo: if True, persist the detected window's data
        :return: None
        """
        img = Img(fileName, calIntegral=False)
        # Each candidate is [window_x, window_y, window_w, window_h, window_scale].
        candidateWindows = []
        # Sweep every detection scale from DETECT_START up to (but not
        # including) DETECT_END in steps of DETECT_STEP.
        for scale in np.arange(self.DETECT_START, self.DETECT_END,
                               self.DETECT_STEP):
            self._detectInDiffScale(scale, img, candidateWindows)

        candidateWindows = np.array(candidateWindows)
        # Classify every candidate window as face / non-face.
        predWindow = self._detectScaledWindow(candidateWindows, img)

        # Reduce the positive predictions to the single most probable window.
        mostProbWindow = self._getMostProbWindow(predWindow)

        if _show:
            self.show(img.mat, mostProbWindow)
        if _save:
            self.save(img.mat, mostProbWindow, fileName)
        if _saveInfo:
            self.saveProbWindowInfo(mostProbWindow, fileName)
Esempio n. 2
0
def loadImageAndCalFeatureVal(indexlist, trainFiles, mat, haar):
    """Consume *indexlist* and fill *mat* with Haar feature values and labels.

    For each popped index the corresponding training image is loaded, its
    Haar feature vector is written into ``mat[index, :-1]``, and the label
    column ``mat[index, -1]`` is set to NON_FACE or FACE depending on
    whether the file path contains the non-face training marker.

    :param indexlist: mutable list of row indices; emptied by this call
    :param trainFiles: image file paths addressed by those indices
    :param mat: 2-D array, one row per sample; last column holds the label
    :param haar: Haar feature calculator providing calImgFeatureVal()
    """
    # Idiomatic emptiness test (was: while len(indexlist) != 0).
    while indexlist:
        index = indexlist.pop()
        print(index)
        img = Img(fileName=trainFiles[index])
        mat[index, :-1] = haar.calImgFeatureVal(img.integralMat, img.mat)
        # Label the row: paths under the non-face training set get NON_FACE.
        if TRAIN_NON_FACE in trainFiles[index]:
            mat[index, -1] = NON_FACE
        else:
            mat[index, -1] = FACE
Esempio n. 3
0
def main():
    """Interactive demo: open the webcam, or apply a battery of Img
    transformations to two user-selected image files.
    """
    # BUG FIX: the original pre-set `cam = False` and immediately
    # overwrote it with input(); the dead assignment is removed.
    cam = input('Use cam? y/n > ')
    if cam == 'y':
        im = Img()
        im.whatup()
    else:
        img1 = Img(get_file())
        print('Select another file!\n')
        img2 = Img(get_file())
        print('\n..Applying a bunch of stuff')
        # Each transformation returns a new image object with .show().
        resized = img1.resize(img2.getSize())
        resized.show()
        cropped = img1.cropTopLeft(500, 500)
        cropped.show()
        cropped2 = img1.cropMiddle(300, 300)
        cropped2.show()
        rot45 = img1.rotate(45, zoom=False)
        rot45.show()
        zoomed = img1.zoom(5, 0.8, center=True)
        zoomed.show()
        fadedmatrix = img1.fadeMat(fade=20)
        fadedmatrix.show()
        blended = img1.blend(img2, 0.7)
        blended.show()
        blendmatrix = img1.blendMat(img2)
        blendmatrix.show()
        gray = img1.gray()
        gray.show()
        bw = img1.bw()
        bw.show()
        sharp = img1.sharpen(factor=0.0)  # blur = 0.0, sharp = 2.0
        sharp.show()
        test = img1.fadeMat(3)
        test.show()
        m = img1.keepMax()
        m.show()
        edge = img1.edge()
        edge.show()
        emboss = img1.emboss()
        emboss.show()
Esempio n. 4
0
def main():
    """Main loop: show an intro image, then repeatedly wait for a shake,
    fade the current image in, and fade it out after a random delay while
    the next image is prefetched.
    """
    global AB_ID, MIN_SEC_FADE, MAX_SEC_FADE, log, wait

    # Launch intro image - to make it clear that the launch is on its way.
    introImage = Img("ImgOffline/polaIntro.jpeg")
    fadeIn(introImage)

    screenIsDark = True
    shakes = 0

    # Setup image library and API connection
    api = APIconnection()
    setup = Settings(api)
    imageList = ImageLib(api)
    log.connect(api)

    log.logevent("Init", AB_ID)
    currentImage = imageList.getNextImage()

    # Fade out intro image - to make it clear that the Pola is ready to go.
    # BUG FIX: the original called fadeOut(IntroImage) with a capital I,
    # an undefined name that raised NameError here.
    fadeOut(introImage)

    while True:

        # If no image is displayed - wait for shake
        if screenIsDark:

            imu = IMU()

            if imu.checkShake():

                log.logevent("Shake", AB_ID)
                fadeIn(currentImage)

                screenIsDark = False
                shakes = 0
                """After the image fade-in is complete, we do most of our expensive api calls.
                This way, Pola will immediately be ready to show the next image after fadeout, 
                and users will never have to wonder if their shakes are being registered, 
                or if it is working on some other process"""

                # Immediately fade out offline-notifications
                # (we do not want these to be visible for 6 hours if the wifi returns in 6 minutes)
                if currentImage.offline:
                    fadeoutTime = 0

                else:
                    fadeoutTime = random.randint(MIN_SEC_FADE, MAX_SEC_FADE)
                print("Fadeout Time: ", fadeoutTime)
                fadeoutCounter = 0
                # Prefetch the next image now, while the current one shows.
                nextImage = imageList.getNextImage()

                # Update prototype settings (A/B etc)
                settingUpdate = setup.update()
                if settingUpdate[0]:
                    AB_ID = settingUpdate[1]
                    MIN_SEC_FADE = hoursToMs(settingUpdate[2])
                    MAX_SEC_FADE = hoursToMs(settingUpdate[3])
                    # NOTE(review): these three are assigned but never read in
                    # this function and are not declared global — confirm
                    # whether they were meant to update module-level settings.
                    SENSITIVITY = settingUpdate[4]
                    FADEIN_SPEED = settingUpdate[5]
                    FADEOUT_SPEED = settingUpdate[6]

        # If an image is displayed, check if it is time to stop displaying it.
        else:

            if fadeoutCounter == fadeoutTime:
                fadeOut(currentImage)
                # Update the current image to the one we loaded while displaying the current image
                currentImage.delete()
                currentImage = nextImage
                screenIsDark = True
                fadeoutTime = math.inf
                fadeoutCounter = 0

            else:
                # update fadeout timer
                fadeoutCounter += 1

        checkEvents()
        pygame.time.delay(wait)
        pygame.display.update()
Esempio n. 5
0
    def _detectScaledWindow(self, scaledWindows, img):
        """Compute Haar feature values for every candidate window and keep
        the windows the model scores as positive (face).

        :param scaledWindows: array of [x, y, w, h, scale] rows
        :param img: source Img whose pixel matrix the windows index into
        :return: array of [x, y, w, h, scale, probability] rows, one per
                 window with a positive prediction
        """
        # One feature-vector row per candidate window.
        scaledWindowsMat = np.zeros(
            (scaledWindows.shape[0], len(self.haar.features)), dtype='float32')

        for window in range(
                scaledWindows.shape[0]):  # .shape[0] is the number of candidate windows (rows)
            window_x, window_y, window_w, window_h, scale = scaledWindows[
                window]

            window_x, window_y, window_w, window_h = int(window_x), int(
                window_y), int(window_w), int(window_h)

            # Crop the sub-window out of the image and take its integral image.
            subWindowImg          = Img(mat=img.mat[window_y : window_y+window_h, \
                                           window_x : window_x+window_w])
            subWindowImgIntegral = subWindowImg.integralMat

            # #normalization
            # sumVal        = sum(sum(subWindowImg.mat[y:y+h, x:x+w]))
            # sqSumVal      = sum(sum(subWindowImg.mat[y:y+h, x:x+w] ** 2))
            # meanVal       = sumVal   / (w * h)
            # sqMeanVal     = sqSumVal / (w * h)
            # normFactorVal = np.sqrt(sqMeanVal - meanVal ** 2)
            #
            # if normFactorVal == 0:
            #     normFactorVal = 1

            # Evaluate only the features the trained model selected, scaled
            # to this window's detection scale.
            for f in range(len(self.selectedFeatures)):
                type, x, y, w, h, dimension = self.selectedFeatures[f]
                x, y, w, h = int(x * scale), int(y * scale), int(
                    w * scale), int(h * scale)

                # Type I: two w*h rectangles stacked vertically
                # (positive on top, negative below); mean-normalized by area.
                if type == "HAAR_TYPE_I":
                    pos = self.haar.getPixelValInIntegralMat(
                        x, y, w, h, subWindowImgIntegral)
                    neg = self.haar.getPixelValInIntegralMat(
                        x, y + h, w, h, subWindowImgIntegral)
                    scaledWindowsMat[window][dimension] = (pos - neg) / (2 *
                                                                         w * h)
                # Type II: two rectangles side by side
                # (negative left, positive right).
                elif type == "HAAR_TYPE_II":
                    neg = self.haar.getPixelValInIntegralMat(
                        x, y, w, h, subWindowImgIntegral)
                    pos = self.haar.getPixelValInIntegralMat(
                        x + w, y, w, h, subWindowImgIntegral)

                    scaledWindowsMat[window][dimension] = (pos - neg) / (2 *
                                                                         w * h)
                # Type III: three rectangles in a horizontal row
                # (negative, positive, negative).
                elif type == "HAAR_TYPE_III":
                    neg1 = self.haar.getPixelValInIntegralMat(
                        x, y, w, h, subWindowImgIntegral)
                    pos = self.haar.getPixelValInIntegralMat(
                        x + w, y, w, h, subWindowImgIntegral)
                    neg2 = self.haar.getPixelValInIntegralMat(
                        x + 2 * w, y, w, h, subWindowImgIntegral)

                    scaledWindowsMat[window][dimension] = (pos - neg1 -
                                                           neg2) / (3 * w * h)

                # Type IV: three rectangles in a vertical column
                # (negative, positive, negative).
                elif type == "HAAR_TYPE_IV":
                    neg1 = self.haar.getPixelValInIntegralMat(
                        x, y, w, h, subWindowImgIntegral)
                    pos = self.haar.getPixelValInIntegralMat(
                        x, y + h, w, h, subWindowImgIntegral)
                    neg2 = self.haar.getPixelValInIntegralMat(
                        x, y + 2 * h, w, h, subWindowImgIntegral)

                    scaledWindowsMat[window][dimension] = (pos - neg1 -
                                                           neg2) / (3 * w * h)

                # Type V: 2x2 checkerboard of rectangles
                # (diagonal pair positive, anti-diagonal pair negative).
                elif type == "HAAR_TYPE_V":
                    neg1 = self.haar.getPixelValInIntegralMat(
                        x, y, w, h, subWindowImgIntegral)
                    pos1 = self.haar.getPixelValInIntegralMat(
                        x + w, y, w, h, subWindowImgIntegral)
                    pos2 = self.haar.getPixelValInIntegralMat(
                        x, y + h, w, h, subWindowImgIntegral)
                    neg2 = self.haar.getPixelValInIntegralMat(
                        x + w, y + h, w, h, subWindowImgIntegral)

                    scaledWindowsMat[window][dimension] = (pos1 + pos2 - neg1 -
                                                           neg2) / (4 * w * h)

        # Keep only windows with a positive prediction score, appending the
        # score as a sixth column.
        pred = self.model.predict_prob(scaledWindowsMat)
        indexs = np.where(pred > 0)[0]
        predWindow = np.zeros((len(indexs), scaledWindows.shape[1] + 1),
                              dtype=object)
        for i in range(len(indexs)):
            predWindow[i] = np.append(scaledWindows[indexs[i]],
                                      pred[indexs[i]])

        return predWindow
Esempio n. 6
0
import tkinter as tk
from tkinter import messagebox, Button
from nonogram import Nonogram
from image import Img
import numpy as np

# #####  DEFINE GRID HERE  ###### #
# Board dimensions in cells.
ROWS = 10
COLS = 10
# Visual size of one grid cell, in pixels.
GRID_SIZE = 40

# Initialize module-level state: the puzzle model, the image helper, and
# the ROWS x COLS tile matrix (initialized to 0 — presumably "empty";
# verify against the click/draw handlers).
nonogram = Nonogram()
img = Img()
tiles = [[0 for _ in range(COLS)] for _ in range(ROWS)]


def create_grid(event=None):
    """Redraw the board's grid lines on the canvas, replacing old ones.

    Bound as a canvas event handler (hence the unused *event* argument).
    """
    width = grid.winfo_width()   # current canvas width
    height = grid.winfo_height() # current canvas height
    # Remove only the previously drawn grid lines, nothing else.
    grid.delete('grid_line')

    # Vertical lines, one every GRID_SIZE pixels.
    for x in range(0, width, GRID_SIZE):
        grid.create_line([(x, 0), (x, height)], tag='grid_line')

    # Horizontal lines, one every GRID_SIZE pixels.
    for y in range(0, height, GRID_SIZE):
        grid.create_line([(0, y), (width, y)], tag='grid_line')
Esempio n. 7
0
#         # Rotate the log file every hour ("when"/"interval" set the period); keep 36 backup files
#         self.handler = logging.handlers.TimedRotatingFileHandler("log.txt", when="H", interval=1, backupCount=36)
#         # Alternative: a size-based RotatingFileHandler with a fixed number of capped-size backups
#         # self.handler = RotatingFileHandler('log.txt', maxBytes=50 * 1024, backupCount=100)
#         self.handler.setLevel(logging.INFO)
#
#         # create a logging format
#         formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(thread)d-%(message)s')
#         self.handler.setFormatter(formatter)
#
#         # add the handlders to logger
#         self.logger.addHandler(self.handler)



if __name__ == '__main__':
    # Application entry point: create the shared image helpers and the
    # database connection, then start the Qt event loop.
    img = Img()
    cut_img = Img()
    subImgList = []
    # NOTE(review): SERVERS/DBUSER/DBPWD/DBNAME are presumably module-level
    # config constants imported elsewhere in this file — confirm.
    db = sqlserverDB.DBHelper(server=SERVERS, username=DBUSER, password=DBPWD, database=DBNAME)

    app = QtGui.QApplication(sys.argv)
    window = MainForm()
    window.showMaximized()
    window.show()
    # exec_() blocks until the last window closes; its status code is
    # forwarded to the OS via sys.exit.
    sys.exit(app.exec_())