Пример #1
0
class EmotionRecognition:
    """Trains and serves a VGG-style emotion-classification CNN.

    Relies on module-level constants (RUN_NAME, SIZE_FACE, SAVE_DIRECTORY,
    SAVE_MODEL_FILENAME) and the DatasetLoader / NetworkBuilder helpers.
    """

    def __init__(self):
        # Dataset access and graph construction are delegated to helpers.
        self.dataset = DatasetLoader()
        self.networkbuilder = NetworkBuilder()

    def build_network(self):
        """Build the VGG model; saved weights can be restored via load_model()."""
        self.model = self.networkbuilder.build_vgg()

    def load_saved_dataset(self):
        """Load the pre-processed dataset from disk into self.dataset."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load data, build the network and run a full training session."""
        self.load_saved_dataset()
        self.build_network()
        # NOTE: the original re-checked `self.dataset is None` here, but the
        # dataset is loaded unconditionally above and __init__ always assigns
        # a DatasetLoader, so that branch was dead code and has been removed.
        print('[+] Training network')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       validation_set=(self.dataset.images_test,
                                       self.dataset.labels_test),
                       n_epoch=100,
                       batch_size=100,
                       shuffle=True,
                       show_metric=True,
                       snapshot_step=200,
                       snapshot_epoch=True,
                       run_id=RUN_NAME)

    def predict(self, image):
        """Return model predictions for a single face image.

        Returns None when the input is None; otherwise reshapes the image to
        the (batch, SIZE_FACE, SIZE_FACE, 1) layout the network expects.
        """
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the trained model under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Restore previously saved model weights from SAVE_DIRECTORY."""
        self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Пример #2
0
class EmotionRecognition:
    """Trains and serves a small AlexNet-style emotion-classification CNN.

    Built with tflearn; relies on module-level constants (SIZE_FACE, GRAY,
    EMOTIONS) and the DatasetLoader helper. Checkpoint paths are hardcoded
    to the 'model/turing_*' experiment names.
    """

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build a smaller 'AlexNet' and restore the saved checkpoint.

        Reference:
        https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        """
        print('[+] Building CNN')
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, GRAY])
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 128, 4, activation='relu')
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network,
                                       3072,
                                       activation='relu',
                                       name='relu-fully-connected')
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax',
                                       name='softmax-fully-connected')
        self.network = regression(
            self.network,
            optimizer='momentum',
            name='regression',
            loss='categorical_crossentropy')
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path='model/turing_60epo_50batch',
                                 max_checkpoints=1,
                                 tensorboard_dir="logs/",
                                 tensorboard_verbose=3)
        self.load_model()

    def load_saved_dataset(self):
        """Load the pre-processed dataset from disk into self.dataset."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load data, build the network and train for 20 epochs."""
        self.load_saved_dataset()
        self.build_network()
        # NOTE: the dataset is loaded unconditionally above and __init__
        # always assigns a DatasetLoader, so the original dead
        # `if self.dataset is None` re-load branch has been removed.
        print('[+] Training network')
        print("[+] Size test 1: " + str(len(self.dataset.images_test)))
        print("[+] Size label 1: " + str(len(self.dataset.labels_test)))
        self.model.fit(
            self.dataset.images,
            self.dataset.labels,
            # NOTE(review): uses the private `_labels_test` attribute while
            # the size prints above use `labels_test` — confirm which one
            # DatasetLoader actually exposes.
            validation_set=(self.dataset.images_test,
                            self.dataset._labels_test),
            n_epoch=20,
            batch_size=50,
            shuffle=True,
            show_metric=True,
            snapshot_step=200,
            snapshot_epoch=True,
            run_id='turing_140epo_50batch')

    def predict(self, image):
        """Return model predictions for one face image, or None for None."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the trained model to the fixed checkpoint path."""
        self.model.save("model/turing_140epo_50batch")
        print('[+] Model trained and saved at model/turing_140epo_50batch')

    def load_model(self):
        """Restore model weights from the fixed checkpoint path."""
        self.model.load("model/turing_140epo_50batch")
        # Fixed: the original message reported 'turing_120epo_50batch',
        # which did not match the checkpoint actually loaded.
        print('[+] Model loaded from model/turing_140epo_50batch\n')
Пример #3
0
class EmotionRecognition:
    """Emotion-classification CNN with conv-layer weight dump/reload hooks.

    Beyond building the network, build_network() restores a fixed checkpoint,
    dumps the second convolution layer's weights to a text file, and reads a
    (possibly hand-modified) copy of those weights back in. Depends on
    module-level constants (SIZE_FACE, EMOTIONS, SAVE_DIRECTORY,
    SAVE_MODEL_FILENAME) and the DatasetLoader helper.
    """

    def __init__(self):
        # Dataset access is delegated to the DatasetLoader helper.
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build the CNN, load a checkpoint and dump/reload conv-2 weights.

        WARNING: the checkpoint and weight-file paths below are hardcoded,
        machine-specific Windows paths.
        """
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        #print(input_data)
        self.network = conv_2d(self.network,
                               64,
                               5,
                               activation='relu',
                               padding='valid')
        #print(self.network)
        #self.network = local_response_normalization(self.network)
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network,
                               64,
                               5,
                               activation='relu',
                               padding='valid')
        #print(self.network)
        #self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network,
                               128,
                               3,
                               activation='relu',
                               padding='valid')
        #print(self.network)
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network, 3072, activation='relu')
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax')
        self.network = regression(self.network,
                                  optimizer='momentum',
                                  loss='categorical_crossentropy')
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path=SAVE_DIRECTORY +
                                 '/emotion_recognition',
                                 max_checkpoints=1,
                                 tensorboard_verbose=2
                                 #session = 'session'
                                 )
        #self.load_model()
        # Restore weights from a fixed, machine-specific checkpoint.
        self.model.load(
            'C:\\Users\\kingjy79\\Documents\\rlawhdduq1\\pn_emotion_copy3\\emotion\\data\\emotion_recognition-17325'
        )
        # Fetch the weight variable of the second convolution layer by name.
        convolution_layer2 = tflearn.variables.get_layer_variables_by_name(
            'Conv2D_1')[0]  # return value is a tensor
        print(convolution_layer2)
        convolution_layer2_weight = self.model.get_weights(
            convolution_layer2)  # return value is a numpy array
        print(convolution_layer2_weight)
        # Dump the flattened weights to a text file, one value per line.
        np.savetxt(
            'C:\\Users\\kingjy79\\Documents\\rlawhdduq1\\pn_emotion_copy3\\emotion\\data\\convolution_layer2_weight.txt',
            X=convolution_layer2_weight.flatten(),
            fmt='%.6f')
        # Read a (possibly hand-edited) copy of those weights back as floats.
        convolution_layer2_weight_file0 = open(
            'C:\\Users\\kingjy79\\Documents\\rlawhdduq1\\pn_emotion_copy3\\emotion\\data\\convolution_layer2_weight_modify.txt',
            'r')
        convolution_layer2_weight_modify = []
        # 102400 = 5 * 5 * 64 * 64, the element count of the conv-2 kernel
        # (see the reshape below).
        for i in range(102400):
            line = float(convolution_layer2_weight_file0.readline())
            convolution_layer2_weight_modify.append(line)
        convolution_layer2_weight_file0.close()
        convolution_layer2_weight_modify = np.asarray(
            convolution_layer2_weight_modify)
        print('non reshape')
        print(convolution_layer2_weight_modify)
        '''
        convolution_layer2_weight_modify_max = 0
        convolution_layer2_weight_modify_min = 0
        for i in range(102400):
            if(convolution_layer2_weight_modify_max < convolution_layer2_weight_modify[i]):
                convolution_layer2_weight_modify_max = convolution_layer2_weight_modify[i]
            if(convolution_layer2_weight_modify_min > convolution_layer2_weight_modify[i]):
                convolution_layer2_weight_modify_min = convolution_layer2_weight_modify[i]
        RESOLUTION = 65535
        RESOLUTION_WIDTH = (convolution_layer2_weight_modify_max - convolution_layer2_weight_modify_min)/RESOLUTION
        for i in range(102400):
            if(convolution_layer2_weight_modify[i] >= convolution_layer2_weight_modify_min and \
            convolution_layer2_weight_modify[i] < convolution_layer2_weight_modify_min + RESOLUTION):
                convolution_layer2_weight_modify[i]=(2*convolution_layer2_weight_modify_min +RESOLUTION_WIDTH)/2
        '''
        # Restore the flat vector to the conv-2 kernel shape.
        convolution_layer2_weight_modify = np.reshape(
            convolution_layer2_weight_modify, [5, 5, 64, 64])
        print('reshape')
        print(convolution_layer2_weight_modify)

        #convolution_layer1 = np.asarray(convolution_layer1)
        #convolution_layer2 = tf.convert_to_tensor(convolution_layer1)
        # NOTE(review): the commented-out set_weights calls below suggest the
        # reloaded/modified weights were meant to be written back into the
        # model; as committed, the modified weights are only printed.
        #self.model.set_weights(convolution_layer1, convolution_layer1*10)
        #self.model.set_weights(convolution_layer1, convolution_layer1 + tf.ones([5, 5, 1, 64]))
        #convolution_layer1_weight = self.model.get_weights(convolution_layer1)
        #print(convolution_layer1_weight)

    def load_saved_dataset(self):
        """Load the pre-processed dataset from disk into self.dataset."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Load data, build the network and train for 100 epochs."""
        self.load_saved_dataset()
        self.build_network()
        # Dead branch: the dataset was just loaded unconditionally above and
        # __init__ always assigns a DatasetLoader, so this never fires.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       validation_set=(self.dataset.images_test,
                                       self.dataset.labels_test),
                       n_epoch=100,
                       batch_size=50,
                       shuffle=True,
                       show_metric=True,
                       snapshot_step=200,
                       snapshot_epoch=True,
                       run_id='emotion_recognition')

    def predict(self, image):
        """Return model predictions for one face image, or None for None."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Persist the trained model under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Restore saved weights if the checkpoint file exists (no-op otherwise)."""
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Пример #4
0
class EmotionRecognition:
    """Trains and evaluates an emotion-classification CNN built with tflearn.

    Relies on module-level constants (SIZE_FACE, EMOTIONS, SAVE_DIRECTORY,
    SAVE_MODEL_FILENAME), the DatasetLoader helper, and the sklearn metrics
    helpers (confusion_matrix, classification_report) imported elsewhere in
    the module.
    """

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build the 4-conv-layer CNN and try to restore saved weights."""
        padding = 'SAME'
        print(' ')
        print('----------------- Building CNN -----------------')
        print(' ')
        self.network = tflearn.input_data(
            shape=[None, SIZE_FACE, SIZE_FACE, 1])

        # Stage 1: conv + max-pool + batch-norm.
        conv_1 = tflearn.relu(
            conv_2d(self.network,
                    96,
                    3,
                    strides=1,
                    bias=True,
                    padding=padding,
                    activation=None,
                    name='Conv_1'))
        maxpool_1 = tflearn.max_pool_2d(conv_1,
                                        3,
                                        strides=2,
                                        padding=padding,
                                        name='MaxPool_1')
        maxpool_1 = tflearn.batch_normalization(maxpool_1)

        # Stage 2: conv + max-pool + batch-norm.
        conv_2 = tflearn.relu(
            conv_2d(maxpool_1,
                    108,
                    2,
                    strides=1,
                    padding=padding,
                    name='Conv_2'))
        maxpool_2 = tflearn.max_pool_2d(conv_2,
                                        2,
                                        strides=1,
                                        padding=padding,
                                        name='MaxPool_2')
        maxpool_2 = tflearn.batch_normalization(maxpool_2)

        # Stage 3: two stacked convs before pooling.
        conv_3 = tflearn.relu(
            conv_2d(maxpool_2,
                    208,
                    2,
                    strides=1,
                    padding=padding,
                    name='Conv_3'))
        conv_4 = tflearn.relu(
            conv_2d(conv_3, 64, 2, strides=1, padding=padding, name='Conv_4'))
        maxpool_3 = tflearn.max_pool_2d(conv_4,
                                        2,
                                        strides=1,
                                        padding=padding,
                                        name='MaxPool_3')
        maxpool_3 = tflearn.batch_normalization(maxpool_3)

        # Dense head with dropout regularisation.
        net = tflearn.flatten(maxpool_3, name='Net')
        net = tflearn.dropout(net, 0.1)

        final_1 = tflearn.fully_connected(net, 512, activation='relu')
        final_1 = tflearn.dropout(final_1, 0.5)

        final_2 = tflearn.fully_connected(final_1, 256, activation='relu')
        final_2 = tflearn.dropout(final_2, 0.5)

        # 7-way softmax output (one unit per emotion class).
        Loss = tflearn.fully_connected(final_2,
                                       7,
                                       activation='softmax',
                                       name='Total_loss')

        self.network = tflearn.regression(Loss,
                                          optimizer='Adam',
                                          loss='categorical_crossentropy',
                                          learning_rate=0.0001)
        self.model = tflearn.DNN(self.network,
                                 tensorboard_verbose=0,
                                 tensorboard_dir=os.getcwd() + '/checkpoint',
                                 checkpoint_path='./data/' +
                                 '/emotion_recognition',
                                 max_checkpoints=None)
        self.load_model()

    def load_saved_dataset(self):
        """Populate self.dataset from the pre-processed files on disk."""
        self.dataset.load_from_save()
        print(' ')
        print('----------------- Dataset found and loaded -----------------')
        print(' ')

    def start_training(self):
        """Train the network, then print a confusion matrix and report."""
        self.load_saved_dataset()
        self.build_network()
        # NOTE: the dataset is loaded unconditionally above and __init__
        # always assigns a DatasetLoader, so the original dead
        # `if self.dataset is None` re-load branch has been removed.
        print(' ')
        print('----------------- Training network -----------------')
        print(' ')
        print('hello world')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       n_epoch=140,
                       # NOTE(review): uses the private `_labels_test`
                       # attribute — confirm DatasetLoader exposes it.
                       validation_set=(self.dataset.images_test,
                                       self.dataset._labels_test),
                       show_metric=True,
                       batch_size=100,
                       run_id='emotion_recognition')

        # Fixed: the original called predict() twice and discarded the first
        # result; a single inference pass over the test set is enough.
        predictions = self.model.predict(self.dataset.images_test)
        matrix = confusion_matrix(self.dataset._labels_test.argmax(axis=1),
                                  predictions.argmax(axis=1))
        print(matrix)
        print(
            classification_report(self.dataset._labels_test.argmax(axis=1),
                                  predictions.argmax(axis=1),
                                  target_names=EMOTIONS))

    def load_model(self):
        """Restore weights from 'CNN_Trained_model' if its .meta file exists."""
        if isfile("CNN_Trained_model.meta"):
            self.model.load("CNN_Trained_model")

            print(' ')
            print('----------------- Model loaded -----------------')
            print(' ')
        else:
            print(' ')
            print('----------------- Can not load the model -----------------')
            print(' ')

    def save_model(self):
        """Persist the trained model under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print(' ')
        print(
            '------------------ Model trained and saved ----------------------'
        )
        print(' ')

    def predict(self, image):
        """Return model predictions for one face image, or None for None."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)
Пример #5
0
class EmotionRecognition:
  """Trains and serves a reduced GoogLeNet/Inception emotion classifier.

  Architecture adapted from the tflearn GoogLeNet example:
  https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py
  Relies on module-level constants (SIZE_FACE, COLOR, EMOTIONS,
  BATH_SIZE_CONSTANT, EXPERIMENTO_LABEL, CHECKPOINT_DIR, TENSORBOARD_DIR,
  MODEL_LABEL) and the DatasetLoader helper.
  """

  def __init__(self):
    self.dataset = DatasetLoader()

  def build_network(self):
    """Assemble the Inception-style graph and wrap it in a tflearn DNN."""
    print('[+] Building Inception V3')
    print('[-] COLOR: ' + str(COLOR))
    print('[-] BATH_SIZE' + str(BATH_SIZE_CONSTANT))
    print('[-] EXPERIMENTAL_LABEL' + EXPERIMENTO_LABEL)

    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])

    # Stem: 7x7 conv, pooling, LRN, 3x3 convs.
    self.conv1_7_7 = conv_2d(self.network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    self.pool1_3_3 = max_pool_2d(self.conv1_7_7, 3, strides=2)
    self.pool1_3_3 = local_response_normalization(self.pool1_3_3)
    self.conv2_3_3_reduce = conv_2d(self.pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    self.conv2_3_3 = conv_2d(self.conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    self.conv2_3_3 = local_response_normalization(self.conv2_3_3)
    self.pool2_3_3 = max_pool_2d(self.conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    # 3a
    self.inception_3a_1_1 = conv_2d(self.pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    self.inception_3a_3_3_reduce = conv_2d(self.pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    self.inception_3a_3_3 = conv_2d(self.inception_3a_3_3_reduce, 128, filter_size=3,  activation='relu', name='inception_3a_3_3')
    self.inception_3a_5_5_reduce = conv_2d(self.pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    self.inception_3a_5_5 = conv_2d(self.inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    self.inception_3a_pool = max_pool_2d(self.pool2_3_3, kernel_size=3, strides=1, name='inception_3a_pool')
    self.inception_3a_pool_1_1 = conv_2d(self.inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
    self.inception_3a_output = merge([self.inception_3a_1_1, self.inception_3a_3_3, self.inception_3a_5_5, self.inception_3a_pool_1_1], mode='concat', axis=3)

    # 3b
    self.inception_3b_1_1 = conv_2d(self.inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    self.inception_3b_3_3_reduce = conv_2d(self.inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    self.inception_3b_3_3 = conv_2d(self.inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    self.inception_3b_5_5_reduce = conv_2d(self.inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce')
    # Fixed: activation='relu' was missing here while every sibling 5x5
    # branch (and the tflearn googlenet reference) uses it.
    self.inception_3b_5_5 = conv_2d(self.inception_3b_5_5_reduce, 96, filter_size=5, activation='relu', name='inception_3b_5_5')
    self.inception_3b_pool = max_pool_2d(self.inception_3a_output, kernel_size=3, strides=1,  name='inception_3b_pool')
    self.inception_3b_pool_1_1 = conv_2d(self.inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1')
    self.inception_3b_output = merge([self.inception_3b_1_1, self.inception_3b_3_3, self.inception_3b_5_5, self.inception_3b_pool_1_1], mode='concat', axis=3, name='inception_3b_output')
    self.pool3_3_3 = max_pool_2d(self.inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')

    # 4a
    self.inception_4a_1_1 = conv_2d(self.pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    self.inception_4a_3_3_reduce = conv_2d(self.pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    self.inception_4a_3_3 = conv_2d(self.inception_4a_3_3_reduce, 208, filter_size=3,  activation='relu', name='inception_4a_3_3')
    self.inception_4a_5_5_reduce = conv_2d(self.pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    self.inception_4a_5_5 = conv_2d(self.inception_4a_5_5_reduce, 48, filter_size=5,  activation='relu', name='inception_4a_5_5')
    self.inception_4a_pool = max_pool_2d(self.pool3_3_3, kernel_size=3, strides=1,  name='inception_4a_pool')
    self.inception_4a_pool_1_1 = conv_2d(self.inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
    self.inception_4a_output = merge([self.inception_4a_1_1, self.inception_4a_3_3, self.inception_4a_5_5, self.inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')

    # 4b
    # Fixed: this layer was mistakenly named 'inception_4a_1_1' (copy-paste
    # from the 4a block), colliding with the 4a branch name above.
    self.inception_4b_1_1 = conv_2d(self.inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    self.inception_4b_3_3_reduce = conv_2d(self.inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    self.inception_4b_3_3 = conv_2d(self.inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    self.inception_4b_5_5_reduce = conv_2d(self.inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    self.inception_4b_5_5 = conv_2d(self.inception_4b_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4b_5_5')
    self.inception_4b_pool = max_pool_2d(self.inception_4a_output, kernel_size=3, strides=1,  name='inception_4b_pool')
    self.inception_4b_pool_1_1 = conv_2d(self.inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
    self.inception_4b_output = merge([self.inception_4b_1_1, self.inception_4b_3_3, self.inception_4b_5_5, self.inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')

    # 4c
    self.inception_4c_1_1 = conv_2d(self.inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    self.inception_4c_3_3_reduce = conv_2d(self.inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    self.inception_4c_3_3 = conv_2d(self.inception_4c_3_3_reduce, 256,  filter_size=3, activation='relu', name='inception_4c_3_3')
    self.inception_4c_5_5_reduce = conv_2d(self.inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    self.inception_4c_5_5 = conv_2d(self.inception_4c_5_5_reduce, 64,  filter_size=5, activation='relu', name='inception_4c_5_5')
    self.inception_4c_pool = max_pool_2d(self.inception_4b_output, kernel_size=3, strides=1)
    self.inception_4c_pool_1_1 = conv_2d(self.inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
    self.inception_4c_output = merge([self.inception_4c_1_1, self.inception_4c_3_3, self.inception_4c_5_5, self.inception_4c_pool_1_1], mode='concat', axis=3, name='inception_4c_output')

    # 4d
    self.inception_4d_1_1 = conv_2d(self.inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    self.inception_4d_3_3_reduce = conv_2d(self.inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    self.inception_4d_3_3 = conv_2d(self.inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    self.inception_4d_5_5_reduce = conv_2d(self.inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    self.inception_4d_5_5 = conv_2d(self.inception_4d_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4d_5_5')
    self.inception_4d_pool = max_pool_2d(self.inception_4c_output, kernel_size=3, strides=1,  name='inception_4d_pool')
    self.inception_4d_pool_1_1 = conv_2d(self.inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
    self.inception_4d_output = merge([self.inception_4d_1_1, self.inception_4d_3_3, self.inception_4d_5_5, self.inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')

    # 4e
    self.inception_4e_1_1 = conv_2d(self.inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    self.inception_4e_3_3_reduce = conv_2d(self.inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    self.inception_4e_3_3 = conv_2d(self.inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    self.inception_4e_5_5_reduce = conv_2d(self.inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    self.inception_4e_5_5 = conv_2d(self.inception_4e_5_5_reduce, 128,  filter_size=5, activation='relu', name='inception_4e_5_5')
    self.inception_4e_pool = max_pool_2d(self.inception_4d_output, kernel_size=3, strides=1,  name='inception_4e_pool')
    self.inception_4e_pool_1_1 = conv_2d(self.inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
    self.inception_4e_output = merge([self.inception_4e_1_1, self.inception_4e_3_3, self.inception_4e_5_5, self.inception_4e_pool_1_1], axis=3, mode='concat')
    self.pool4_3_3 = max_pool_2d(self.inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    # 5a
    self.inception_5a_1_1 = conv_2d(self.pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    self.inception_5a_3_3_reduce = conv_2d(self.pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    self.inception_5a_3_3 = conv_2d(self.inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    self.inception_5a_5_5_reduce = conv_2d(self.pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    self.inception_5a_5_5 = conv_2d(self.inception_5a_5_5_reduce, 128, filter_size=5,  activation='relu', name='inception_5a_5_5')
    self.inception_5a_pool = max_pool_2d(self.pool4_3_3, kernel_size=3, strides=1,  name='inception_5a_pool')
    self.inception_5a_pool_1_1 = conv_2d(self.inception_5a_pool, 128, filter_size=1, activation='relu', name='inception_5a_pool_1_1')
    self.inception_5a_output = merge([self.inception_5a_1_1, self.inception_5a_3_3, self.inception_5a_5_5, self.inception_5a_pool_1_1], axis=3, mode='concat')

    # 5b
    self.inception_5b_1_1 = conv_2d(self.inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    self.inception_5b_3_3_reduce = conv_2d(self.inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    self.inception_5b_3_3 = conv_2d(self.inception_5b_3_3_reduce, 384,  filter_size=3, activation='relu', name='inception_5b_3_3')
    self.inception_5b_5_5_reduce = conv_2d(self.inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    self.inception_5b_5_5 = conv_2d(self.inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    self.inception_5b_pool = max_pool_2d(self.inception_5a_output, kernel_size=3, strides=1,  name='inception_5b_pool')
    self.inception_5b_pool_1_1 = conv_2d(self.inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    self.inception_5b_output = merge([self.inception_5b_1_1, self.inception_5b_3_3, self.inception_5b_5_5, self.inception_5b_pool_1_1], axis=3, mode='concat')
    self.pool5_7_7 = avg_pool_2d(self.inception_5b_output, kernel_size=7, strides=1)
    self.pool5_7_7 = dropout(self.pool5_7_7, 0.4)

    # fc: softmax over the emotion classes.
    self.loss = fully_connected(self.pool5_7_7, len(EMOTIONS), activation='softmax')
    self.network = regression(self.loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    self.model = tflearn.DNN(
      self.network,
      checkpoint_path=CHECKPOINT_DIR,
      max_checkpoints=1,
      tensorboard_dir=TENSORBOARD_DIR,
      tensorboard_verbose=1
    )

  def load_saved_dataset(self):
    """Load the pre-processed dataset from disk into self.dataset."""
    self.dataset.load_from_save()
    print('[+] Dataset found and loaded')

  def start_training(self):
    """Load data, build the network and train for 500 epochs."""
    self.load_saved_dataset()
    self.build_network()
    # NOTE: the dataset is loaded unconditionally above and __init__ always
    # assigns a DatasetLoader, so the original dead `if self.dataset is
    # None` re-load branch has been removed.
    print('[+] Training network')

    print("[+] Size train: " + str(len(self.dataset.images)))
    print("[+] Size train-label: " + str(len(self.dataset.labels)))
    print("[+] Size test: " + str(len(self.dataset.images_test)))
    print("[+] Size test-label: " + str(len(self.dataset.labels_test)))

    self.model.fit(
      self.dataset.images, self.dataset.labels,
      # NOTE(review): `_labels_test` is a private attribute while the size
      # print above uses `labels_test` — confirm which one DatasetLoader
      # actually exposes.
      validation_set=(self.dataset.images_test, self.dataset._labels_test),
      n_epoch=500,
      batch_size=BATH_SIZE_CONSTANT,
      shuffle=True,
      show_metric=True,
      snapshot_step=200,
      snapshot_epoch=True,
      run_id=EXPERIMENTO_LABEL
    )

  def predict(self, image):
    """Return model predictions for one face image, or None for None."""
    if image is None:
      return None
    image = image.reshape([-1, SIZE_FACE, SIZE_FACE, COLOR])
    return self.model.predict(image)

  def save_model(self):
    """Persist the trained model to MODEL_LABEL."""
    self.model.save(MODEL_LABEL)
    print('[+] Model trained and saved at ' + MODEL_LABEL)

  def load_model(self):
    """Restore model weights from MODEL_LABEL."""
    self.model.load(MODEL_LABEL)
    print('[+] Model loaded from ' + MODEL_LABEL)
class EmotionClassifier:
    """Keras CNN emotion classifier with Jetson-friendly TF session setup."""

    def __init__(self):
        # MANDATORY FOR JETSON: enable GPU memory growth before any graph work.
        self.prevent_gpu_sync_failed()

        self.dataset = DatasetLoader()
        # Grayscale SIZE_FACE x SIZE_FACE input.
        self.input_shape = [SIZE_FACE, SIZE_FACE, 1]

    def build_model(self):
        """Build and compile the sequential CNN into self.model."""
        print('[+] Building CNN')

        self.model = Sequential([
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   input_shape=self.input_shape,
                   activation='relu'),
            MaxPool2D(pool_size=(3, 3), strides=2),
            Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
            MaxPool2D(pool_size=(3, 3), strides=2),
            Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
            Flatten(),
            Dropout(0.5),
            Dense(units=3072, activation='relu'),
            Dense(units=len(EMOTIONS), activation='softmax')
        ])

        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['categorical_accuracy'])

        self.model.summary()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from disk into self.dataset."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Train the CNN with on-the-fly augmentation.

        Returns the Keras History object (new, backward-compatible: the
        previous version discarded it).
        """
        self.load_saved_dataset()
        self.build_model()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()

        datagen = ImageDataGenerator(featurewise_center=True,
                                     featurewise_std_normalization=True,
                                     rotation_range=20,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     horizontal_flip=True)

        # Required so featurewise centering/normalization statistics exist.
        datagen.fit(x=self.dataset.images)

        # Training
        print('[+] Training model')

        checkpointer = ModelCheckpoint(filepath=join(SAVE_DIRECTORY,
                                                     MODEL_FILENAME),
                                       verbose=1,
                                       save_best_only=True)

        # Bug fix: steps_per_epoch must be an integer; the previous
        # `2 * (len(...) / 64)` produced a float under Python 3, which
        # newer Keras rejects. Two passes over the data per epoch.
        steps = max(1, (2 * len(self.dataset.images)) // 64)
        history = self.model.fit_generator(
            generator=datagen.flow(x=self.dataset.images,
                                   y=self.dataset.labels,
                                   batch_size=64),
            steps_per_epoch=steps,
            epochs=50,
            validation_data=(self.dataset.images_test,
                             self.dataset.labels_test),
            callbacks=[checkpointer])
        return history

    def predict(self, image):
        """Classify one grayscale face image; None input yields None."""
        if image is None:
            return None
        # TODO maybe expect that specific shape
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def load_model(self, model_name=MODEL_FILENAME):
        """Load a saved Keras model from SAVE_DIRECTORY.

        Raises FileNotFoundError when the file does not exist.
        """
        if isfile(join(SAVE_DIRECTORY, model_name)):
            self.model = load_model(join(SAVE_DIRECTORY, model_name))
            print('[+] Model loaded from ' + model_name)
        else:
            raise FileNotFoundError(join(SAVE_DIRECTORY, model_name))

    def prevent_gpu_sync_failed(self):
        """Install a TF session with GPU memory growth (Jetson workaround)."""
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        K.set_session(tf.Session(config=config))
Пример #7
0
import scipy.misc
from dataset_loader import DatasetLoader
import numpy as np
# Convert the saved .npy train/test arrays into a per-class folder layout
# ('./new_data/imgs_*_new/<label>/imageNNNNN.jpg') for Keras ImageDataGenerator.
dataset = DatasetLoader()
dataset.load_from_save()
# One running file counter per class (assumes exactly 6 emotion labels).
counter = [0, 0, 0, 0, 0, 0]

#Converts npy to folder based structure for Keras ImageDataGenerator

for i, img in enumerate(dataset.images_test):
    # x names the class folder; presumably an integer label 0-5 (not
    # one-hot), since it indexes `counter` — TODO confirm in DatasetLoader.
    x = dataset.labels_test[i]
    print(x)
    # NOTE(review): scipy.misc.toimage/imresize were removed in SciPy >= 1.3;
    # this script requires an old SciPy (or a port to PIL/imageio).
    # Each 128x128 image is upscaled to 224x224 before saving as JPEG.
    scipy.misc.toimage(scipy.misc.imresize(np.reshape(img, (128, 128)),
                                           (224, 224)),
                       cmin=0.0).save('./new_data/imgs_test_new/' + str(x) +
                                      '/image' + str("%05d" % counter[x]) +
                                      '.jpg')
    counter[x] = counter[x] + 1

counter = [0, 0, 0, 0, 0, 0]
counter_val = [0, 0, 0, 0, 0, 0]
# Every 10th training image is held out (presumably for validation).
for i, img in enumerate(dataset.images):
    if (i % 10 != 0):
        x = dataset.labels[i]
        scipy.misc.toimage(
            scipy.misc.imresize(np.reshape(img, (128, 128)), (224, 224)),
            cmin=0.0).save('./new_data/imgs_train_new/' + str(x) + '/image' +
                           str("%05d" % counter[x]) + '.jpg')
        counter[x] = counter[x] + 1
    else:
        # NOTE(review): this branch computes the label but never writes the
        # image, and counter_val is never used — the validation branch looks
        # unfinished (the snippet may be truncated).
        x = dataset.labels[i]
Пример #8
0
class EmotionRecognition:
    """tflearn CNN emotion recognizer (small AlexNet-style network)."""

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Assemble the CNN, wrap it in a tflearn DNN and try to load weights."""
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 128, 4, activation='relu')
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network, 3072, activation='relu')
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax')
        self.network = regression(self.network,
                                  optimizer='momentum',
                                  loss='categorical_crossentropy')

        self.model = tflearn.DNN(self.network,
                                 checkpoint_path='./data/',
                                 max_checkpoints=1,
                                 tensorboard_verbose=2)
        self.load_model()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its save files."""
        self.dataset.load_from_save()

    def start_training(self):
        """Build the network and fit it on the loaded dataset."""
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       validation_set=(self.dataset.images_test,
                                       self.dataset.labels_test),
                       n_epoch=100,
                       batch_size=50,
                       shuffle=True,
                       show_metric=True,
                       snapshot_step=200,
                       snapshot_epoch=True,
                       run_id='emotion_recognition')

    def predict(self, image):
        """Predict emotion probabilities for one face image (None -> None)."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Save the trained weights under ./data/."""
        self.model.save(join('./data/', SAVE_MODEL_FILENAME))
        print('Info: Model saved: ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Load saved weights if a saved model is present.

        Bug fix: the previous version loaded unconditionally while
        build_network() always calls load_model(), so the very first
        training run (no saved model yet) crashed. The `isfile` guard the
        author had commented out is restored.
        """
        from os.path import isfile  # local import: top of file not visible here
        if isfile(join('./data/', SAVE_MODEL_FILENAME)):
            self.model.load(join('./data/', SAVE_MODEL_FILENAME))
            print('Info: Model loaded from: ' + SAVE_MODEL_FILENAME)
class EmotionRecognition:
    """tflearn CNN emotion recognizer with TensorFlow graph-freezing support."""

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Assemble the CNN, wrap it in a tflearn DNN and try to load weights."""
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        #self.network = local_response_normalization(self.network)
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 128, 4, activation='relu')
        self.network = dropout(self.network, 0.3)
        self.network = fully_connected(self.network, 3072, activation='relu')
        self.network = fully_connected(
            self.network, len(EMOTIONS), activation='softmax')
        self.network = regression(
            self.network,
            optimizer='momentum',
            loss='categorical_crossentropy'
        )
        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=SAVE_DIRECTORY,
            max_checkpoints=1,
            tensorboard_verbose=2
        )
        self.load_model()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its save files."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Build the network and fit it on the loaded dataset."""
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        self.model.fit(
            self.dataset.images, self.dataset.labels,
            validation_set=(self.dataset.images_test,
                            self.dataset.labels_test),
            n_epoch=100,
            batch_size=50,
            shuffle=True,
            show_metric=True,
            snapshot_step=200,
            snapshot_epoch=True,
            run_id='emotion_recognition'
        )

    def predict(self, image):
        """Predict emotion probabilities for one face image (None -> None)."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Save the trained weights under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Load saved weights if the model file exists.

        Bug fix: the condition was inverted (`if not isfile(...)`) — it
        attempted to load only when the file was MISSING and silently
        skipped a present model. Now loads when the file exists, matching
        the other recognizer classes in this file.
        """
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)

    def freeze_model(self):
        """Write the graph and freeze it with the saved weights into a .pb.

        Output node name is tied to the network above (softmax of the last
        fully connected layer).
        """
        model_dir = join(SAVE_DIRECTORY, "model")
        input_graph_name = "input_graph.pb"
        tf.train.write_graph(self.model.session.graph, model_dir, input_graph_name)

        prediction_graph = tf.Graph()
        with prediction_graph.as_default():
            freeze_graph.freeze_graph(input_graph=os.path.join(model_dir, input_graph_name),
                                      input_saver="",
                                      input_binary=False,
                                      input_checkpoint=join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME),
                                      output_node_names="FullyConnected_1/Softmax",
                                      restore_op_name="",
                                      filename_tensor_name="",
                                      output_graph=join(SAVE_DIRECTORY, FREEZE_MODEL_FILENAME),
                                      clear_devices=True,
                                      initializer_nodes=None,
                                      variable_names_blacklist="")
Пример #10
0
class EmotionRecognition:
    """tflearn CNN emotion recognizer; model is built and trained on GPU:0."""

    def __init__(self):
        self.dataset = DatasetLoader()
        #self.augmentor = DataAugmentation()
    def build_network(self):
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        #tf.reset_default_graph()
        #tf.set_random_seed(343)
        # Seeds NumPy only; the TF graph seed above is commented out, so runs
        # are not fully reproducible.
        np.random.seed(343)
        tf.logging.set_verbosity(tf.logging.INFO)

        #tflearn.init_graph(num_cores=4,gpu_memory_fraction=0.5)
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = batch_normalization(self.network)
        #self.network = local_response_normalization(self.network)
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 4, activation='relu')
        self.network = batch_normalization(self.network)
        self.network = dropout(self.network, 0.3)
        #self.network = fully_connected(self.network, 3072, activation = 'relu')
        self.network = fully_connected(self.network, 128, activation='relu')
        #self.network = fully_connected(self.network, len(EMOTIONS), activation = 'softmax')
        # NOTE(review): output width is hard-coded to 6; the commented line
        # above used len(EMOTIONS) — confirm EMOTIONS has exactly 6 entries.
        self.network = fully_connected(self.network, 6, activation='softmax')
        self.network = regression(self.network,
                                  optimizer='momentum',
                                  loss='categorical_crossentropy')
        #with tf.device('/device:GPU:0'):
        # soft_placement lets TF fall back to CPU for unsupported ops.
        tflearn.config.init_graph(log_device=True, soft_placement=True)
        with tf.device('/device:GPU:0'):
            self.model = tflearn.DNN(self.network,
                                     checkpoint_path=SAVE_DIRECTORY +
                                     '/emotion_recognition',
                                     max_checkpoints=1,
                                     tensorboard_verbose=2)
            #self.load_model()

    def load_saved_dataset(self):
        # Load the preprocessed dataset from its save files.
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        #with tf.device('/device:GPU:0'):
        # These can be any tensors of matching type and dimensions.
        #images, labels = augment(self.dataset.images, self.dataset.labels,
        #                         horizontal_flip=True, rotate=15, crop_probability=0.8, mixup=4)
        print('train images, test images, train labels, test labels',
              self.dataset.images.shape, self.dataset.images_test.shape,
              self.dataset.labels.shape, self.dataset.labels_test.shape)
        with tf.device('/device:GPU:0'):
            self.model.fit(self.dataset.images,
                           self.dataset.labels,
                           validation_set=(self.dataset.images_test,
                                           self.dataset.labels_test),
                           n_epoch=50,
                           batch_size=32,
                           shuffle=True,
                           show_metric=True,
                           snapshot_step=200,
                           snapshot_epoch=True,
                           run_id='emotion_recognition')

    def predict(self, image):
        # Reshape one image into a single-sample batch before prediction.
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        # Save the trained weights under SAVE_DIRECTORY.
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        # Only attempt a load when the model file is actually present.
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Пример #11
0
class EmotionRecognition:
    """tflearn AlexNet emotion recognizer (color-aware, experiment-labelled)."""

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Assemble the AlexNet, wrap it in a tflearn DNN and load weights."""
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py

        print('[+] Building ALEXNET')
        print('[-] COLOR: ' + str(COLOR))
        print('[-] BATH_SIZE: ' + str(BATH_SIZE_CONSTANT))
        print('[-] EXPERIMENTAL_LABEL: ' + EXPERIMENTO_LABEL)

        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])

        self.network = conv_2d(self.network,
                               96,
                               11,
                               strides=4,
                               activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)

        self.network = local_response_normalization(self.network)
        self.network = conv_2d(self.network, 256, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = local_response_normalization(self.network)
        self.network = conv_2d(self.network, 384, 3, activation='relu')
        self.network = conv_2d(self.network, 384, 3, activation='relu')
        self.network = conv_2d(self.network, 256, 3, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = local_response_normalization(self.network)
        self.network = fully_connected(self.network, 4096, activation='tanh')
        self.network = dropout(self.network, 0.5)
        self.network = fully_connected(self.network, 4096, activation='tanh')
        self.network = dropout(self.network, 0.5)
        self.network = fully_connected(self.network,
                                       len(EMOTIONS),
                                       activation='softmax')
        self.network = regression(self.network,
                                  optimizer='momentum',
                                  loss='categorical_crossentropy',
                                  learning_rate=0.001)

        self.model = tflearn.DNN(self.network,
                                 checkpoint_path=CHECKPOINT_DIR,
                                 max_checkpoints=1,
                                 tensorboard_dir=TENSORBOARD_DIR,
                                 best_checkpoint_path=CHECKPOINT_DIR_BEST,
                                 tensorboard_verbose=1)
        # Raises if no saved model exists at MODEL_LOAD.
        self.load_model()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its save files."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Build the network and fit it on the loaded dataset."""
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()

        # Training
        print('[+] Training network')

        print("[+] Size train: " + str(len(self.dataset.images)))
        print("[+] Size train-label: " + str(len(self.dataset.labels)))
        print("[+] Size test: " + str(len(self.dataset.images_test)))
        print("[+] Size test-label: " + str(len(self.dataset.labels_test)))

        self.model.fit(
            self.dataset.images,
            self.dataset.labels,
            #validation_set = 0.33,
            # Bug fix: use the public `labels_test` attribute (as the size
            # print above does) instead of the private/nonexistent
            # `_labels_test`.
            validation_set=(self.dataset.images_test,
                            self.dataset.labels_test),
            n_epoch=100,
            batch_size=BATH_SIZE_CONSTANT,
            shuffle=True,
            show_metric=True,
            snapshot_step=200,
            snapshot_epoch=True,
            run_id=EXPERIMENTO_LABEL)

    def predict(self, image):
        """Predict emotion probabilities for one face image (None -> None)."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, COLOR])
        return self.model.predict(image)

    def save_model(self):
        """Save the trained weights at MODEL_LABEL."""
        self.model.save(MODEL_LABEL)
        print('[+] Model trained and saved at ' + MODEL_LABEL)

    def load_model(self):
        """Restore weights from MODEL_LOAD.

        NOTE(review): saves go to MODEL_LABEL but loads come from
        MODEL_LOAD — presumably intentional (warm-starting from another
        experiment); confirm the two constants agree when resuming.
        """
        self.model.load(MODEL_LOAD)
        print('[+] Model loaded from ' + MODEL_LOAD)
Пример #12
0
class EmotionRecognition:
    """Keras (v1 API) VGG-style CNN emotion recognizer."""

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Build and compile the CNN, then try to load saved weights."""
        print('[+] Building CNN')

        self.model = Sequential()
        self.model.add(
            Convolution2D(32,
                          3,
                          3,
                          border_mode='same',
                          activation='relu',
                          input_shape=(48, 48, 1)))
        self.model.add(
            Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
        self.model.add(
            Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(
            Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
        self.model.add(
            Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
        self.model.add(
            Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(
            Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
        self.model.add(
            Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
        self.model.add(
            Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten(
        ))  # this converts our 3D feature maps to 1D feature vectors
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dense(64, activation='relu'))
        self.model.add(Dense(7, activation='softmax'))
        # optimizer:
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        # Bug fix: `print 'Training....'` was a Python 2 print statement —
        # a SyntaxError under Python 3, which the rest of this code targets.
        print('Training....')

        self.load_model()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its save files."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Build the model and fit it on the loaded dataset."""
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        self.model.fit(self.dataset.images,
                       self.dataset.labels,
                       # Bug fix: use the public `labels_test` attribute, not
                       # the private/nonexistent `_labels_test`.
                       validation_data=(self.dataset.images_test,
                                        self.dataset.labels_test),
                       epochs=3,
                       batch_size=50,
                       shuffle=True,
                       verbose=1)

    def predict(self, image):
        """Predict emotion probabilities for one face image (None -> None)."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Save the full Keras model under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Load the saved Keras model if the file exists."""
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model = load_model(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Пример #13
0
# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
import numpy as np

from constants import *
from emotion_recognition import EmotionRecognition
from dataset_loader import DatasetLoader

# Load the trained model and evaluate it on the fer2013 test split,
# accumulating a confusion matrix of true vs. predicted emotions.
network = EmotionRecognition()
network.build_network(loadModel=True)

data = DatasetLoader()
data.load_from_save(data_source='fer2013')

images = data.images_test
labels = data.labels_test

print('[+] Loading Data')
# Confusion matrix: rows = true emotion, columns = predicted emotion.
confusion = np.zeros((len(EMOTIONS), len(EMOTIONS)))

for idx in range(images.shape[0]):
    if idx % 1000 == 0:
        print("Progress: {}/{} {:.2f}%".format(idx, images.shape[0],
                                               idx * 100.0 / images.shape[0]))
    prediction = network.predict(images[idx])
    confusion[np.argmax(labels[idx]), np.argmax(prediction[0])] += 1

# Overall accuracy = trace / total count.
print("Accuracy: %f" % (np.sum(confusion.diagonal()) / np.sum(confusion)))
# Take % by column
Пример #14
0
class EmotionRecognition:

    def __init__(self):
        """
            Initialize: create the dataset loader.
        """
        self.dataset = DatasetLoader()

    def build_network(self, loadModel=False):
        """
            Build the model; optionally load previously saved weights.
        """
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        img_aug = ImageAugmentation()
        img_aug.add_random_flip_leftright()
        # img_aug.add_random_rotation(max_angle=25.)
        img_aug.add_random_blur(sigma_max=0.3)
        # Input data: http://tflearn.org/layers/core/#input-data
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1], data_augmentation=img_aug)
        # self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        # Convolution layer: http://tflearn.org/layers/conv/#convolution-2d
        # Activation functions: http://tflearn.org/activations/
        self.network = conv_2d(self.network, 64, 3, activation='relu')
        # self.gap1 = global_avg_pool(self.network)
        # Pooling layer: http://tflearn.org/layers/conv/#max-pooling-2d
        self.network = max_pool_2d(self.network, 2, strides=2)
        # Convolution layer
        self.network = conv_2d(self.network, 96, 3, activation='relu')
        # self.gap2 = global_avg_pool(self.network)
        # Pooling layer
        self.network = max_pool_2d(self.network, 2, strides=2)
        # Convolution layer
        self.network = conv_2d(self.network, 128, 3, activation='relu')
        self.network = global_avg_pool(self.network)
        # Fully connected layer: http://tflearn.org/layers/core/#fully-connected
        self.network = fully_connected(self.network, 2048, activation='relu',
            weight_decay=0.001)

        # Dropout randomly zeroes part of the activations to reduce
        # overfitting: http://tflearn.org/layers/core/#dropout
        self.network = dropout(self.network, 0.8)
        # Fully connected softmax classification layer
        # self.network = merge([self.gap1, self.gap2, self.gap3], mode="concat", name="concat")
        self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')

        # Define loss function and optimizer: http://tflearn.org/layers/estimator/#regression
        self.network = regression(self.network,
            # http://tflearn.org/optimizers/
            optimizer='Adam',
            # optimizer='SGD',
            # http://tflearn.org/objectives/
            loss='categorical_crossentropy',
            learning_rate=0.001)
        # Define the model: http://tflearn.org/models/dnn/#deep-neural-network-model
        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
            tensorboard_dir='c:\\tmp\\tflearn_logs',
            max_checkpoints=1,
            tensorboard_verbose=2
        )
        if loadModel:
            self.load_model()

    def load_saved_dataset(self):
        # Load the preprocessed fer2013 dataset from its save files.
        self.dataset.load_from_save(data_source='fer2013')
        print('[+] Dataset found and loaded')

    def start_training(self):
        """
            Train the model.
        """
        self.load_saved_dataset()
        self.build_network(loadModel=True)
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        # Fit the model: http://tflearn.org/models/dnn/#deep-neural-network-model
        self.model.fit(
            self.dataset.images, self.dataset.labels,
            validation_set = (self.dataset.images_test, self.dataset.labels_test),
            n_epoch=100,
            batch_size=256,
            shuffle=True,
            show_metric=True,
            snapshot_step=200,
            snapshot_epoch=True,
            run_id='emotion_recognition'
        )

    def predict(self, image):
        """
            Predict emotion probabilities for a single face image.
        """
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        # Save the trained weights under SAVE_DIRECTORY.
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        # NOTE(review): the '.\\' prefix makes this path Windows-specific —
        # confirm the intended platform; no existence check is done, so a
        # missing file raises inside tflearn.
        model_path = join('.\\', SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
        self.model.load(model_path)
Пример #15
0
class EmotionRecognition:
    """tflearn CNN emotion recognizer (annotated teaching example).

    The original interleaved the code with no-op triple-quoted tutorial
    strings; those are condensed into real comments here (no runtime change).
    """

    def __init__(self):
        self.dataset = DatasetLoader()

    def build_network(self):
        """Assemble the CNN, wrap it in a tflearn DNN and try to load weights."""
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        # Input placeholder, shape = [batch, height, width, in_channels].
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        # conv_2d(incoming, nb_filter, filter_size); stride defaults to 1.
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        # max_pool_2d(incoming, kernel_size, strides).
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 64, 5, activation='relu')
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = conv_2d(self.network, 128, 4, activation='relu')
        # dropout(incoming, keep_prob): tflearn.org/layers/core/#dropout
        self.network = dropout(self.network, 0.3)
        # fully_connected(incoming, n_units) -> 2D tensor [samples, n_units].
        self.network = fully_connected(self.network, 3072, activation='relu')
        self.network = fully_connected(
            self.network, len(EMOTIONS), activation='softmax')
        # Attach loss + optimizer: tflearn.org/layers/estimator/#regression
        self.network = regression(
            self.network,
            optimizer='momentum',
            loss='categorical_crossentropy'
        )
        # DNN wrapper; max_checkpoints=1 keeps only the latest model file.
        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
            max_checkpoints=1,
            tensorboard_verbose=2
        )
        self.load_model()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its save files."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Build the network and fit it on the loaded dataset.

        fit() parameters: snapshot_step/snapshot_epoch control how often a
        checkpoint is written; run_id names this run in TensorBoard.
        """
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load; in practice the dataset is never None here.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        self.model.fit(
            self.dataset.images, self.dataset.labels,
            validation_set=(self.dataset.images_test,
                            self.dataset.labels_test),
            n_epoch=10,
            batch_size=50,
            shuffle=True,
            show_metric=True,
            snapshot_step=1000,
            snapshot_epoch=True,
            run_id='emotion_recognition'
        )

    def predict(self, image):
        """Return predicted class probabilities for one face (None -> None)."""
        if image is None:
            return None
        image = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(image)

    def save_model(self):
        """Save the trained weights under SAVE_DIRECTORY."""
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Load saved weights if the model file exists.

        Bug fixes: (1) the old code checked os.path.exists(SAVE_DIRECTORY) —
        the directory existing says nothing about the model file; (2) it
        printed the 'loaded' message BEFORE attempting the load, so a failed
        load still claimed success.
        """
        model_path = join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
        if os.path.isfile(model_path):
            self.model.load(model_path)
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Пример #16
0
class EmotionRecognition:
  """Residual-network (ResNet) emotion classifier built on tflearn."""

  def __init__(self):
    # Dataset wrapper; actual data is loaded via load_saved_dataset().
    self.dataset = DatasetLoader()
    print("aqui1")

  def build_network(self):
    """Build the residual network, wrap it in a tflearn DNN trainer,
    and attempt to restore previously saved weights.

    Depth is controlled by n: 32 layers for n=5, 56 for n=9, 110 for n=18.
    Based on:
    https://github.com/tflearn/tflearn/blob/master/examples/images/residual_network_cifar10.py
    """
    # 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
    n = 5
    print('[+] Building RESIDUAL NETWORK')
    print ('[-] COLOR: ' + str(COLOR))
    print('[-] BATH_SIZE' + str(BATH_SIZE_CONSTANT))
    print('[-] EXPERIMENTAL_LABEL' + EXPERIMENTO_LABEL)

    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])
    self.network = tflearn.conv_2d(self.network, 16, 3, regularizer='L2', weight_decay=0.0001)
    # Three residual stages: 16 -> 32 -> 64 filters, downsampling between stages.
    self.network = tflearn.residual_block(self.network, n, 16)
    self.network = tflearn.residual_block(self.network, 1, 32, downsample=True)
    self.network = tflearn.residual_block(self.network, n-1, 32)
    self.network = tflearn.residual_block(self.network, 1, 64, downsample=True)
    self.network = tflearn.residual_block(self.network, n-1, 64)
    self.network = tflearn.batch_normalization(self.network)
    self.network = tflearn.activation(self.network, 'relu')
    self.network = tflearn.global_avg_pool(self.network)
    # Classification head + optimizer
    self.network = tflearn.fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    self.network = tflearn.regression(self.network, optimizer=self.mom,
                                      loss='categorical_crossentropy')

    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = CHECKPOINT_DIR,
      max_checkpoints = 1,
      tensorboard_dir = TENSORBOARD_DIR,
      #best_checkpoint_path = CHECKPOINT_DIR_BEST,
      tensorboard_verbose = 1
    )
    self.load_model()

  def load_saved_dataset(self):
    """Load the preprocessed dataset from its saved files."""
    self.dataset.load_from_save()
    print('[+] Dataset found and loaded')

  def start_training(self):
    """Train the residual network on the saved dataset."""
    self.load_saved_dataset()
    self.build_network()
    # Defensive re-load in case the dataset attribute was cleared.
    if self.dataset is None:
      self.load_saved_dataset()
    # Training
    print('[+] Training network')

    print ("[+] Size train: " + str(len(self.dataset.images)))
    print ("[+] Size train-label: " + str(len(self.dataset.labels)))
    print ("[+] Size test: " + str(len(self.dataset.images_test)))
    print ("[+] Size test-label: " + str(len(self.dataset.labels_test)))

    self.model.fit(
      self.dataset.images, self.dataset.labels,
      #validation_set = 0.33,
      # BUG FIX: was self.dataset._labels_test; the test labels are exposed
      # as labels_test (see the size prints above) — the underscore name
      # would raise AttributeError at fit time.
      validation_set = (self.dataset.images_test, self.dataset.labels_test),
      n_epoch = 100,
      batch_size = BATH_SIZE_CONSTANT,
      shuffle = True,
      show_metric = True,
      snapshot_step = 200,
      snapshot_epoch = True,
      run_id = EXPERIMENTO_LABEL
    )

  def predict(self, image):
    """Return predicted class probabilities for one face image.

    Returns None when no image is supplied.
    """
    if image is None:
      return None
    # Network expects a 4-D batch: (batch, height, width, channels).
    image = image.reshape([-1, SIZE_FACE, SIZE_FACE, COLOR])
    return self.model.predict(image)

  def save_model(self):
    """Persist the trained model weights under MODEL_LABEL."""
    self.model.save(MODEL_LABEL)
    print('[+] Model trained and saved at ' + MODEL_LABEL )

  def load_model(self):
    """Restore weights from a hard-coded checkpoint path.

    NOTE(review): loads 'model-full-data/resnet-full-data-33201' but the
    success message prints MODEL_LABEL — the two may disagree; confirm
    which checkpoint is intended.
    """
    self.model.load('model-full-data/resnet-full-data-33201')
    #self.model.load(MODEL_LABEL)
    print('[+] Model loaded from ' + MODEL_LABEL)
Пример #17
0
class EmotionRecognition:
    """Emotion classifier using a reduced 'AlexNet'-style CNN (tflearn)."""

    def __init__(self):
        # Dataset wrapper; actual data is loaded via load_saved_dataset().
        self.dataset = DatasetLoader()

    def build_network(self):
        """Assemble the CNN, attach a DNN trainer, and try to restore
        previously saved weights.

        Smaller variant of:
        https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        """
        print('[+] Building CNN')
        net = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
        net = conv_2d(net, 64, 5, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = conv_2d(net, 64, 5, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = conv_2d(net, 128, 4, activation='relu')
        net = dropout(net, 0.3)
        net = fully_connected(net, 3072, activation='relu')
        # Softmax head: one probability per emotion class.
        net = fully_connected(net, len(EMOTIONS), activation='softmax')
        net = regression(net,
                         optimizer='momentum',
                         loss='categorical_crossentropy')
        self.network = net
        self.model = tflearn.DNN(self.network,
                                 checkpoint_path=SAVE_DIRECTORY +
                                 '/emotion_recognition',
                                 max_checkpoints=1,
                                 tensorboard_verbose=2)
        self.load_model()

    def load_saved_dataset(self):
        """Load the preprocessed dataset from its saved files."""
        self.dataset.load_from_save()
        print('[+] Dataset found and loaded')

    def start_training(self):
        """Train the CNN on the saved dataset."""
        self.load_saved_dataset()
        self.build_network()
        # Defensive re-load in case the dataset attribute was cleared.
        if self.dataset is None:
            self.load_saved_dataset()
        # Training
        print('[+] Training network')
        fit_options = dict(
            validation_set=(self.dataset.images_test,
                            self.dataset.labels_test),
            n_epoch=100,
            batch_size=50,
            shuffle=True,
            show_metric=True,
            snapshot_step=200,
            snapshot_epoch=True,
            run_id='emotion_recognition',
        )
        self.model.fit(self.dataset.images, self.dataset.labels,
                       **fit_options)

    def predict(self, image):
        """Return predicted class probabilities for one face image.

        Returns None when no image is supplied.
        """
        if image is None:
            return None
        # Network expects a 4-D batch: (batch, height, width, channels).
        batch = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
        return self.model.predict(batch)

    def save_model(self):
        """Persist the trained model weights to SAVE_DIRECTORY."""
        target = join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
        self.model.save(target)
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

    def load_model(self):
        """Restore weights when a saved model file exists; otherwise no-op."""
        target = join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
        if isfile(target):
            self.model.load(target)
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
class MoodRecognition:
  """Mood classifier using an 'AlexNet'-style CNN (tflearn).

  Based on:
  https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
  https://github.com/DT42/squeezenet_demo
  https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
  """

  def __init__(self):
    # Dataset wrapper; actual data is loaded via load_saved_dataset().
    self.dataset = DatasetLoader()

  def build_network(self):
    """Assemble the AlexNet CNN, attach a DNN trainer, and try to
    restore previously saved weights."""
    print('[+] Building CNN')
    net = input_data(shape = [None, SIZE_FACE, SIZE_FACE, 1])
    net = conv_2d(net, 96, 11, strides = 4, activation = 'relu')
    net = max_pool_2d(net, 3, strides = 2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation = 'relu')
    net = max_pool_2d(net, 3, strides = 2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 3, activation = 'relu')
    net = max_pool_2d(net, 3, strides = 2)
    net = local_response_normalization(net)
    net = fully_connected(net, 1024, activation = 'tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 1024, activation = 'tanh')
    net = dropout(net, 0.5)
    # Softmax head: one probability per emotion class.
    net = fully_connected(net, len(EMOTIONS), activation = 'softmax')
    net = regression(net,
      optimizer = 'momentum',
      loss = 'categorical_crossentropy')
    self.network = net
    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = SAVE_DIRECTORY + '/alexnet_mood_recognition',
      max_checkpoints = 1,
      tensorboard_verbose = 2
    )
    self.load_model()

  def load_saved_dataset(self):
    """Load the preprocessed dataset from its saved files."""
    self.dataset.load_from_save()
    print('[+] Dataset found and loaded')

  def start_training(self):
    """Train the CNN on the saved dataset."""
    self.load_saved_dataset()
    self.build_network()
    # Defensive re-load in case the dataset attribute was cleared.
    if self.dataset is None:
      self.load_saved_dataset()
    # Training
    print('[+] Training network')
    fit_options = dict(
      validation_set = (self.dataset.images_test, self.dataset.labels_test),
      n_epoch = 100,
      batch_size = 50,
      shuffle = True,
      show_metric = True,
      snapshot_step = 200,
      snapshot_epoch = True,
      run_id = 'alexnet_mood_recognition'
    )
    self.model.fit(self.dataset.images, self.dataset.labels, **fit_options)

  def predict(self, image):
    """Return predicted class probabilities for one face image.

    Returns None when no image is supplied.
    """
    if image is None:
      return None
    # Network expects a 4-D batch: (batch, height, width, channels).
    batch = image.reshape([-1, SIZE_FACE, SIZE_FACE, 1])
    return self.model.predict(batch)

  def save_model(self):
    """Persist the trained model weights to SAVE_DIRECTORY."""
    target = join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
    self.model.save(target)
    print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)

  def load_model(self):
    """Restore weights when a saved model file exists; otherwise no-op."""
    target = join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)
    if isfile(target):
      self.model.load(target)
      print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)