Example #1
    def init_tensors(self, test):
        batchsize = LearningConfig.batch_size
        if test:  # a single sample is fed at test time
            batchsize = 1

        pca_utility = PCAUtility()
        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)

        # print("predicted_tensor " + str(predicted_tensor.shape))
        # print("meanvector " + str(meanvector.shape))
        # print("eigenvalues " + str(eigenvalues.shape))
        # print("eigenvectors " + str(eigenvectors.shape))
        # print("-")

        # tile the PCA mean vector so every sample in the batch shares a copy: (batch, 136)
        self._meanvector_arr = np.tile(meanvector, (batchsize, 1))

        # tile the eigenvalues the same way: (batch, k)
        self._eigenvalues_arr = np.tile(eigenvalues, (batchsize, 1))

        # tile the eigenvector matrix across the batch: (batch, 136, k)
        self._eigenvectors_arr = np.tile(eigenvectors[None, :, :], (batchsize, 1, 1))

        self._meanvector_tensor = tf.convert_to_tensor(self._meanvector_arr, dtype=tf.float32)
        self._eigenvalues_tensor = tf.convert_to_tensor(self._eigenvalues_arr, dtype=tf.float32)
        self._eigenvectors_tensor = tf.convert_to_tensor(self._eigenvectors_arr, dtype=tf.float32)

        # pre-transpose once so later matmuls do not have to: (batch, k, 136)
        self._eigenvectors_T = tf.transpose(self._eigenvectors_tensor, perm=[0, 2, 1])
Example #2
    def test_pca_validity(self, pca_postfix):
        cnn_model = CNNModel()
        pca_utility = PCAUtility()
        tf_record_utility = TFRecordUtility()
        image_utility = ImageUtility()

        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(
            dataset_name=DatasetName.ibug, pca_postfix=pca_postfix)

        lbl_arr, img_arr, pose_arr = tf_record_utility.retrieve_tf_record(
            tfrecord_filename=IbugConf.tf_train_path,
            number_of_records=30,
            only_label=False)
        for i in range(20):
            # project each ground-truth label into PCA space, then reconstruct it
            b_vector_p = self.calculate_b_vector(lbl_arr[i], True, eigenvalues,
                                                 eigenvectors, meanvector)
            lbl_new = meanvector + np.dot(eigenvectors, b_vector_p)

            labels_true_transformed, landmark_arr_x_t, landmark_arr_y_t = image_utility. \
                create_landmarks_from_normalized(lbl_arr[i], 224, 224, 112, 112)

            labels_true_transformed_pca, landmark_arr_x_pca, landmark_arr_y_pca = image_utility. \
                create_landmarks_from_normalized(lbl_new, 224, 224, 112, 112)

            # render both the original and the PCA-reconstructed landmarks,
            # using offset indices to keep the two renderings apart
            image_utility.print_image_arr(i, img_arr[i], landmark_arr_x_t,
                                          landmark_arr_y_t)
            image_utility.print_image_arr(i * 1000, img_arr[i],
                                          landmark_arr_x_pca,
                                          landmark_arr_y_pca)
Example #3
    def __ASM(self, input_tensor, pca_postfix):
        pca_utility = PCAUtility()
        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(
            DatasetName.ibug, pca_postfix=pca_postfix)

        input_vector_batch = K.eval(input_tensor)  # pull the batch out of the graph
        out_asm_vector = []
        for i in range(LearningConfig.batch_size):
            b_vector_p = self.calculate_b_vector(input_vector_batch[i], True,
                                                 eigenvalues, eigenvectors,
                                                 meanvector)
            # constrain each prediction to the PCA subspace: mean + P.b
            out_asm_vector.append(meanvector +
                                  np.dot(eigenvectors, b_vector_p))

        out_asm_vector = np.array(out_asm_vector)

        tensor_out = K.variable(out_asm_vector)
        return tensor_out
Example #4
    def custom_activation_test(self, predicted_tensor):
        pca_utility = PCAUtility()
        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)

        b_vector_tensor = self.calculate_b_vector_tensor_test(predicted_tensor, True, eigenvalues,
                                                              self._eigenvectors_tensor, self._meanvector_tensor)

        # reconstruct mean + P.b per sample, then flatten; the reshape assumes a test batch of size 1
        out = tf.add(tf.expand_dims(self._meanvector_tensor, 2), tf.matmul(self._eigenvectors_tensor, b_vector_tensor))
        out = tf.reshape(out, [1, 136])

        return out
Example #5
    def _calculate_asm(self, input_tensor):
        pca_utility = PCAUtility()
        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(self.dataset_name, pca_percentages=self.accuracy)

        input_vector = np.array(input_tensor)
        out_asm_vector = []
        batch_size = input_vector.shape[0]
        for i in range(batch_size):
            # project each prediction into PCA space and reconstruct it,
            # constraining the output to statistically plausible shapes
            b_vector_p = self._calculate_b_vector(input_vector[i], eigenvalues, eigenvectors, meanvector)
            out_asm_vector.append(meanvector + np.dot(eigenvectors, b_vector_p))

        out_asm_vector = np.array(out_asm_vector)
        return out_asm_vector
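The per-sample loop above can also be written as two matrix products (ignoring any clamping that _calculate_b_vector may apply). A standalone sketch with assumed shapes; d = 136 flattened coordinates, k retained components, and all sizes here are illustrative, not taken from the repository:

import numpy as np

d, k, batch = 136, 40, 8                                  # assumed sizes
meanvector = np.zeros(d)
eigenvectors = np.linalg.qr(np.random.randn(d, k))[0]     # orthonormal columns, (d, k)
predictions = np.random.randn(batch, d)

b = (predictions - meanvector) @ eigenvectors             # (batch, k)
reconstructed = meanvector + b @ eigenvectors.T           # (batch, d)
print(reconstructed.shape)                                # (8, 136)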
Example #6
    def __customLoss(self, yTrue, yPred):
        pca_utility = PCAUtility()
        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(
            DatasetName.ibug)

        bias = 1  # keeps the argument of the log positive even at zero error
        tensor_mean_square_error = K.log(
            (K.mean(K.square(yPred - yTrue), axis=-1) + bias))
        mse = K.eval(tensor_mean_square_error)
        # print("mse:")
        # print(mse)
        # print("---->>>")

        yPred_arr = K.eval(yPred)
        yTrue_arr = K.eval(yTrue)

        loss_array = []

        for i in range(LearningConfig.batch_size):
            asm_loss = 0

            truth_vector = yTrue_arr[i]
            predicted_vector = yPred_arr[i]

            # constrain the prediction to the PCA subspace: mean + P.b
            b_vector_p = self.calculate_b_vector(predicted_vector, True,
                                                 eigenvalues, eigenvectors,
                                                 meanvector)
            y_pre_asm = meanvector + np.dot(eigenvectors, b_vector_p)

            for j in range(len(y_pre_asm)):
                asm_loss += (truth_vector[j] - y_pre_asm[j])**2
            asm_loss /= len(y_pre_asm)

            asm_loss += bias + 1  # shift so the log argument stays positive
            asm_loss = math.log(asm_loss, 10)
            asm_loss *= LearningConfig.regularization_term
            loss_array.append(asm_loss)
            print('mse[i]: ' + str(mse[i]))
            print('asm_loss[i]: ' + str(asm_loss))
            print('============')

        loss_array = np.array(loss_array)

        tensor_asm_loss = K.variable(loss_array)
        # total loss: elementwise mean of the log-MSE term and the ASM term
        tensor_total_loss = tf.reduce_mean(
            [tensor_mean_square_error, tensor_asm_loss], axis=0)

        return tensor_total_loss
Example #7
from configuration import DatasetName, DatasetType, AffectnetConf, IbugConf, W300Conf, InputDataSize, CofwConf, WflwConf
from cnn_model import CNNModel
from pca_utility import PCAUtility
from image_utility import ImageUtility
from student_train import StudentTrainer
from dif_model_train import StudentDiffTrainer
from test import Test

if __name__ == '__main__':
    # tf_record_util = TFRecordUtility(136)
    pca_utility = PCAUtility()
    cnn_model = CNNModel()
    image_utility = ImageUtility()

    '''create and save PCA objects'''
    # pca_utility.create_pca_from_npy(DatasetName.ibug, 85)
    # pca_utility.create_pca_from_npy(DatasetName.ibug, 90)
    # pca_utility.create_pca_from_npy(DatasetName.ibug, 95)
    # pca_utility.create_pca_from_npy(DatasetName.ibug, 97)

    # pca_utility.create_pca_from_points(DatasetName.cofw, 90)
    '''generate points at different accuracy levels'''
    # tf_record_util.normalize_points_and_save(dataset_name=DatasetName.ibug)
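The commented pca_utility.create_pca_from_npy calls above create and save PCA objects at several variance-retention levels (85-97%). A hypothetical sketch of what such a helper could look like using scikit-learn; the function name, save layout, and file naming below are assumptions, not the project's actual implementation:

import numpy as np
from sklearn.decomposition import PCA

def create_pca_from_npy_sketch(labels, pca_percentage, out_prefix):
    # labels: (num_samples, d) array of normalized landmark vectors
    pca = PCA(n_components=pca_percentage / 100.0)  # keep this fraction of variance
    pca.fit(labels)
    np.save(out_prefix + "_eigenvalues_" + str(pca_percentage) + ".npy", pca.explained_variance_)
    np.save(out_prefix + "_eigenvectors_" + str(pca_percentage) + ".npy", pca.components_.T)  # (d, k)
    np.save(out_prefix + "_meanvector_" + str(pca_percentage) + ".npy", pca.mean_)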
Example #8
    def __customLoss_base(self, yTrue, yPred):
        pca_utility = PCAUtility()
        image_utility = ImageUtility()
        tf_record_utility = TFRecordUtility()

        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)

        tensor_mean_square_error = K.mean(K.square(yPred - yTrue), axis=-1)
        mse = K.eval(tensor_mean_square_error)

        yPred_arr = K.eval(yPred)
        yTrue_arr = K.eval(yTrue)

        loss_array = []

        for i in range(LearningConfig.batch_size):
            asm_loss = 0

            truth_vector = yTrue_arr[i]
            predicted_vector = yPred_arr[i]

            # constrain the prediction to the PCA subspace: mean + P.b
            b_vector_p = self.calculate_b_vector(predicted_vector, True, eigenvalues, eigenvectors, meanvector)
            y_pre_asm = meanvector + np.dot(eigenvectors, b_vector_p)

            """in order to test the results after PCA, you can use these lines of code"""
            # landmark_arr_xy, landmark_arr_x, landmark_arr_y = image_utility.create_landmarks_from_normalized(truth_vector, 224, 224, 112, 112)
            # image_utility.print_image_arr(i, np.ones([224, 224]), landmark_arr_x, landmark_arr_y)
            #
            # landmark_arr_xy_new, landmark_arr_x_new, landmark_arr_y_new= image_utility.create_landmarks_from_normalized(y_pre_asm, 224, 224, 112, 112)
            # image_utility.print_image_arr(i*100, np.ones([224, 224]), landmark_arr_x_new, landmark_arr_y_new)

            for j in range(len(y_pre_asm)):
                asm_loss += (truth_vector[j] - y_pre_asm[j]) ** 2
            asm_loss /= len(y_pre_asm)

            # optional weightings, disabled in this base version:
            # asm_loss *= mse[i]
            # asm_loss *= LearningConfig.regularization_term

            loss_array.append(asm_loss)

            print('mse[i]: ' + str(mse[i]))
            print('asm_loss[i]: ' + str(asm_loss))
            print('============')

        loss_array = np.array(loss_array)
        tensor_asm_loss = K.variable(loss_array)

        # alternative: sum the two terms instead of averaging them
        # sum_loss_tensor = tf.math.add(tensor_mean_square_error, tensor_asm_loss)
        tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_asm_loss], axis=0)

        return tensor_total_loss
Example #9
    def asm_assisted_loss(self, yTrue, yPred):
        '''def::
             l_1 = mse(yTrue, yPred)
             yPred_asm = ASM(yPred)
             l_2 = mse(yPred_asm, yPred)
             L = l_1 + (a * l_2)
        '''

        pca_util = PCAUtility()
        pca_percentage = self.accuracy

        # load the saved PCA components directly (cf. pca_util.load_pca_obj)
        eigenvalues = load('pca_obj/' + self.dataset_name +
                           pca_util.eigenvalues_prefix + str(pca_percentage) +
                           ".npy")
        eigenvectors = load('pca_obj/' + self.dataset_name +
                            pca_util.eigenvectors_prefix +
                            str(pca_percentage) + ".npy")
        meanvector = load('pca_obj/' + self.dataset_name +
                          pca_util.meanvector_prefix + str(pca_percentage) +
                          ".npy")

        tensor_mean_square_error = K.mean(K.square(yPred - yTrue), axis=-1)
        mse = K.eval(tensor_mean_square_error)
        yPred_arr = K.eval(yPred)
        yTrue_arr = K.eval(yTrue)

        loss_array = []

        for i in range(LearningConfig.batch_size):
            asm_loss = 0

            truth_vector = yTrue_arr[i]
            predicted_vector = yPred_arr[i]

            # constrain the prediction to the PCA subspace: mean + P.b
            b_vector_p = self.calculate_b_vector(predicted_vector, True,
                                                 eigenvalues, eigenvectors,
                                                 meanvector)
            y_pre_asm = meanvector + np.dot(eigenvectors, b_vector_p)
            """in order to test the results after PCA, you can use these lines of code"""
            # landmark_arr_xy, landmark_arr_x, landmark_arr_y = image_utility.create_landmarks_from_normalized(truth_vector, 224, 224, 112, 112)
            # image_utility.print_image_arr(i, np.ones([224, 224]), landmark_arr_x, landmark_arr_y)
            #
            # landmark_arr_xy_new, landmark_arr_x_new, landmark_arr_y_new= image_utility.create_landmarks_from_normalized(y_pre_asm, 224, 224, 112, 112)
            # image_utility.print_image_arr(i*100, np.ones([224, 224]), landmark_arr_x_new, landmark_arr_y_new)
            '''ASM loss: MSE(y_pred_asm, y_pred)'''
            for j in range(len(y_pre_asm)):
                asm_loss += (predicted_vector[j] - y_pre_asm[j])**2
            asm_loss /= len(y_pre_asm)

            asm_loss *= LearningConfig.reg_term_ASM

            loss_array.append(asm_loss)

        loss_array = np.array(loss_array)
        tensor_asm_loss = K.variable(loss_array)

        # total loss: L = l_1 + a * l_2 (the weight a is already folded into tensor_asm_loss)
        tensor_total_loss = tf.add(tensor_mean_square_error, tensor_asm_loss)
        # alternative: average the two terms instead
        # tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_asm_loss], axis=0)

        return tensor_total_loss
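The custom losses above call K.eval on yTrue and yPred, which only works when the tensors can be evaluated outside the graph. Below is a minimal sketch of the same L = l_1 + a * l_2 idea written purely in TensorFlow ops, so it also runs in compiled/graph mode; the per-component clamping of b is omitted, and alpha is an assumed weight, not the repository's reg_term_ASM:

import tensorflow as tf

def asm_assisted_loss_graph(y_true, y_pred, eigenvectors, meanvector, alpha=1.0):
    # eigenvectors: (d, k) float32; meanvector: (d,) float32; y_*: (batch, d)
    mse = tf.reduce_mean(tf.square(y_pred - y_true), axis=-1)          # l_1, per sample
    b = tf.matmul(y_pred - meanvector, eigenvectors)                   # (batch, k)
    y_pred_asm = meanvector + tf.matmul(b, eigenvectors, transpose_b=True)
    asm = tf.reduce_mean(tf.square(y_pred - y_pred_asm), axis=-1)      # l_2, per sample
    return mse + alpha * asm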
Example #10
from train import Train
from test import Test
from configuration import DatasetName, ModelArch
from pca_utility import PCAUtility

if __name__ == '__main__':
    '''use the pretrained model'''
    tester = Test()
    tester.test_model(ds_name=DatasetName.w300,
                      pretrained_model_path='./pre_trained_models/ASMNet/ASM_loss/ASMNet_300W_ASMLoss.h5')

    '''training the model from scratch'''
    #   prerequisites before training
    #       1- PCA calculation:
    pca_calc = PCAUtility()
    pca_calc.create_pca_from_npy(dataset_name=DatasetName.w300,
                                 labels_npy_path='./data/w300/normalized_labels/',
                                 pca_percentages=90)

    #  Train:
    trainer = Train(arch=ModelArch.ASMNet,
                    dataset_name=DatasetName.w300,
                    save_path='./',
                    asm_accuracy=90)