Example #1
    def _create_generators(self):
        tf_utils = TFRecordUtility()

        if os.path.isfile('x_train_filenames.npy') and \
                os.path.isfile('x_val_filenames.npy') and \
                os.path.isfile('y_train_filenames.npy') and \
                os.path.isfile('y_val_filenames.npy'):
            x_train_filenames = load('x_train_filenames.npy')
            x_val_filenames = load('x_val_filenames.npy')
            y_train = load('y_train_filenames.npy')
            y_val = load('y_val_filenames.npy')
        else:
            filenames, labels = tf_utils.create_image_and_labels_name()

            filenames_shuffled, y_labels_shuffled = shuffle(filenames, labels)

            x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
                filenames_shuffled,
                y_labels_shuffled,
                test_size=0.1,
                random_state=1)

            save('x_train_filenames.npy', x_train_filenames)
            save('x_val_filenames.npy', x_val_filenames)
            save('y_train_filenames.npy', y_train)
            save('y_val_filenames.npy', y_val)

        return x_train_filenames, x_val_filenames, y_train, y_val
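The pattern above (load a cached train/validation split if the .npy files exist, otherwise create and save one) can be factored into a small helper. A minimal sketch, assuming plain NumPy arrays and the same four-file layout; cached_split and its argument names are illustrative, not part of the original code:

import os
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

def cached_split(filenames, labels, paths, test_size=0.1, seed=1):
    """Load a train/val split from four .npy files if they all exist; otherwise create and cache it."""
    if all(os.path.isfile(p) for p in paths):
        return tuple(np.load(p, allow_pickle=True) for p in paths)
    x_tr, x_val, y_tr, y_val = train_test_split(
        *shuffle(filenames, labels), test_size=test_size, random_state=seed)
    for p, arr in zip(paths, (x_tr, x_val, y_tr, y_val)):
        np.save(p, arr)  # cache so later runs reuse the same split
    return x_tr, x_val, y_tr, y_val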
Example #2
    def _create_generators(self):
        """
        check if we have the img & lbls name. and create in case we need it.
        :return:
        """
        fn_prefix = './file_names/' + self.dataset_name + '_'
        x_trains_path = fn_prefix + 'x_train_fns.npy'
        x_validations_path = fn_prefix + 'x_val_fns.npy'
        y_trains_path = fn_prefix + 'y_train_fns.npy'
        y_validations_path = fn_prefix + 'y_val_fns.npy'

        tf_utils = TFRecordUtility(number_of_landmark=self.num_landmark)

        if os.path.isfile(x_trains_path) and os.path.isfile(x_validations_path) \
                and os.path.isfile(y_trains_path) and os.path.isfile(y_validations_path):
            x_train_filenames = load(x_trains_path)
            x_val_filenames = load(x_validations_path)
            y_train = load(y_trains_path)
            y_val = load(y_validations_path)
        else:
            filenames, labels = tf_utils.create_image_and_labels_name(
                dataset_name=self.dataset_name)
            filenames_shuffled, y_labels_shuffled = shuffle(filenames, labels)
            x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
                filenames_shuffled,
                y_labels_shuffled,
                test_size=0.05,
                random_state=1)

            save(x_trains_path, x_train_filenames)
            save(x_validations_path, x_val_filenames)
            save(y_trains_path, y_train)
            save(y_validations_path, y_val)

        return x_train_filenames, x_val_filenames, y_train, y_val
Example #3
    def create_pca(self, dataset_name, pca_postfix):
        tf_record_util = TFRecordUtility()

        lbl_arr = []
        pose_arr = []
        if dataset_name == DatasetName.ibug:
            lbl_arr, img_arr, pose_arr = tf_record_util.retrieve_tf_record(
                IbugConf.tf_train_path,
                IbugConf.sum_of_train_samples,
                only_label=True,
                only_pose=True)
        lbl_arr = np.array(lbl_arr)

        print('PCA-retrieved')
        '''TODO: should these be normalized as described in the HyperFace paper?'''

        # reduced_lbl_arr, eigenvalues, eigenvectors = self.__svd_func(lbl_arr, pca_postfix)
        reduced_lbl_arr, eigenvalues, eigenvectors = self.__func_PCA(
            lbl_arr, pca_postfix)
        mean_lbl_arr = np.mean(lbl_arr, axis=0)
        eigenvectors = eigenvectors.T

        self.__save_obj(
            eigenvalues,
            dataset_name + self.__eigenvalues_prefix + str(pca_postfix))
        self.__save_obj(
            eigenvectors,
            dataset_name + self.__eigenvectors_prefix + str(pca_postfix))
        self.__save_obj(
            mean_lbl_arr,
            dataset_name + self.__meanvector_prefix + str(pca_postfix))
        '''calculate pose min max'''
        p_1_arr = []
        p_2_arr = []
        p_3_arr = []

        for p_item in pose_arr:
            p_1_arr.append(p_item[0])
            p_2_arr.append(p_item[1])
            p_3_arr.append(p_item[2])

        p_1_min = min(p_1_arr)
        p_1_max = max(p_1_arr)

        p_2_min = min(p_2_arr)
        p_2_max = max(p_2_arr)

        p_3_min = min(p_3_arr)
        p_3_max = max(p_3_arr)

        self.__save_obj(p_1_min, 'p_1_min')
        self.__save_obj(p_1_max, 'p_1_max')

        self.__save_obj(p_2_min, 'p_2_min')
        self.__save_obj(p_2_max, 'p_2_max')

        self.__save_obj(p_3_min, 'p_3_min')
        self.__save_obj(p_3_max, 'p_3_max')

        print('PCA-->done')
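The internals of __func_PCA are not shown in this example; a minimal sketch of what a PCA over flattened landmark vectors typically looks like with scikit-learn. The variance threshold of 0.97 mirrors what a pca_postfix of 97 suggests, but that mapping is an assumption:

import numpy as np
from sklearn.decomposition import PCA

def pca_over_landmarks(lbl_arr, variance_to_keep=0.97):
    """Fit PCA on an (n_samples, n_coords) matrix; return the pieces create_pca saves."""
    pca = PCA(n_components=variance_to_keep)  # float in (0, 1): keep this share of variance
    reduced_lbl_arr = pca.fit_transform(lbl_arr)
    return reduced_lbl_arr, pca.explained_variance_, pca.components_, pca.mean_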
Example #4
    def _create_generators(self):
        fn_prefix = './file_names/' + self.dataset_name + '_'
        # x_trains_path = fn_prefix + 'x_train_fns.npy'
        # x_validations_path = fn_prefix + 'x_val_fns.npy'

        tf_utils = TFRecordUtility(number_of_landmark=self.num_landmark)

        filenames, labels = tf_utils.create_image_and_labels_name(
            img_path=self.img_path, annotation_path=self.annotation_path)
        filenames_shuffled, y_labels_shuffled = shuffle(filenames, labels)
        x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
            filenames_shuffled,
            y_labels_shuffled,
            test_size=LearningConfig.batch_size,  # an int test_size reserves exactly this many samples (one batch) for validation
            random_state=1)

        # save(x_trains_path, filenames_shuffled)
        # save(x_validations_path, y_labels_shuffled)

        # save(x_trains_path, x_train_filenames)
        # save(x_validations_path, x_val_filenames)
        # save(y_trains_path, y_train)
        # save(y_validations_path, y_val)

        # return filenames_shuffled, y_labels_shuffled
        return x_train_filenames, x_val_filenames, y_train, y_val
Example #5
    def _eval_model(self, model, img_val_filenames, pn_val_filenames):
        tf_util = TFRecordUtility(number_of_landmark=self.num_landmark)
        nme_sum = 0
        fail_counter_sum = 0
        batch_size = 20  # LearningConfig.batch_size
        step_per_epoch = len(img_val_filenames) // batch_size
        for batch_index in tqdm(range(step_per_epoch)):
            images, anno_gts = self._get_batch_sample(
                batch_index=batch_index,
                x_train_filenames=img_val_filenames,
                y_train_filenames=pn_val_filenames,
                is_eval=True,
                batch_size=batch_size)
            '''predict:'''
            anno_Pres = model.predict_on_batch(
                images)  # hm_pr: 4, bs, 64, 64, 68
            '''calculate NME for batch'''
            batch_nme, batch_fr = tf_util.calc_NME_over_batch(
                anno_GTs=anno_gts,
                anno_Pres=anno_Pres,
                ds_name=self.dataset_name)
            nme_sum += batch_nme
            fail_counter_sum += batch_fr
        '''calculate total'''
        fr = 100 * fail_counter_sum / len(img_val_filenames)
        nme = 100 * nme_sum / len(img_val_filenames)
        print('nme:' + str(nme))
        print('fr:' + str(fr))
        return nme, fr
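calc_NME_over_batch is not reproduced here; the usual definition of NME for facial landmarks is the mean point-to-point Euclidean error normalized by a reference distance (commonly inter-ocular). A hedged sketch; the choice of normalizing distance is an assumption:

import numpy as np

def nme_single(pred, gt, norm_dist):
    """pred, gt: (num_points, 2) arrays; norm_dist: e.g. the ground-truth inter-ocular distance."""
    per_point_error = np.linalg.norm(pred - gt, axis=1)  # Euclidean error per landmark
    return per_point_error.mean() / norm_dist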
Example #6
    def test_pca_validity(self, pca_postfix):
        cnn_model = CNNModel()
        pca_utility = PCAUtility()
        tf_record_utility = TFRecordUtility()
        image_utility = ImageUtility()

        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(
            dataset_name=DatasetName.ibug, pca_postfix=pca_postfix)

        lbl_arr, img_arr, pose_arr = tf_record_utility.retrieve_tf_record(
            tfrecord_filename=IbugConf.tf_train_path,
            number_of_records=30,
            only_label=False)
        for i in range(20):
            b_vector_p = self.calculate_b_vector(lbl_arr[i], True, eigenvalues,
                                                 eigenvectors, meanvector)
            lbl_new = meanvector + np.dot(eigenvectors, b_vector_p)

            labels_true_transformed, landmark_arr_x_t, landmark_arr_y_t = image_utility. \
                create_landmarks_from_normalized(lbl_arr[i], 224, 224, 112, 112)

            labels_true_transformed_pca, landmark_arr_x_pca, landmark_arr_y_pca = image_utility. \
                create_landmarks_from_normalized(lbl_new, 224, 224, 112, 112)

            image_utility.print_image_arr(i, img_arr[i], landmark_arr_x_t,
                                          landmark_arr_y_t)
            image_utility.print_image_arr(i * 1000, img_arr[i],
                                          landmark_arr_x_pca,
                                          landmark_arr_y_pca)
Example #7
    def _create_generators(self):
        tf_utils = TFRecordUtility()
        if self.point_wise:
            if os.path.isfile('npy/' + 'x_train_filenames_pw.npy') and \
                    os.path.isfile('npy/' + 'x_val_filenames_pw.npy') and \
                    os.path.isfile('npy/' + 'y_train_filenames_pw.npy') and \
                    os.path.isfile('npy/' + 'y_val_filenames_pw.npy'):
                x_train_filenames = load('npy/x_train_filenames_pw.npy')
                x_val_filenames = load('npy/x_val_filenames_pw.npy')
                y_train = load('npy/y_train_filenames_pw.npy')
                y_val = load('npy/y_val_filenames_pw.npy')
            else:
                filenames, labels = tf_utils.create_fused_images_and_labels_name()

                filenames_shuffled, y_labels_shuffled = shuffle(
                    filenames, labels)

                x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
                    filenames_shuffled,
                    y_labels_shuffled,
                    test_size=0.1,
                    random_state=1)

                save('npy/' + 'x_train_filenames_pw.npy', x_train_filenames)
                save('npy/' + 'x_val_filenames_pw.npy', x_val_filenames)
                save('npy/' + 'y_train_filenames_pw.npy', y_train)
                save('npy/' + 'y_val_filenames_pw.npy', y_val)
        else:
            if os.path.isfile('x_train_filenames.npy') and \
                    os.path.isfile('x_val_filenames.npy') and \
                    os.path.isfile('y_train_filenames.npy') and \
                    os.path.isfile('y_val_filenames.npy'):
                x_train_filenames = load('x_train_filenames.npy')
                x_val_filenames = load('x_val_filenames.npy')
                y_train = load('y_train_filenames.npy')
                y_val = load('y_val_filenames.npy')
            else:
                filenames, labels = tf_utils.create_image_and_labels_name(
                    self.point_wise)

                filenames_shuffled, y_labels_shuffled = shuffle(
                    filenames, labels)

                x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
                    filenames_shuffled,
                    y_labels_shuffled,
                    test_size=0.1,
                    random_state=1)

                save('x_train_filenames.npy', x_train_filenames)
                save('x_val_filenames.npy', x_val_filenames)
                save('y_train_filenames.npy', y_train)
                save('y_val_filenames.npy', y_val)

        return x_train_filenames, x_val_filenames, y_train, y_val
Example #8
    def _test_on_W300(self, model):
        tf_record_utility = TFRecordUtility(self.num_landmark)

        lbl_arr_challenging, img_arr_challenging = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=IbugConf.tf_test_path_challenging,
            number_of_records=IbugConf.orig_number_of_test_challenging,
            only_label=False)

        nme_ch, fr_ch, auc_ch, mae_yaw_ch, mae_pitch_ch, mae_roll_ch = self._calculate_errors(
            model, W300Conf.number_of_all_sample_challenging,
            img_arr_challenging, lbl_arr_challenging)

        lbl_arr_challenging = np.array(lbl_arr_challenging)
        img_arr_challenging = np.array(img_arr_challenging)

        print('nme_ch: ', str(nme_ch), 'fr_ch: ', str(fr_ch), 'auc_ch: ', str(auc_ch))
        print('mae_yaw: ', str(mae_yaw_ch), 'mae_pitch: ', str(mae_pitch_ch), 'mae_roll: ', str(mae_roll_ch))

        lbl_arr_common, img_arr_common = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=IbugConf.tf_test_path_common,
            number_of_records=IbugConf.orig_number_of_test_common,
            only_label=False)

        lbl_arr_full, img_arr_full = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=IbugConf.tf_test_path_full,
            number_of_records=IbugConf.orig_number_of_test_full,
            only_label=False)


        lbl_arr_common = np.array(lbl_arr_common)
        img_arr_common = np.array(img_arr_common)

        lbl_arr_full = np.array(lbl_arr_full)
        img_arr_full = np.array(img_arr_full)


        nme_c, fr_c, auc_c, mae_yaw_c, mae_pitch_c, mae_roll_c = self._calculate_errors(model,
                                                                                        W300Conf.number_of_all_sample_common,
                                                                                        img_arr_common, lbl_arr_common)
        print('nme_c: ', str(nme_c), 'fr_c: ', str(fr_c), 'auc_c: ', str(auc_c))
        print('mae_yaw: ', str(mae_yaw_c), 'mae_pitch: ', str(mae_pitch_c), 'mae_roll: ', str(mae_roll_c))

        nme_f, fr_f, auc_f, mae_yaw_f, mae_pitch_f, mae_roll_f = self._calculate_errors(model,
                                                                                        W300Conf.number_of_all_sample_full,
                                                                                        img_arr_full, lbl_arr_full)
        print('nme_f: ', str(nme_f), 'fr_f: ', str(fr_f), 'auc_f: ', str(auc_f))
        print('mae_yaw: ', str(mae_yaw_f), 'mae_pitch: ', str(mae_pitch_f), 'mae_roll: ', str(mae_roll_f))

        result_str = "-------------------------_test_on_W300------------------------------------" + '\n\r' \
                     'nme_ch: ', str(nme_ch), 'fr_ch: ', str(fr_ch), 'auc_ch: ', str(auc_ch) + '\n\r' \
                     'mae_yaw: ', str(mae_yaw_ch), 'mae_pitch: ', str(mae_pitch_ch), 'mae_roll: ', str(mae_roll_ch) + '\n\r' \
                     'nme_c: ', str(nme_c), 'fr_c: ', str(fr_c), 'auc_c: ', str(auc_c) + '\n\r' \
                     'mae_yaw: ', str(mae_yaw_c), 'mae_pitch: ', str(mae_pitch_c), 'mae_roll: ', str(mae_roll_c) + '\n\r' \
                     'nme_f: ', str(nme_f), 'fr_f: ', str(fr_f), 'auc_f: ', str(auc_f) + '\n\r' \
                     'mae_yaw: ', str(mae_yaw_f), 'mae_pitch: ', str(mae_pitch_f), 'mae_roll: ', str(mae_roll_f)
        return str(result_str)
Example #9
    def _test_on_COFW(self, model):
        tf_record_utility = TFRecordUtility(self.output_len)
        lbl_arr_total, img_arr_total = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=CofwConf.tf_test_path,
            number_of_records=CofwConf.orig_number_of_test,
            only_label=False)
        lbl_arr_total = np.array(lbl_arr_total)
        img_arr_total = np.array(img_arr_total)

        nme, fr, auc = self._calculate_errors(model,
                                              CofwConf.orig_number_of_test,
                                              img_arr_total, lbl_arr_total)
        print('nme: ', str(nme), 'fr: ', str(fr), 'auc: ', str(auc))
        print("-------------------------------------------------------------")
Example #10
    def _create_generators(self, img_path=None, annotation_path=None):
        # x_trains_path = fn_prefix + 'x_train_fns.npy'
        # x_validations_path = fn_prefix + 'x_val_fns.npy'

        tf_utils = TFRecordUtility(number_of_landmark=self.num_landmark)
        if img_path is None:
            filenames, labels = tf_utils.create_image_and_labels_name(
                img_path=self.img_path, annotation_path=self.annotation_path)
        else:
            filenames, labels = tf_utils.create_image_and_labels_name(
                img_path=img_path, annotation_path=annotation_path)

        filenames_shuffled, y_labels_shuffled = shuffle(filenames, labels)

        # x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
        #     filenames_shuffled, y_labels_shuffled, test_size=LearningConfig.batch_size, random_state=1)

        return filenames_shuffled, y_labels_shuffled
Example #11
    def _create_generators(self):
        tf_utils = TFRecordUtility(self.output_len)

        filenames, labels = tf_utils.create_image_and_labels_name()
        filenames_shuffled, y_labels_shuffled = shuffle(filenames, labels)
        x_train_filenames, x_val_filenames, y_train, y_val = train_test_split(
            filenames_shuffled,
            y_labels_shuffled,
            test_size=0.05,
            random_state=100,
            shuffle=True)

        save('x_train_filenames.npy', x_train_filenames)
        save('x_val_filenames.npy', x_val_filenames)
        save('y_train_filenames.npy', y_train)
        save('y_val_filenames.npy', y_val)

        return x_train_filenames, x_val_filenames, y_train, y_val
Example #12
    def convert_pts_to_hm(self, pts):
        """
        Convert pts to heatmaps {56 * 56 * self.num_landmark//2} and then calculate the loss.
        :param pts: bs * self.num_landmark; points are normalized in [-0.5, +0.5] --> need to upsample
        :return:
        """
        pts = pts.numpy()
        # pts = tf.keras.backend.eval(pts)
        hm_arr = []
        tf_util = TFRecordUtility(self.num_landmark // 2)
        for i in range(LearningConfig.batch_size):
            hm_t = tf_util.generate_hm_Ten(height=InputDataSize.hm_size,
                                           width=InputDataSize.hm_size,
                                           landmarks=pts[i],
                                           s=3.0,
                                           upsample=True)
            hm_arr.append(hm_t)
        hm_pts = tf.convert_to_tensor(np.array(hm_arr))
        return hm_pts
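generate_hm_Ten is not shown; a common way to render a single landmark into a heatmap is an unnormalized Gaussian bump, which is what the s=3.0 argument above suggests. A sketch under that assumption:

import numpy as np

def gaussian_heatmap(height, width, cx, cy, s=3.0):
    """One (height, width) map with a Gaussian of std s centered at pixel (cx, cy)."""
    ys, xs = np.mgrid[0:height, 0:width]
    return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2.0 * s ** 2))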
Example #13
    def train_fit(self):
        tf_record_util = TFRecordUtility(self.output_len)
        '''prepare callbacks'''
        callbacks_list = self._prepare_callback()
        ''' define optimizers'''
        optimizer = self._get_optimizer()
        '''create train, validation, test data iterator'''
        train_images, train_landmarks = tf_record_util.create_training_tensor_points(
            tfrecord_filename=self.tf_train_path, batch_size=self.BATCH_SIZE)
        validation_images, validation_landmarks = \
            tf_record_util.create_training_tensor_points(tfrecord_filename=self.tf_eval_path,
                                                         batch_size=self.BATCH_SIZE)
        '''creating model'''
        cnn = CNNModel()
        # model = tf.keras.models.load_model(self.weight)

        model = cnn.get_model(train_images=train_images,
                              arch=self.arch,
                              num_output_layers=self.num_output_layers,
                              output_len=self.output_len,
                              input_tensor=train_images,
                              inp_shape=None)
        if self.weight is not None:
            model.load_weights(self.weight)
        '''compiling model'''
        model.compile(
            loss=self._generate_loss(),
            optimizer=optimizer,
            metrics=['mse', 'mae'],
            target_tensors=self._generate_target_tensors(train_landmarks),
            loss_weights=self._generate_loss_weights())
        '''train Model '''
        print('< ========== Start Training ============= >')

        history = model.fit(train_images,
                            train_landmarks,
                            epochs=self.EPOCHS,
                            steps_per_epoch=self.STEPS_PER_EPOCH,
                            validation_data=(validation_images,
                                             validation_landmarks),
                            validation_steps=self.STEPS_PER_VALIDATION_EPOCH,
                            verbose=1,
                            callbacks=callbacks_list)
Example #14
    def _test_on_W300(self, model):
        tf_record_utility = TFRecordUtility(self.output_len)
        lbl_arr_challenging, img_arr_challenging = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=W300Conf.tf_challenging,
            number_of_records=W300Conf.number_of_all_sample_challenging,
            only_label=False)
        lbl_arr_common, img_arr_common = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=W300Conf.tf_common,
            number_of_records=W300Conf.number_of_all_sample_common,
            only_label=False)
        lbl_arr_full, img_arr_full = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=W300Conf.tf_full,
            number_of_records=W300Conf.number_of_all_sample_full,
            only_label=False)

        lbl_arr_challenging = np.array(lbl_arr_challenging)
        img_arr_challenging = np.array(img_arr_challenging)

        lbl_arr_common = np.array(lbl_arr_common)
        img_arr_common = np.array(img_arr_common)

        lbl_arr_full = np.array(lbl_arr_full)
        img_arr_full = np.array(img_arr_full)

        nme_ch, fr_ch, auc_ch = self._calculate_errors(
            model, W300Conf.number_of_all_sample_challenging,
            img_arr_challenging, lbl_arr_challenging)
        print('nme_ch: ', str(nme_ch), 'fr_ch: ', str(fr_ch), 'auc_ch: ',
              str(auc_ch))

        nme_c, fr_c, auc_c = self._calculate_errors(
            model, W300Conf.number_of_all_sample_common, img_arr_common,
            lbl_arr_common)
        print('nme_c: ', str(nme_c), 'fr_c: ', str(fr_c), 'auc_c: ',
              str(auc_c))

        nme_f, fr_f, auc_f = self._calculate_errors(
            model, W300Conf.number_of_all_sample_full, img_arr_full,
            lbl_arr_full)
        print('nme_f: ', str(nme_f), 'fr_f: ', str(fr_f), 'auc_f: ',
              str(auc_f))
Example #15
    def _points_to_2d(self, _points):
        """

        :param _points:
        :return:
        """
        tf_rec = TFRecordUtility(self.num_landmark)
        image_utility = ImageUtility()
        hm_arr = []
        for i in range(LearningConfig.batch_size):
            _x_y, _x, _y = image_utility.create_landmarks_from_normalized(
                _points[i], InputDataSize.image_input_size,
                InputDataSize.image_input_size, InputDataSize.img_center,
                InputDataSize.img_center)
            hm_multi_layer = tf_rec.generate_hm(InputDataSize.hm_size,
                                                InputDataSize.hm_size,
                                                np.array(_x_y),
                                                self.hm_stride / 2, False)
            hm = np.sum(hm_multi_layer, axis=2)
            hm_arr.append(hm)
        return np.array(hm_arr)
Example #16
    def convert_hm_to_pts(self, hm):
        x_center = InputDataSize.image_input_size // 2
        width = InputDataSize.image_input_size
        hm_arr = []
        tf_util = TFRecordUtility(self.num_landmark // 2)

        for i in range(LearningConfig.batch_size):
            hm_t = tf_util.from_heatmap_to_point_tensor(heatmaps=hm[i],
                                                        number_of_points=5)
            hm_t = tf.reshape(tensor=hm_t, shape=[self.num_landmark])
            '''hm is in [0,224] --> should be in [-0.5,+0.5]'''
            hm_t_norm = tf.math.scalar_mul(
                scalar=1 / width,
                x=tf.math.subtract(hm_t, np.repeat(x_center,
                                                   self.num_landmark)))
            hm_arr.append(hm_t_norm)
        '''reshape hm'''
        hm_pts = tf.stack(
            [hm_arr[i] for i in range(LearningConfig.batch_size)],
            0)  # bs * self.num_landmark
        return hm_pts
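from_heatmap_to_point_tensor reduces each heatmap channel to a coordinate; the simplest decoding is a per-channel argmax. A sketch of that idea in NumPy (the real utility takes a number_of_points argument, presumably combining the top activations, which this simplification ignores):

import numpy as np

def heatmaps_to_points(hm):
    """hm: (H, W, C) -> (C, 2) array of (x, y) peak locations."""
    h, w, c = hm.shape
    flat_idx = hm.reshape(h * w, c).argmax(axis=0)  # flat peak index per channel
    return np.stack([flat_idx % w, flat_idx // w], axis=1)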
Example #17
    def custom_loss_hm(self, ten_hm_t, ten_hm_p):
        # print(ten_hm_t.get_shape())  #  [None, 56, 56, 68]
        # print(ten_hm_p.get_shape())

        tf_utility = TFRecordUtility()

        sqr = K.square(ten_hm_t - ten_hm_p)  # [None, 56, 56, 68]
        mean1 = K.mean(sqr, axis=1)
        mean2 = K.mean(mean1, axis=1)
        tensor_mean_square_error = K.mean(mean2, axis=1)

        # print(tensor_mean_square_error.get_shape().as_list())  # [None, 68]

        # vec_mse = K.eval(tensor_mean_square_error)
        # print("mse.shape:")
        # print(vec_mse.shape)  # (50, 68)
        # print(vec_mse)
        # print("----------->>>")
        '''calculate points from generated hm'''

        p_points_batch = tf.stack([
            tf_utility.from_heatmap_to_point_tensor(ten_hm_p[i], 5, 1)
            for i in range(LearningConfig.batch_size)
        ])

        t_points_batch = tf.stack([
            tf_utility.from_heatmap_to_point_tensor(ten_hm_t[i], 5, 1)
            for i in range(LearningConfig.batch_size)
        ])
        '''p_points_batch is [batch, 2, 68]'''
        sqr_2 = K.square(t_points_batch - p_points_batch)  # [None, 2, 68]
        mean_1 = K.mean(sqr_2, axis=1)
        tensor_indices_mean_square_error = K.mean(mean_1, axis=1)

        # tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_indices_mean_square_error])

        tensor_total_loss = tf.add(tensor_mean_square_error,
                                   tensor_indices_mean_square_error)
        return tensor_total_loss
Example #18
    def _test_on_COFW(self, model):
        tf_record_utility = TFRecordUtility(self.output_len)
        lbl_arr_total, img_arr_total = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=CofwConf.tf_test_path,
            number_of_records=CofwConf.orig_number_of_test,
            only_label=False)
        lbl_arr_total = np.array(lbl_arr_total)
        img_arr_total = np.array(img_arr_total)

        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model,
                                                                            CofwConf.orig_number_of_test,
                                                                            img_arr_total, lbl_arr_total)

        result_str = "-------------------------_test_on_COFW------------------------------------" + '\n\r' \
                     + 'nme: ', str(nme), 'fr: ', str(fr), 'auc: ', str(auc) + '\n\r' \
                     + 'mae_yaw: ', str(mae_yaw), 'mae_pitch: ', str(mae_pitch), 'mae_roll: ', str(mae_roll)

        print('nme: ', str(nme), 'fr: ', str(fr), 'auc: ', str(auc))
        print('mae_yaw: ', str(mae_yaw), 'mae_pitch: ', str(mae_pitch), 'mae_roll: ', str(mae_roll))
        print("-------------------------------------------------------------")

        return result_str
Example #19
    def train_fit(self):
        tf_record_util = TFRecordUtility()
        '''prepare callbacks'''
        callbacks_list = self._prepare_callback()
        ''' define optimizers'''
        optimizer = self._get_optimizer()
        '''create train, validation, test data iterator'''
        train_images, _, _, _, _, _, _, train_heatmap, _ = \
            tf_record_util.create_training_tensor(tfrecord_filename=IbugConf.tf_train_path_heatmap,
                                                  batch_size=self.BATCH_SIZE, reduced=True)
        validation_images, _, _, _, _, _, _, validation_heatmap, _ = \
            tf_record_util.create_training_tensor(tfrecord_filename=IbugConf.tf_evaluation_path_heatmap,
                                                  batch_size=self.BATCH_SIZE, reduced=True)
        '''creating model'''
        model = self._get_model(train_images)

        if self.weight is not None:
            model.load_weights(self.weight)
        '''compiling model'''
        model.compile(
            loss=self._generate_loss(),
            optimizer=optimizer,
            metrics=['mse', 'mae'],
            target_tensors=self._generate_target_tensors(train_heatmap),
            loss_weights=self._generate_loss_weights())
        '''train Model '''
        print('< ========== Start Training ============= >')

        history = model.fit(
            train_images,
            train_heatmap,
            epochs=self.EPOCHS,
            steps_per_epoch=self.STEPS_PER_EPOCH,
            validation_data=(validation_images, validation_heatmap),
            validation_steps=self.STEPS_PER_VALIDATION_EPOCH,
            verbose=1,
            callbacks=callbacks_list,
        )
Example #20
    def calculate_adoptive_weight(self, epoch, batch_index, y_train_filenames):
        tf_utils = TFRecordUtility(self.num_landmark)
        batch_y = y_train_filenames[batch_index * LearningConfig.batch_size:
                                    (batch_index + 1) * LearningConfig.batch_size]
        if 0 <= epoch <= 10:
            asm_acc = 80
        elif 10 < epoch <= 20:
            asm_acc = 85
        elif 20 < epoch <= 50:
            asm_acc = 90
        elif 50 < epoch <= 100:
            asm_acc = 95
        else:
            asm_acc = 97

        '''for each point in training set, calc delta_i = ASM(gt_i)-pr_i: '''
        if self.dataset_name == DatasetName.cofw:  # this ds is not normalized
            if self.arch == 'efNb0':
                pn_batch = np.array([
                    self._load_and_normalize(self.annotation_path + file_name)
                    for file_name in batch_y
                ])
                pn_batch_asm = np.array([
                    tf_utils.get_asm(
                        input=self._load_and_normalize(self.annotation_path +
                                                       file_name),
                        dataset_name=self.dataset_name,
                        accuracy=asm_acc) for file_name in batch_y
                ])
            else:
                pn_batch = np.array([
                    load(self.annotation_path + file_name)
                    for file_name in batch_y
                ])
                pn_batch_asm = np.array([
                    tf_utils.get_asm(input=load(self.annotation_path +
                                                file_name),
                                     dataset_name=self.dataset_name,
                                     accuracy=asm_acc) for file_name in batch_y
                ])
        else:
            pn_batch = np.array([
                self._load_and_normalize(self.annotation_path + file_name)
                for file_name in batch_y
            ])
            pn_batch_asm = np.array([
                tf_utils.get_asm(
                    input=self._load_and_normalize(self.annotation_path +
                                                   file_name),
                    dataset_name=self.dataset_name,
                    accuracy=asm_acc) for file_name in batch_y
            ])
        '''phi = mean(delta_i s)'''
        '''1d'''
        delta = np.array([
            abs(pn_batch[i] - pn_batch_asm[i]) for i in range(len(pn_batch))
        ])  # 1-d
        '''2d'''
        # pn_batch = np.reshape(pn_batch, [pn_batch.shape[0], pn_batch.shape[1] // 2, 2])  # bs, 68, 2
        # pn_batch_asm = np.reshape(pn_batch_asm, [pn_batch_asm.shape[0], pn_batch_asm.shape[1] // 2, 2])  # bs, 68, 2
        # delta = np.sqrt(np.sum(np.square(pn_batch - pn_batch_asm), axis=2))

        omega = np.zeros_like(delta)
        for i in range(len(delta)):
            omega[i] = np.array([
                1 / (0.2 + math.exp(-50 * delta[i][j]))
                for j in range(len(delta[i]))
            ])
        return omega
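The final loop builds omega elementwise; the same weighting can be written as one vectorized NumPy expression. A sketch, assuming delta is the 1-D per-coordinate error array computed above:

import numpy as np

def adaptive_weights(delta):
    """omega = 1 / (0.2 + exp(-50 * delta)), applied elementwise; larger errors get larger weights."""
    return 1.0 / (0.2 + np.exp(-50.0 * delta))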
Example #21
    def _test_on_WFLW(self, model):
        tf_record_utility = TFRecordUtility(self.num_landmark)

        result_str = ''

        # lbl_arr_total, img_arr_total = tf_record_utility.retrieve_tf_record_test_set(
        #     tfrecord_filename=WflwConf.tf_test_path,
        #     number_of_records=WflwConf.orig_number_of_test,
        #     only_label=False)
        # nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(detect, model, WflwConf.orig_number_of_test,
        #                                                                     img_arr_total, lbl_arr_total)

        # result_str += "\n\r\n\r-------------------------_test_on_WFLW:total------------------------------------" + '\n\r' \
        #              + 'nme: ', str(nme), 'fr: ', str(fr), 'auc: ', str(auc) + '\n\r' \
        #              + 'mae_yaw: ', str(mae_yaw), 'mae_pitch: ', str(mae_pitch), 'mae_roll: ', str(mae_roll)
        #
        # print('nme: ', str(nme), 'fr: ', str(fr), 'auc: ', str(auc))
        # print('mae_yaw: ', str(mae_yaw), 'mae_pitch: ', str(mae_pitch), 'mae_roll: ', str(mae_roll))
        # print("-------------------------------------------------------------")
        #
        lbl_arr_largepose, img_arr_largepose = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=WflwConf.tf_test_path_largepose,
            number_of_records=WflwConf.orig_of_all_test_largepose,
            only_label=False)
        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model, WflwConf.orig_of_all_test_largepose,
                                                                            img_arr_largepose, lbl_arr_largepose)
        result_str += '\n\r\n\r-------------------------_test_on_WFLW: largepose------------------------------------' + '\n\r' \
                      + 'nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc) + '\n\r' \
                      + 'mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll)
        print('nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc))
        print('mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll))
        print("-------------------------------------------------------------")


        lbl_arr_occlusion, img_arr_occlusion = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=WflwConf.tf_test_path_occlusion,
            number_of_records=WflwConf.orig_of_all_test_occlusion,
            only_label=False)
        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model, WflwConf.orig_of_all_test_occlusion,
                                                                            img_arr_occlusion, lbl_arr_occlusion)
        result_str += '\n\r\n\r-------------------------_test_on_WFLW _occlusion------------------------------------' + '\n\r' \
                      + 'nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc) + '\n\r' \
                      + 'mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll)

        print('nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc))
        print('mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll))
        print("-------------------------------------------------------------")


        lbl_arr_makeup, img_arr_makeup = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=WflwConf.tf_test_path_makeup,
            number_of_records=WflwConf.orig_of_all_test_makeup,
            only_label=False)
        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model, WflwConf.orig_of_all_test_makeup,
                                                                            img_arr_makeup, lbl_arr_makeup)
        result_str += '\n\r\n\r-------------------------_test_on_WFLW_makeup------------------------------------' + '\n\r' \
                      + 'nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc) + '\n\r' \
                      + 'mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll)
        print('nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc))
        print('mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll))
        print("-------------------------------------------------------------")

        lbl_arr_expression, img_arr_expression = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=WflwConf.tf_test_path_expression,
            number_of_records=WflwConf.orig_of_all_test_expression,
            only_label=False)
        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model, WflwConf.orig_of_all_test_expression,
                                                                            img_arr_expression, lbl_arr_expression)
        result_str += '\n\r\n\r-------------------------_test_on_WFLW_expression------------------------------------' + '\n\r' \
                      + 'nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc) + '\n\r' \
                      + 'mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll)
        print('nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc))
        print('mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll))
        print("-------------------------------------------------------------")


        lbl_arr_illumination, img_arr_illumination = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=WflwConf.tf_test_path_illumination,
            number_of_records=WflwConf.orig_of_all_test_illumination,
            only_label=False)
        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model, WflwConf.orig_of_all_test_illumination,
                                                                            img_arr_illumination, lbl_arr_illumination)

        result_str += '\n\r\n\r-------------------------_test_on_WFLW _illumination ------------------------------------' + '\n\r' \
                      + 'nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc) + '\n\r' \
                      + 'mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll)

        print('nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc))
        print('mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll))
        print("-------------------------------------------------------------")

        lbl_arr_blur, img_arr_blur = tf_record_utility.retrieve_tf_record_test_set(
            tfrecord_filename=WflwConf.tf_test_path_blur,
            number_of_records=WflwConf.orig_of_all_test_blur,
            only_label=False)
        nme, fr, auc, mae_yaw, mae_pitch, mae_roll = self._calculate_errors(model, WflwConf.orig_of_all_test_blur,
                                                                            img_arr_blur, lbl_arr_blur)

        result_str += '\n\r\n\r-------------------------_test_on_WFLW_blur------------------------------------' + '\n\r' \
                      + 'nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc) + '\n\r' \
                      + 'mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll)

        print('nme: ' + str(nme) + ' fr: ' + str(fr) + ' auc: ' + str(auc))
        print('mae_yaw: ' + str(mae_yaw) + ' mae_pitch: ' + str(mae_pitch) + ' mae_roll: ' + str(mae_roll))
        print("-------------------------------------------------------------")

        return result_str
Example #22
    def custom_loss_hm(self, ten_hm_t, ten_hm_p):
        # print(ten_hm_t.get_shape().as_list())  #  [None, 56, 56, 68]
        # print(ten_hm_p.get_shape())

        tf_utility = TFRecordUtility()

        sqr = K.square(ten_hm_t - ten_hm_p)  # [None, 56, 56, 68]
        mean1 = K.mean(sqr, axis=1)
        mean2 = K.mean(mean1, axis=1)
        tensor_mean_square_error = K.mean(mean2, axis=1)
        # print(tensor_mean_square_error.get_shape().as_list())  # [None, 68]

        # vec_mse = K.eval(tensor_mean_square_error)
        # print("mse.shape:")
        # print(vec_mse.shape)  # (50, 68)
        # print(vec_mse)
        # print("----------->>>")

        '''convert tensor to vector'''
        vec_hm_p = K.eval(ten_hm_p)
        vec_hm_t = K.eval(ten_hm_t)

        loss_array = []

        for i in range(LearningConfig.batch_size):
            '''convert heatmap to points'''
            x_h_p, y_h_p, xy_h_p = tf_utility.from_heatmap_to_point(vec_hm_p[i], 5, 1)
            x_h_t, y_h_t, xy_h_t = tf_utility.from_heatmap_to_point(vec_hm_t[i], 5, 1)

            '''normalise points to be in [0, 1]'''
            x_h_p = x_h_p / 56
            y_h_p = y_h_p / 56
            xy_h_p = xy_h_p / 56
            x_h_t = x_h_t / 56
            y_h_t = y_h_t / 56
            xy_h_t = xy_h_t / 56
            '''test print images'''
            # imgpr.print_image_arr(i + 1, np.zeros(shape=[56, 56]), x_h_t, y_h_t)
            # imgpr.print_image_arr((i + 1)*1000, np.zeros(shape=[56, 56]), x_h_p, y_h_p)

            # print('--xy_h_p:---')
            # print(xy_h_p)
            # print('--xy_h_t:---')
            # print(xy_h_t)

            face_p, mouth_p, nose_p, leye_p, reye_p = self._depart_facial_point(xy_h_p)
            face_t, mouth_t, nose_t, leye_t, reye_t = self._depart_facial_point(xy_h_t)

            '''generate facial distance matrix'''
            face_p_mat, face_t_mat = self._generate_distance_matrix(face_p), self._generate_distance_matrix(face_t)
            mouth_p_mat, mouth_t_mat = self._generate_distance_matrix(mouth_p), self._generate_distance_matrix(mouth_t)
            nose_p_mat, nose_t_mat = self._generate_distance_matrix(nose_p), self._generate_distance_matrix(nose_t)
            leye_p_mat, leye_t_mat = self._generate_distance_matrix(leye_p), self._generate_distance_matrix(leye_t)
            reye_p_mat, reye_t_mat = self._generate_distance_matrix(reye_p), self._generate_distance_matrix(reye_t)

            '''calculate loss from each pair matrices'''

            face_loss = LearningConfig.reg_term_face * self._calculate_mse(face_p_mat, face_t_mat) / len(face_p)
            mouth_loss = LearningConfig.reg_term_mouth * self._calculate_mse(mouth_p_mat, mouth_t_mat) / len(mouth_p)
            nose_loss = LearningConfig.reg_term_nose * self._calculate_mse(nose_p_mat, nose_t_mat) / len(nose_p)
            leye_loss = LearningConfig.reg_term_leye * self._calculate_mse(leye_p_mat, leye_t_mat) / len(leye_p)
            reye_loss = LearningConfig.reg_term_reye * self._calculate_mse(reye_p_mat, reye_t_mat) / len(reye_p)

            loss_array.append(face_loss + mouth_loss + nose_loss + leye_loss + reye_loss)

            # print('mse[i]: ' + str(vec_mse[i]))
            # print('face_loss[i]: ' + str(face_loss))
            # print('mouth_loss[i]: ' + str(mouth_loss))
            # print('nose_loss[i]: ' + str(nose_loss))
            # print('leye_loss[i]: ' + str(leye_loss))
            # print('reye_loss[i]: ' + str(reye_loss))
            # print('============')

        loss_array = np.array(loss_array)
        tensor_distance_loss = K.variable(loss_array)

        # tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, loss_array])
        tensor_total_loss = tf.add(tensor_mean_square_error, tensor_distance_loss)
        return tensor_total_loss
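_generate_distance_matrix is not included in this example; a plausible sketch computes all pairwise Euclidean distances within one facial region, so the loss compares the internal geometry of prediction and ground truth. The helper below is hypothetical (points as an (n, 2) array):

import numpy as np

def distance_matrix(points):
    """points: (n, 2) -> (n, n) matrix of pairwise Euclidean distances."""
    diff = points[:, None, :] - points[None, :, :]  # (n, n, 2) pairwise differences
    return np.sqrt((diff ** 2).sum(axis=-1))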
Example #23
    def __customLoss_base(self, yTrue, yPred):
        pca_utility = PCAUtility()
        image_utility = ImageUtility()
        tf_record_utility = TFRecordUtility()

        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)

        # yTrue = tf.constant([[1.0, 2.0, 3.0], [5.0, 4.0, 7.0]])
        # yPred = tf.constant([[9.0, 1.0, 2.0], [7.0, 3.0, 8.0]])
        # session = K.get_session()

        tensor_mean_square_error = K.mean(K.square(yPred - yTrue), axis=-1)
        # tensor_mean_square_error = keras.losses.mean_squared_error(yPred, yTrue)
        mse = K.eval(tensor_mean_square_error)

        yPred_arr = K.eval(yPred)
        yTrue_arr = K.eval(yTrue)

        loss_array = []

        for i in range(LearningConfig.batch_size):
            asm_loss = 0

            truth_vector = yTrue_arr[i]
            predicted_vector = yPred_arr[i]

            b_vector_p = self.calculate_b_vector(predicted_vector, True, eigenvalues, eigenvectors, meanvector)
            y_pre_asm = meanvector + np.dot(eigenvectors, b_vector_p)

            """in order to test the results after PCA, you can use these lines of code"""
            # landmark_arr_xy, landmark_arr_x, landmark_arr_y = image_utility.create_landmarks_from_normalized(truth_vector, 224, 224, 112, 112)
            # image_utility.print_image_arr(i, np.ones([224, 224]), landmark_arr_x, landmark_arr_y)
            #
            # landmark_arr_xy_new, landmark_arr_x_new, landmark_arr_y_new= image_utility.create_landmarks_from_normalized(y_pre_asm, 224, 224, 112, 112)
            # image_utility.print_image_arr(i*100, np.ones([224, 224]), landmark_arr_x_new, landmark_arr_y_new)

            for j in range(len(y_pre_asm)):
                asm_loss += (truth_vector[j] - y_pre_asm[j]) ** 2
            asm_loss /= len(y_pre_asm)


            # asm_loss *= mse[i]
            # asm_loss *= LearningConfig.regularization_term

            loss_array.append(asm_loss)

            print('mse[i]: ' + str(mse[i]))
            print('asm_loss[i]: ' + str(asm_loss))
            print('============')

        loss_array = np.array(loss_array)
        tensor_asm_loss = K.variable(loss_array)

        # sum_loss_tensor = tf.math.add(tensor_mean_square_error, tensor_asm_loss)
        tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_asm_loss], axis=0)

        # sum_loss = np.array(K.eval(tensor_asm_loss))
        # print(mse)
        # print(K.eval(tensor_mean_square_error))
        # print(K.eval(tensor_asm_loss))
        # print('asm_loss  ' + str(loss_array[0]))
        # print('mse_loss  ' + str(mse[0]))
        # print('sum_loss  ' + str(sum_loss[0]))
        # print('total_loss  ' + str(total_loss[0]))
        # print('      ')
        return tensor_total_loss
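calculate_b_vector is not reproduced; in a standard ASM formulation the landmark vector is projected onto the PCA basis and each coefficient is clamped to plus/minus three standard deviations. A sketch under that assumption, with eigenvectors stored column-wise to match the np.dot(eigenvectors, b_vector_p) reconstruction above:

import numpy as np

def calculate_b_vector_sketch(vector, clamp, eigenvalues, eigenvectors, meanvector):
    """Project (vector - mean) onto the basis; optionally clamp b_i to +/- 3*sqrt(lambda_i)."""
    b = np.dot(eigenvectors.T, vector - meanvector)
    if clamp:
        limit = 3.0 * np.sqrt(eigenvalues)
        b = np.clip(b, -limit, limit)
    return b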
Example #24
    def train(self, teachers_arch, teachers_weight_files, teachers_weight_loss,
              teachers_tf_train_paths, student_weight_file):
        """
        :param teachers_arch: an array containing architecture of teacher networks
        :param teachers_weight_files: an array containing teachers h5 files
        :param teachers_weight_loss: an array containing weight of teachers model in loss function
        :param teachers_tf_train_paths: an array containing path of train tf records
        :param student_weight_file : student h5 weight path
        :return: null
        """

        tf_record_util = TFRecordUtility(self.output_len)
        c_loss = Custom_losses()
        '''-------------------------------------'''
        '''     preparing teacher models        '''
        '''-------------------------------------'''
        teacher_models = []
        cnn = CNNModel()
        for i in range(len(teachers_arch)):
            student_train_images, student_train_landmarks = tf_record_util.create_training_tensor_points(
                tfrecord_filename=teachers_tf_train_paths[i],
                batch_size=self.BATCH_SIZE)
            model = cnn.get_model(train_images=student_train_images,
                                  arch=teachers_arch[i],
                                  num_output_layers=1,
                                  output_len=self.output_len,
                                  input_tensor=None)

            model.load_weights(teachers_weight_files[i])
            teacher_models.append(model)
        '''---------------------------------'''
        '''     creating student model      '''
        '''---------------------------------'''
        '''retrieve tf data'''
        train_images, train_landmarks = tf_record_util.create_training_tensor_points(
            tfrecord_filename=self.tf_train_path, batch_size=self.BATCH_SIZE)
        validation_images, validation_landmarks = tf_record_util.create_training_tensor_points(
            tfrecord_filename=self.tf_eval_path, batch_size=self.BATCH_SIZE)
        '''create model'''
        student_model = cnn.get_model(train_images=train_images,
                                      arch=self.arch,
                                      num_output_layers=1,
                                      output_len=self.output_len,
                                      input_tensor=train_images,
                                      inp_shape=None)
        if student_weight_file is not None:
            student_model.load_weights(student_weight_file)
        '''prepare callbacks'''
        callbacks_list = self._prepare_callback()
        ''' define optimizers'''
        optimizer = Adam(lr=1e-3,
                         beta_1=0.9,
                         beta_2=0.999,
                         decay=1e-5,
                         amsgrad=False)
        '''create loss'''
        # file = open("map_aug" + self.dataset_name, 'rb')
        file = open("map_orig" + self.dataset_name, 'rb')
        landmark_img_map = pickle.load(file)
        file.close()

        # loss_func = c_loss.custom_teacher_student_loss_cos(img_path=self.img_path, lnd_img_map=landmark_img_map,
        #                                                    teacher_models=teacher_models,
        #                                                    teachers_weight_loss=teachers_weight_loss,
        #                                                    bath_size=self.BATCH_SIZE,
        #                                                    num_points=self.output_len, cos_weight=cos_weight)

        loss_func = c_loss.custom_teacher_student_loss(
            img_path=self.img_path,
            lnd_img_map=landmark_img_map,
            teacher_models=teacher_models,
            teachers_weight_loss=teachers_weight_loss,
            bath_size=self.BATCH_SIZE,
            num_points=self.output_len,
            ds_name=self.dataset_name,
            loss_type=0)
        '''compiling model'''
        student_model.compile(loss=loss_func,
                              optimizer=optimizer,
                              metrics=['mse', 'mae'],
                              target_tensors=train_landmarks)

        print('< ========== Start Training Student============= >')
        history = student_model.fit(
            train_images,
            train_landmarks,
            epochs=self.EPOCHS,
            steps_per_epoch=self.STEPS_PER_EPOCH,
            validation_data=(validation_images, validation_landmarks),
            validation_steps=self.STEPS_PER_VALIDATION_EPOCH,
            verbose=1,
            callbacks=callbacks_list)
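custom_teacher_student_loss itself is not shown; a distillation loss of this shape is typically a weighted sum of the student's error against the ground truth and against each teacher's prediction. A hedged sketch; the function and its weighting are assumptions, not the repository's API:

import tensorflow as tf

def teacher_student_loss_sketch(y_true, y_pred, teacher_preds, teacher_weights):
    """MSE to the ground truth plus a weighted MSE to each teacher's output."""
    loss = tf.reduce_mean(tf.square(y_true - y_pred))
    for t_pred, w in zip(teacher_preds, teacher_weights):
        loss += w * tf.reduce_mean(tf.square(t_pred - y_pred))  # pull student toward teacher
    return loss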
Example #25
from tf_record_utility import TFRecordUtility
from configuration import DatasetName, DatasetType, AffectnetConf, IbugConf, W300Conf, InputDataSize
from cnn_model import CNNModel
from pca_utility import PCAUtility
from image_utility import ImageUtility
import numpy as np
from train import Train
from test import Test

if __name__ == '__main__':
    tf_record_util = TFRecordUtility()
    pca_utility = PCAUtility()
    cnn_model = CNNModel()
    image_utility = ImageUtility()

    # trainer = Train(use_tf_record=False,
    #                 dataset_name=DatasetName.ibug,
    #                 custom_loss=False,
    #                 arch='mn_main',
    #                 inception_mode=True,
    #                 num_output_layers=1,
    #                 point_wise=True,
    #                 weight=None
    #                 )

    # tester = Test()

    # trainer = Train(use_tf_record=True,
    #                 dataset_name=DatasetName.ibug,
    #                 custom_loss=False,
    #                 arch='mn_r',