Example #1
 def train(self, arch, weight_path):
     """"""
     '''create loss'''
     c_loss = Custom_losses()
     '''create summary writer'''
     summary_writer = tf.summary.create_file_writer(
         "./train_logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S"))
     '''making models'''
     model = self.make_model(arch=arch, w_path=weight_path, is_old=False)
     '''create optimizer'''
     _lr = 1e-3
     optimizer_student = self._get_optimizer(lr=_lr)
     '''create sample generator'''
     x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = \
         self._create_generators()
     '''create train configuration'''
     step_per_epoch = len(x_train_filenames) // LearningConfig.batch_size
     '''start train:'''
     for epoch in range(LearningConfig.epochs):
         x_train_filenames, y_train_filenames = self._shuffle_data(
             x_train_filenames, y_train_filenames)
         for batch_index in range(step_per_epoch):
             '''load annotation and images'''
             images, annotation_gr = self._get_batch_sample(
                 batch_index=batch_index,
                 x_train_filenames=x_train_filenames,
                 y_train_filenames=y_train_filenames,
                 model=model)
             '''convert to tensor'''
             images = tf.cast(images, tf.float32)
             annotation_gr = tf.cast(annotation_gr, tf.float32)
             '''train step'''
             self.train_step(epoch=epoch,
                             step=batch_index,
                             total_steps=step_per_epoch,
                             images=images,
                             model=model,
                             annotation_gr=annotation_gr,
                             optimizer=optimizer_student,
                             summary_writer=summary_writer,
                             c_loss=c_loss)
         '''evaluating part'''
         img_batch_eval, pn_batch_eval = self._create_evaluation_batch(
             x_val_filenames, y_val_filenames)
         # loss_eval, loss_eval_tol_dif_stu, loss_eval_tol_dif_gt, loss_eval_tou_dif_stu, loss_eval_tou_dif_gt = \
         loss_eval = self._eval_model(img_batch_eval, pn_batch_eval, model)
         with summary_writer.as_default():
             tf.summary.scalar('Eval-LOSS', loss_eval, step=epoch)
         '''save weights'''
         model.save('./models/teacher_model_' + str(epoch) + '_' +
                    self.dataset_name + '_' + str(loss_eval) + '.h5')
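
The train_step method invoked above is not shown in this excerpt. Below is a minimal sketch of what it plausibly does in TF2: a gradient-tape forward/backward pass with a placeholder MSE loss. The real code presumably routes the loss through c_loss, so the loss line here is an assumption.

    def train_step(self, epoch, step, total_steps, images, model,
                   annotation_gr, optimizer, summary_writer, c_loss):
        with tf.GradientTape() as tape:
            '''forward pass in training mode'''
            annotation_pr = model(images, training=True)
            '''placeholder loss; the repository presumably calls into c_loss here'''
            loss_total = tf.reduce_mean(tf.square(annotation_gr - annotation_pr))
        '''backward pass and weight update'''
        gradients = tape.gradient(loss_total, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        '''log the training loss for TensorBoard'''
        with summary_writer.as_default():
            tf.summary.scalar('Train-LOSS', loss_total, step=epoch)
        tf.print('Epoch:', epoch, '| step:', step, '/', total_steps,
                 '| loss:', loss_total)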
Example #2
    def __init__(self,
                 use_tf_record,
                 dataset_name,
                 custom_loss,
                 arch,
                 inception_mode,
                 num_output_layers,
                 train_on_batch,
                 weight=None,
                 accuracy=100):
        c_loss = Custom_losses(dataset_name, accuracy)

        if dataset_name == DatasetName.ibug:
            self.SUM_OF_ALL_TRAIN_SAMPLES = IbugConf.number_of_all_sample
            self.tf_train_path = IbugConf.tf_train_path
            self.tf_eval_path = IbugConf.tf_evaluation_path
            self.output_len = IbugConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.cofw:
            self.SUM_OF_ALL_TRAIN_SAMPLES = CofwConf.number_of_all_sample
            self.tf_train_path = CofwConf.tf_train_path
            self.tf_eval_path = CofwConf.tf_evaluation_path
            self.output_len = CofwConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.wflw:
            self.SUM_OF_ALL_TRAIN_SAMPLES = WflwConf.number_of_all_sample
            self.tf_train_path = WflwConf.tf_train_path
            self.tf_eval_path = WflwConf.tf_evaluation_path
            self.output_len = WflwConf.num_of_landmarks * 2

        self.BATCH_SIZE = LearningConfig.batch_size
        self.STEPS_PER_VALIDATION_EPOCH = LearningConfig.steps_per_validation_epochs
        self.STEPS_PER_EPOCH = self.SUM_OF_ALL_TRAIN_SAMPLES // self.BATCH_SIZE
        self.EPOCHS = LearningConfig.epochs

        if custom_loss:
            self.loss = c_loss.asm_assisted_loss
        else:
            self.loss = losses.mean_squared_error

        self.arch = arch
        self.inception_mode = inception_mode
        self.weight = weight
        self.num_output_layers = num_output_layers

        self.accuracy = accuracy

        if train_on_batch:
            self.train_fit_on_batch()
        elif use_tf_record:
            self.train_fit()
        else:
            self.train_fit_gen()
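
Since this __init__ both configures and immediately launches training, constructing the object is the whole entry point. A hypothetical invocation follows; the class name Train and all argument values are assumptions for illustration, not taken from the repository.

trainer = Train(use_tf_record=False,
                dataset_name=DatasetName.ibug,
                custom_loss=True,
                arch='efficientNet',   # hypothetical architecture key
                inception_mode=False,
                num_output_layers=1,
                train_on_batch=False,
                weight=None,
                accuracy=100)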
Example #3
 def __init__(self, dataset_name, geo_custom_loss, regressor_arch,
              discriminator_arch, regressor_weight, discriminator_weight,
              input_shape_reg, input_shape_disc):
     self.dataset_name = dataset_name
     self.geo_custom_loss = geo_custom_loss
     self.regressor_arch = regressor_arch
     self.discriminator_arch = discriminator_arch
     self.regressor_weight = regressor_weight
     self.discriminator_weight = discriminator_weight
     self.input_shape_reg = input_shape_reg
     self.input_shape_disc = input_shape_disc
     if dataset_name == DatasetName.ibug:
         self.SUM_OF_ALL_TRAIN_SAMPLES = IbugConf.number_of_all_sample
         self.num_landmark = IbugConf.num_of_landmarks * 2
         self.num_face_graph_elements = IbugConf.num_face_graph_elements
         self.train_images_dir = IbugConf.train_images_dir
         self.train_hm_dir = IbugConf.graph_face_dir
         self.train_point_dir = IbugConf.normalized_points_npy_dir
     elif dataset_name == DatasetName.cofw:
         self.SUM_OF_ALL_TRAIN_SAMPLES = CofwConf.number_of_all_sample
         self.num_landmark = CofwConf.num_of_landmarks * 2
         self.num_face_graph_elements = CofwConf.num_face_graph_elements
         self.train_images_dir = CofwConf.train_images_dir
         self.train_hm_dir = CofwConf.graph_face_dir
         self.train_point_dir = CofwConf.normalized_points_npy_dir
     elif dataset_name == DatasetName.wflw:
         self.SUM_OF_ALL_TRAIN_SAMPLES = WflwConf.number_of_all_sample
         self.num_landmark = WflwConf.num_of_landmarks * 2
         self.num_face_graph_elements = WflwConf.num_face_graph_elements
         self.train_images_dir = WflwConf.train_images_dir
         self.train_hm_dir = WflwConf.graph_face_dir
         self.train_point_dir = WflwConf.normalized_points_npy_dir
     c_loss = Custom_losses(dataset_name, accuracy=100)
     if geo_custom_loss:
         self.geo_loss = c_loss.inter_landmark_loss
     else:
         self.geo_loss = losses.mean_squared_error
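
Every example in this set branches on DatasetName and reads per-dataset *Conf classes defined elsewhere in the project. A minimal sketch of their assumed shape; all values are placeholders except the iBUG landmark count, which is 68 in the 300-W annotation scheme. CofwConf and WflwConf would follow the same pattern.

class DatasetName:
    ibug = 'ibug'
    cofw = 'cofw'
    wflw = 'wflw'
    affectnet = 'affectnet'

class IbugConf:
    number_of_all_sample = 0          # placeholder: total number of training samples
    num_of_landmarks = 68             # iBUG/300-W annotates 68 facial landmarks
    num_face_graph_elements = 0       # placeholder
    train_images_dir = ''             # placeholder paths
    graph_face_dir = ''
    normalized_points_npy_dir = ''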
Example #4
    def __init__(self,
                 use_tf_record,
                 dataset_name,
                 custom_loss,
                 arch,
                 inception_mode,
                 num_output_layers,
                 point_wise,
                 weight=None):
        c_loss = Custom_losses()

        if dataset_name == DatasetName.ibug:
            self.SUM_OF_ALL_TRAIN_SAMPLES = IbugConf.sum_of_train_samples
        elif dataset_name == DatasetName.affectnet:
            self.SUM_OF_ALL_TRAIN_SAMPLES = AffectnetConf.sum_of_train_samples

        self.BATCH_SIZE = LearningConfig.batch_size
        self.STEPS_PER_VALIDATION_EPOCH = LearningConfig.steps_per_validation_epochs
        self.STEPS_PER_EPOCH = self.SUM_OF_ALL_TRAIN_SAMPLES // self.BATCH_SIZE
        self.EPOCHS = LearningConfig.epochs

        if custom_loss:
            self.loss = c_loss.custom_loss_hm
        else:
            self.loss = losses.mean_squared_error

        self.arch = arch
        self.inception_mode = inception_mode
        self.weight = weight
        self.num_output_layers = num_output_layers
        self.point_wise = point_wise

        if use_tf_record:
            self.train_fit()
        else:
            if point_wise:
                self.train_fit_gen_point_wise()
            else:
                self.train_fit_gen()
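
The dispatch above ends in train_fit_gen, which is not shown here. A plausible sketch, assuming it compiles a Keras model and trains from Python generators; _build_model and _make_generators are hypothetical helper names, not identifiers from the repository.

    def train_fit_gen(self):
        model = self._build_model()                    # hypothetical model factory
        model.compile(loss=self.loss, optimizer='adam', metrics=['mse', 'mae'])
        train_gen, val_gen = self._make_generators()   # hypothetical generator helpers
        model.fit(train_gen,
                  epochs=self.EPOCHS,
                  steps_per_epoch=self.STEPS_PER_EPOCH,
                  validation_data=val_gen,
                  validation_steps=self.STEPS_PER_VALIDATION_EPOCH,
                  verbose=1)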
Example #5
    def train(self, teachers_arch, teachers_weight_files, teachers_weight_loss,
              teachers_tf_train_paths, student_weight_file):
        """
        :param teachers_arch: an array containing architecture of teacher networks
        :param teachers_weight_files: an array containing teachers h5 files
        :param teachers_weight_loss: an array containing weight of teachers model in loss function
        :param teachers_tf_train_paths: an array containing path of train tf records
        :param student_weight_file : student h5 weight path
        :return: null
        """

        tf_record_util = TFRecordUtility(self.output_len)
        c_loss = Custom_losses()
        '''-------------------------------------'''
        '''     preparing teacher models        '''
        '''-------------------------------------'''
        teacher_models = []
        cnn = CNNModel()
        for i in range(len(teachers_arch)):
            teacher_train_images, teacher_train_landmarks = tf_record_util.create_training_tensor_points(
                tfrecord_filename=teachers_tf_train_paths[i],
                batch_size=self.BATCH_SIZE)
            model = cnn.get_model(train_images=teacher_train_images,
                                  arch=teachers_arch[i],
                                  num_output_layers=1,
                                  output_len=self.output_len,
                                  input_tensor=None)

            model.load_weights(teachers_weight_files[i])
            teacher_models.append(model)
        '''---------------------------------'''
        '''     creating student model      '''
        '''---------------------------------'''
        '''retrieve tf data'''
        train_images, train_landmarks = tf_record_util.create_training_tensor_points(
            tfrecord_filename=self.tf_train_path, batch_size=self.BATCH_SIZE)
        validation_images, validation_landmarks = tf_record_util.create_training_tensor_points(
            tfrecord_filename=self.tf_eval_path, batch_size=self.BATCH_SIZE)
        '''create model'''
        student_model = cnn.get_model(train_images=train_images,
                                      arch=self.arch,
                                      num_output_layers=1,
                                      output_len=self.output_len,
                                      input_tensor=train_images,
                                      inp_shape=None)
        if student_weight_file is not None:
            student_model.load_weights(student_weight_file)
        '''prepare callbacks'''
        callbacks_list = self._prepare_callback()
        ''' define optimizers'''
        optimizer = Adam(lr=1e-3,
                         beta_1=0.9,
                         beta_2=0.999,
                         decay=1e-5,
                         amsgrad=False)
        '''create loss'''
        # file = open("map_aug" + self.dataset_name, 'rb')
        file = open("map_orig" + self.dataset_name, 'rb')
        landmark_img_map = pickle.load(file)
        file.close()

        # loss_func = c_loss.custom_teacher_student_loss_cos(img_path=self.img_path, lnd_img_map=landmark_img_map,
        #                                                    teacher_models=teacher_models,
        #                                                    teachers_weight_loss=teachers_weight_loss,
        #                                                    bath_size=self.BATCH_SIZE,
        #                                                    num_points=self.output_len, cos_weight=cos_weight)

        loss_func = c_loss.custom_teacher_student_loss(
            img_path=self.img_path,
            lnd_img_map=landmark_img_map,
            teacher_models=teacher_models,
            teachers_weight_loss=teachers_weight_loss,
            bath_size=self.BATCH_SIZE,
            num_points=self.output_len,
            ds_name=self.dataset_name,
            loss_type=0)
        '''compiling model'''
        student_model.compile(loss=loss_func,
                              optimizer=optimizer,
                              metrics=['mse', 'mae'],
                              target_tensors=train_landmarks)

        print('< ========== Start Training Student ========== >')
        history = student_model.fit(
            train_images,
            train_landmarks,
            epochs=self.EPOCHS,
            steps_per_epoch=self.STEPS_PER_EPOCH,
            validation_data=(validation_images, validation_landmarks),
            validation_steps=self.STEPS_PER_VALIDATION_EPOCH,
            verbose=1,
            callbacks=callbacks_list)
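
_prepare_callback is referenced but not shown. A reasonable sketch assuming standard Keras callbacks; the checkpoint path, filename pattern, and monitored metric are illustrative assumptions.

    def _prepare_callback(self):
        from keras.callbacks import ModelCheckpoint, CSVLogger
        '''checkpoint the best weights and keep a CSV log of every epoch'''
        checkpoint = ModelCheckpoint(
            './models/weights-{epoch:02d}-{val_loss:.5f}.h5',
            monitor='val_loss', save_best_only=True, verbose=1)
        csv_logger = CSVLogger('./train_history.csv', append=True)
        return [checkpoint, csv_logger]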
Example #6
    def train(self, weight_path):
        '''create loss'''
        c_loss = Custom_losses(dataset_name=self.dataset_name, accuracy=90)
        '''create summary writer'''
        summary_writer = tf.summary.create_file_writer(
            "./train_logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S"))
        '''making models'''
        model = self.make_model(arch=self.arch, w_path=weight_path)
        # model.save('stu.h5')
        # '''create optimizer'''
        # optimizer = self._get_optimizer(lr=_lr)
        '''create sample generator'''
        # x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = self._create_generators()
        x_train_filenames, y_train_filenames = self._create_generators()
        img_val_filenames, pn_val_filenames = self._create_generators(
            img_path=self.eval_img_path,
            annotation_path=self.eval_annotation_path)

        nme, fr = self._eval_model(model, img_val_filenames, pn_val_filenames)
        print('nme:' + str(nme))
        print('fr:' + str(fr))
        '''create train configuration'''
        step_per_epoch = len(x_train_filenames) // LearningConfig.batch_size
        '''create highlighted points'''
        # todo: need to complete this:
        # bold_landmarks_point_map = self.create_FL_highligted_points_map(batch_size=LearningConfig.batch_size,
        #                                                                 num_of_landmark=self.num_landmark,
        #                                                                 ds_name=self.dataset_name)
        '''loss array to figure out '''
        '''start train:'''
        # optimizer = self._get_optimizer(lr=0.1)
        for epoch in range(LearningConfig.epochs):
            '''calculate Learning rate'''
            _lr = self.calc_learning_rate(iterations=epoch,
                                          step_size=20,
                                          base_lr=1e-3,
                                          max_lr=1e-7)
            optimizer = self._get_optimizer(lr=_lr)
            ''''''
            x_train_filenames, y_train_filenames = self._shuffle_data(
                x_train_filenames, y_train_filenames)
            for batch_index in range(step_per_epoch):
                '''load annotation and images'''
                images, annotation_gr = self._get_batch_sample(
                    batch_index=batch_index,
                    x_train_filenames=x_train_filenames,
                    y_train_filenames=y_train_filenames)
                adoptive_weight = self.calculate_adoptive_weight(
                    epoch=epoch,
                    batch_index=batch_index,
                    y_train_filenames=y_train_filenames)
                '''convert to tensor'''
                images = tf.cast(images, tf.float32)
                annotation_gr = tf.cast(annotation_gr, tf.float32)
                '''train step'''
                self.train_step(epoch=epoch,
                                step=batch_index,
                                total_steps=step_per_epoch,
                                images=images,
                                model=model,
                                annotation_gr=annotation_gr,
                                adoptive_weight=adoptive_weight,
                                optimizer=optimizer,
                                summary_writer=summary_writer,
                                c_loss=c_loss)
            '''evaluating part'''
            nme, fr = self._eval_model(model, img_val_filenames,
                                       pn_val_filenames)
            print('nme:' + str(nme))
            print('fr:' + str(fr))
            with summary_writer.as_default():
                tf.summary.scalar('eval-nme', nme, step=epoch)
                tf.summary.scalar('eval-fr', fr, step=epoch)
            '''save weights'''
            model.save(self.save_path + 'ASM_' + str(epoch) + '_' +
                       self.dataset_name + '_nme_' + str(nme) + '_fr_' +
                       str(fr) + '.h5')
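
calc_learning_rate above takes step_size, base_lr, and max_lr, which matches the signature of a triangular cyclical learning-rate schedule (Smith, 2017). Note the call passes base_lr=1e-3 and max_lr=1e-7, i.e. base above max, so the "peak" of each cycle actually dips the rate down toward 1e-7. A sketch under that cyclical-schedule assumption:

    def calc_learning_rate(self, iterations, step_size, base_lr, max_lr):
        import math
        '''triangular cyclical schedule: move linearly from base_lr to max_lr
           over step_size iterations, then back again'''
        cycle = math.floor(1 + iterations / (2 * step_size))
        x = abs(iterations / step_size - 2 * cycle + 1)
        return base_lr + (max_lr - base_lr) * max(0.0, 1.0 - x)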
Example #7
File: train.py  Project: aliprf/KT_ASM_Net
    def __init__(self,
                 use_tf_record,
                 dataset_name,
                 custom_loss,
                 arch,
                 inception_mode,
                 num_output_layers,
                 train_on_batch,
                 on_point,
                 heatmap,
                 weight=None,
                 accuracy=100):
        c_loss = Custom_losses()
        self.dataset_name = dataset_name

        if dataset_name == DatasetName.ibug:
            # self.SUM_OF_ALL_TRAIN_SAMPLES = IbugConf.orig_number_of_training
            self.SUM_OF_ALL_TRAIN_SAMPLES = IbugConf.number_of_all_sample
            self.output_len = IbugConf.num_of_landmarks * 2
            if accuracy == 100:
                # self.tf_train_path = IbugConf.no_aug_train_tf_path+'train100.tfrecords'
                self.tf_train_path = IbugConf.augmented_train_tf_path + 'train100.tfrecords'
                self.tf_eval_path = IbugConf.augmented_train_tf_path + 'eval100.tfrecords'
            elif accuracy == 90:
                # self.tf_train_path = IbugConf.augmented_train_tf_path+'train90.tfrecords'
                self.tf_train_path = IbugConf.augmented_train_tf_path + 'train90.tfrecords'
                self.tf_eval_path = IbugConf.augmented_train_tf_path + 'eval90.tfrecords'

        elif dataset_name == DatasetName.cofw:
            '''we use augmented data for the teacher'''
            self.SUM_OF_ALL_TRAIN_SAMPLES = CofwConf.number_of_all_sample
            self.output_len = CofwConf.num_of_landmarks * 2
            if accuracy == 100:
                self.tf_train_path = CofwConf.augmented_train_tf_path + 'train100.tfrecords'
                self.tf_eval_path = CofwConf.augmented_train_tf_path + 'eval100.tfrecords'
            elif accuracy == 90:
                self.tf_train_path = CofwConf.augmented_train_tf_path + 'train90.tfrecords'
                self.tf_eval_path = CofwConf.augmented_train_tf_path + 'eval90.tfrecords'

        elif dataset_name == DatasetName.wflw:
            '''we use the original data for the teacher'''
            self.SUM_OF_ALL_TRAIN_SAMPLES = WflwConf.number_of_all_sample
            self.output_len = WflwConf.num_of_landmarks * 2
            if accuracy == 100:
                self.tf_train_path = WflwConf.augmented_train_tf_path + 'train100.tfrecords'
                self.tf_eval_path = WflwConf.augmented_train_tf_path + 'eval100.tfrecords'
            elif accuracy == 90:
                self.tf_train_path = WflwConf.no_aug_train_tf_path + 'train90.tfrecords'
                self.tf_eval_path = WflwConf.no_aug_train_tf_path + 'eval90.tfrecords'

        self.BATCH_SIZE = LearningConfig.batch_size
        self.STEPS_PER_VALIDATION_EPOCH = LearningConfig.steps_per_validation_epochs
        self.STEPS_PER_EPOCH = self.SUM_OF_ALL_TRAIN_SAMPLES // self.BATCH_SIZE
        self.EPOCHS = LearningConfig.epochs

        if custom_loss:
            self.loss = c_loss.custom_loss_hm
        else:
            # self.loss = losses.mean_squared_error
            self.loss = losses.mean_absolute_error

        self.arch = arch
        self.inception_mode = inception_mode
        self.weight = weight
        self.num_output_layers = num_output_layers

        self.accuracy = accuracy

        if train_on_batch:
            self.train_fit_on_batch()
        elif heatmap and use_tf_record:
            self.train_fit_heatmap()
        elif use_tf_record:
            self.train_fit()
        else:
            self.train_fit_gen(on_point)