Example #1
    def test_accuracy_dynamic(self, model):
        dhp = DataHelper()
        '''create batches'''
        img_filenames, exp_filenames, spm_up_filenames, spm_md_filenames, spm_bo_filenames = \
            dhp.create_generator_full_path_with_spm(img_path=self.img_path,
                                                    annotation_path=self.anno_path)
        print('number of validation samples:', len(img_filenames))
        exp_pr_lbl = []
        exp_gt_lbl = []

        dds = DynamicDataset()
        ds = dds.create_dataset(img_filenames=img_filenames,
                                spm_up_filenames=spm_up_filenames,
                                spm_md_filenames=spm_md_filenames,
                                spm_bo_filenames=spm_bo_filenames,
                                anno_names=exp_filenames,
                                is_validation=True)
        batch_index = 0
        for global_bunch, upper_bunch, middle_bunch, bottom_bunch, exp_gt_b in ds:
            '''predict on batch'''
            global_bunch = global_bunch[:, -1, :, :]
            upper_bunch = upper_bunch[:, -1, :, :]
            middle_bunch = middle_bunch[:, -1, :, :]
            bottom_bunch = bottom_bunch[:, -1, :, :]

            probab_exp_pr_b, _, _, _, _ = model.predict_on_batch(
                [global_bunch, upper_bunch, middle_bunch, bottom_bunch])
            exp_pr_b = np.argmax(probab_exp_pr_b, axis=-1)

            exp_pr_lbl += exp_pr_b.tolist()
            exp_gt_lbl += np.array(exp_gt_b).tolist()
            batch_index += 1

        exp_pr_lbl = np.array(exp_pr_lbl, dtype=np.int64)
        exp_gt_lbl = np.array(exp_gt_lbl, dtype=np.int64)

        global_accuracy = accuracy_score(exp_gt_lbl, exp_pr_lbl)
        # normalize the confusion matrix; 500 is presumably the fixed number
        # of validation samples per class
        conf_mat = confusion_matrix(exp_gt_lbl, exp_pr_lbl) / 500.0
        # conf_mat = tf.math.confusion_matrix(exp_gt_lbl, exp_pr_lbl, num_classes=7) / 500.0

        '''release references so the dataset and batches can be garbage-collected'''
        ds = None
        img_filenames = None
        spm_up_filenames = None
        spm_md_filenames = None
        spm_bo_filenames = None
        exp_filenames = None
        global_bunch = None
        upper_bunch = None
        middle_bunch = None
        bottom_bunch = None

        # classes are balanced in the validation set, so the average per-class
        # accuracy equals the global accuracy
        return global_accuracy, conf_mat
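Since the confusion matrix above is normalized by a fixed per-class count, the per-class accuracies can be read straight off its diagonal. A minimal sketch of that readout, assuming 500 validation images per class (the divisor used above) and the 7 expression classes suggested by the commented-out tf.math.confusion_matrix call; the helper name per_class_accuracy is hypothetical:

import numpy as np
from sklearn.metrics import confusion_matrix

def per_class_accuracy(gt_labels, pr_labels, num_classes=7, per_class_count=500):
    # hypothetical helper: per-class accuracy from a confusion matrix that is
    # normalized by the (assumed) fixed number of validation samples per class
    cm = confusion_matrix(gt_labels, pr_labels, labels=list(range(num_classes)))
    cm = cm / per_class_count      # same normalization as the example above
    return np.diag(cm)             # entry i = fraction of class i predicted correctly

# toy usage with synthetic labels
gt = np.repeat(np.arange(7), 500)                            # 500 samples per class
pr = np.random.default_rng(0).integers(0, 7, size=gt.shape)  # hypothetical predictions
print(per_class_accuracy(gt, pr))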
Example #2
    def train(self, arch, weight_path):
        """"""

        '''create loss'''
        c_loss = CustomLosses()

        '''create summary writer'''
        summary_writer = tf.summary.create_file_writer(
            "./train_logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S"))
        start_train_date = datetime.now().strftime("%Y%m%d-%H%M%S")

        '''making models'''

        model = self.make_model(arch=arch, w_path=weight_path)
        '''create save path'''

        if self.dataset_name == DatasetName.affectnet:
            save_path = AffectnetConf.weight_save_path + start_train_date + '/'
        elif self.dataset_name == DatasetName.rafdb:
            save_path = RafDBConf.weight_save_path + start_train_date + '/'
        else:
            raise ValueError('unsupported dataset: ' + str(self.dataset_name))
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        '''create sample generator'''
        dhp = DataHelper()
        '''     Train   Generator'''
        img_filenames, exp_filenames, spm_up_filenames, spm_md_filenames, spm_bo_filenames = \
            dhp.create_generator_full_path_with_spm(img_path=self.img_path,
                                                    annotation_path=self.annotation_path)
        dds = DynamicDataset()
        ds = dds.create_dataset(img_filenames=img_filenames,
                                spm_up_filenames=spm_up_filenames,
                                spm_md_filenames=spm_md_filenames,
                                spm_bo_filenames=spm_bo_filenames,
                                anno_names=exp_filenames)

        # global_accuracy, conf_mat = self._eval_model(model=model)

        '''create train configuration'''
        step_per_epoch = len(img_filenames) // LearningConfig.batch_size
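        # gradient-accumulation bookkeeping: `virtual_step_per_epoch` micro-batches
        # of `batch_size` samples form one virtual batch of `virtual_batch_size`
        # samples (used only by the commented-out accumulation block below)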
        gradients = None
        virtual_step_per_epoch = LearningConfig.virtual_batch_size // LearningConfig.batch_size

        '''create optimizer'''
        optimizer = tf.keras.optimizers.SGD(self.lr, momentum=0.9)
        # optimizer = tf.keras.optimizers.Adam(self.lr)

        '''start train:'''
        for epoch in range(LearningConfig.epochs):
            batch_index = 0
            for global_bunch, upper_bunch, middle_bunch, bottom_bunch, exp_batch in ds:
                exp_batch = exp_batch[:, -1]
                global_bunch = global_bunch[:, -1, :, :]
                upper_bunch = upper_bunch[:, -1, :, :]
                middle_bunch = middle_bunch[:, -1, :, :]
                bottom_bunch = bottom_bunch[:, -1, :, :]

                # self.test_print_batch(global_bunch, upper_bunch, middle_bunch, bottom_bunch, batch_index)
                '''train step'''
                step_gradients = self.train_step(epoch=epoch, step=batch_index, total_steps=step_per_epoch,
                                                 global_bunch=global_bunch,
                                                 upper_bunch=upper_bunch,
                                                 middle_bunch=middle_bunch,
                                                 bottom_bunch=bottom_bunch,
                                                 anno_exp=exp_batch,
                                                 model=model, optimizer=optimizer, c_loss=c_loss,
                                                 summary_writer=summary_writer)
                # '''apply gradients'''
                # if batch_index > 0 and batch_index % virtual_step_per_epoch == 0:
                #     '''apply gradient'''
                #     print("===============apply gradient================= ")
                #     optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                #     gradients = None
                # else:
                #     '''accumulate gradient'''
                #     if gradients is None:
                #         gradients = [self._flat_gradients(g) / LearningConfig.virtual_batch_size for g in
                #                      step_gradients]
                #     else:
                #         for i, g in enumerate(step_gradients):
                #             gradients[i] += self._flat_gradients(g) / LearningConfig.virtual_batch_size
                batch_index += 1
            '''evaluating part'''
            global_accuracy, conf_mat = self._eval_model(model=model)
            '''save weights'''
            model.save(save_path + '_' + str(epoch) + '_' + self.dataset_name +
                       '_AC_' + str(global_accuracy) +
                       '.h5')

            '''drop the learning rate on a fixed epoch schedule'''
            if epoch > 0 and epoch % self.epochs_drop == 0:
                self.lr *= self.drop
                optimizer = tf.keras.optimizers.SGD(self.lr, momentum=0.9)
                print('learning rate dropped to:', self.lr)
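The loop above delegates the actual optimization to self.train_step, which is not shown in this example. A minimal sketch of such a step, assuming the model returns the expression logits as its first output and swapping in a plain sparse cross-entropy for the unshown CustomLosses term (both are assumptions):

import tensorflow as tf

def train_step(epoch, step, total_steps, global_bunch, upper_bunch, middle_bunch,
               bottom_bunch, anno_exp, model, optimizer, c_loss, summary_writer):
    # hypothetical implementation; the real train_step is not part of this example
    with tf.GradientTape() as tape:
        # forward pass over the four region crops, as in the loop above
        logits, *_ = model([global_bunch, upper_bunch, middle_bunch, bottom_bunch],
                           training=True)
        # assumed loss: sparse cross-entropy on the expression labels
        loss = tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
            anno_exp, logits, from_logits=True))
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    with summary_writer.as_default():
        tf.summary.scalar('loss', loss, step=epoch * total_steps + step)
    # returning the per-step gradients keeps the commented-out accumulation
    # block in the training loop usable as written
    return gradients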