def train(self, arch, weight_path):
        """"""
        # create the loss object
        c_loss = CustomLosses()
        # create a summary writer; reuse one timestamp for both the log dir and the save path
        start_train_date = datetime.now().strftime("%Y%m%d-%H%M%S")
        summary_writer = tf.summary.create_file_writer(
            "./train_logs/fit/" + start_train_date)
        # build the model (optionally warm-started from weight_path)
        model = self.make_model(arch=arch, w_path=weight_path)
        # create the weight-save directory, one folder per training run
        if self.dataset_name == DatasetName.affectnet:
            save_path = AffectnetConf.weight_save_path + start_train_date + '/'
        elif self.dataset_name == DatasetName.rafdb:
            save_path = RafDBConf.weight_save_path + start_train_date + '/'
        else:
            raise ValueError('Unsupported dataset: ' + str(self.dataset_name))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        # create the sample generator
        dhp = DataHelper()
        # train generator: file-name lists per facial region plus labels
        face_img_filenames, eyes_img_filenames, nose_img_filenames, mouth_img_filenames, exp_filenames = \
            dhp.create_masked_generator_full_path(img_path=self.masked_img_path, annotation_path=self.annotation_path,
                                                  num_of_samples=self.num_of_samples)
        # create the dataset
        cds = CustomDataset()
        ds = cds.create_dataset(file_names_face=face_img_filenames,
                                file_names_eyes=eyes_img_filenames,
                                file_names_nose=nose_img_filenames,
                                file_names_mouth=mouth_img_filenames,
                                anno_names=exp_filenames)
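        # each element yielded by ds is a 5-tuple: one image tensor per facial
        # region (face, eyes, nose, mouth) plus the expression label; the loop
        # below unpacks these as global_bunch, upper_bunch, middle_bunch,
        # bottom_bunch, exp_batch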

        # conf_mat is passed to train_step and refreshed by _eval_model after every epoch
        conf_mat = None
        # train configuration
        step_per_epoch = len(face_img_filenames) // LearningConfig.batch_size
        gradients = None
        # number of real batches per virtual (accumulated) batch
        virtual_step_per_epoch = LearningConfig.virtual_batch_size // LearningConfig.batch_size
        # create the optimizer
        optimizer = tf.keras.optimizers.SGD(self.lr, momentum=0.9)
        # optimizer = tf.keras.optimizers.Adam(self.lr)
        # start training
        for epoch in range(LearningConfig.epochs):
            batch_index = 0
            for global_bunch, upper_bunch, middle_bunch, bottom_bunch, exp_batch in ds:
                # squeeze: keep only the last slice along axis 1 of every tensor
                exp_batch = exp_batch[:, -1]
                global_bunch = global_bunch[:, -1, :, :]
                upper_bunch = upper_bunch[:, -1, :, :]
                middle_bunch = middle_bunch[:, -1, :, :]
                bottom_bunch = bottom_bunch[:, -1, :, :]

                # train step
                step_gradients = self.train_step(epoch=epoch,
                                                 step=batch_index,
                                                 total_steps=step_per_epoch,
                                                 global_bunch=global_bunch,
                                                 upper_bunch=upper_bunch,
                                                 middle_bunch=middle_bunch,
                                                 bottom_bunch=bottom_bunch,
                                                 anno_exp=exp_batch,
                                                 model=model,
                                                 optimizer=optimizer,
                                                 c_loss=c_loss,
                                                 summary_writer=summary_writer,
                                                 conf_mat=conf_mat)
                # gradient accumulation over virtual batches is currently disabled;
                # gradients are presumably applied inside train_step, and the
                # returned step_gradients feed only the commented path below:
                # if batch_index > 0 and batch_index % virtual_step_per_epoch == 0:
                #     optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                #     gradients = None
                # else:
                #     if gradients is None:
                #         gradients = [self._flat_gradients(g) / LearningConfig.virtual_batch_size
                #                      for g in step_gradients]
                #     else:
                #         for i, g in enumerate(step_gradients):
                #             gradients[i] += self._flat_gradients(g) / LearningConfig.virtual_batch_size
                batch_index += 1
            # evaluate after every epoch
            global_accuracy, conf_mat = self._eval_model(model=model)
            # save the weights, tagging the file with epoch, dataset, and accuracy
            model.save(save_path + '_' + str(epoch) + '_' + self.dataset_name +
                       '_AC_' + str(global_accuracy) + '.h5')
            # step-decay the learning rate every epochs_drop epochs
            if epoch > 0 and epoch % self.epochs_drop == 0:
                self.lr *= self.drop
                optimizer = tf.keras.optimizers.SGD(self.lr, momentum=0.9)

    def test_accuracy(self, model):
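        """Run the model over the whole evaluation set and return (accuracy, confusion_matrix)."""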
        dhp = DataHelper()
        batch_size = LearningConfig.batch_size
        # create file-name lists for every facial region plus the labels
        face_img_filenames, eyes_img_filenames, nose_img_filenames, mouth_img_filenames, exp_filenames = \
            dhp.create_masked_generator_full_path(
                img_path=self.masked_img_path,
                annotation_path=self.anno_path, label=None, num_of_samples=None)
        print('number of evaluation samples:', len(face_img_filenames))
        step_per_epoch = len(face_img_filenames) // batch_size
        exp_pr_lbl = []
        exp_gt_lbl = []

        # build the evaluation dataset
        cds = CustomDataset()
        ds = cds.create_dataset(file_names_face=face_img_filenames,
                                file_names_eyes=eyes_img_filenames,
                                file_names_nose=nose_img_filenames,
                                file_names_mouth=mouth_img_filenames,
                                anno_names=exp_filenames,
                                is_validation=True,
                                ds=DatasetName.rafdb)

        batch_index = 0
        for global_bunch, upper_bunch, middle_bunch, bottom_bunch, exp_gt_b in tqdm(ds):
            # squeeze: keep only the last slice along axis 1 of every tensor
            exp_gt_b = exp_gt_b[:, -1]
            global_bunch = global_bunch[:, -1, :, :]
            upper_bunch = upper_bunch[:, -1, :, :]
            middle_bunch = middle_bunch[:, -1, :, :]
            bottom_bunch = bottom_bunch[:, -1, :, :]

            # predict on the batch; the first model output carries the expression scores
            probab_exp_pr_b, _, _, _, _ = model.predict_on_batch(
                [global_bunch, upper_bunch, middle_bunch, bottom_bunch])
            exp_pr_b = np.argmax(probab_exp_pr_b, axis=1)

            exp_pr_lbl += exp_pr_b.tolist()
            exp_gt_lbl += np.array(exp_gt_b).tolist()
            batch_index += 1
        exp_pr_lbl = np.float64(np.array(exp_pr_lbl))
        exp_gt_lbl = np.float64(np.array(exp_gt_lbl))

        global_accuracy = accuracy_score(exp_gt_lbl, exp_pr_lbl)
        conf_mat = confusion_matrix(exp_gt_lbl, exp_pr_lbl)

        # drop references so the dataset and file lists can be garbage-collected
        ds = None
        face_img_filenames = None
        eyes_img_filenames = None
        nose_img_filenames = None
        mouth_img_filenames = None
        exp_filenames = None

        return global_accuracy, conf_mat
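
    # Example usage (a sketch: the class name `TrainModel`, its constructor
    # arguments, the 'xcp' arch string, and the weight path are hypothetical,
    # since those definitions lie outside this section):
    #
    #   trainer = TrainModel(dataset_name=DatasetName.rafdb)
    #   trainer.train(arch='xcp', weight_path=None)
    #   model = tf.keras.models.load_model('path/to/saved_weights.h5')
    #   acc, conf_mat = trainer.test_accuracy(model=model)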