Example #1
0
    def evaluate(self, current_step):
        '''
        Evaluate the results of the model.
        The evaluation metric may change (e.g. accuracy, mAP, ...),
        or other methods provided by the official implementation can be called here.
        '''
        self.engine.set_requires_grad(self.engine.MemAE, False)
        self.engine.MemAE.eval()
        tb_writer = self.engine.kwargs['writer_dict']['writer']
        global_steps = self.engine.kwargs['writer_dict'][
            'global_steps_{}'.format(self.engine.kwargs['model_type'])]
        frame_num = self.engine.config.DATASET.val.clip_length
        clip_step = self.engine.config.DATASET.val.clip_step
        psnr_records = []
        score_records = []
        # total = 0
        num_videos = 0
        random_video_sn = torch.randint(0, len(self.engine.test_dataset_keys),
                                        (1, ))
        # calc the score for the test dataset
        for sn, video_name in enumerate(self.engine.test_dataset_keys):
            num_videos += 1
            # need to improve
            dataset = self.engine.test_dataset_dict[video_name]
            len_dataset = dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            # test_iters = len_dataset // clip_step
            test_counter = 0

            data_loader = DataLoader(dataset=dataset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=1)
            vis_range = range(int(len_dataset * 0.5),
                              int(len_dataset * 0.5 + 5))
            # scores = np.empty(shape=(len_dataset,),dtype=np.float32)
            scores = torch.zeros(len_dataset)
            # scores = [0.0 for i in range(len_dataset)]
            for clip_sn, (test_input, anno, meta) in enumerate(data_loader):
                test_target = test_input.cuda()
                time_len = test_input.shape[2]
                output, _ = self.engine.MemAE(test_target)
                clip_score = reconstruction_loss(output, test_target)

                # scores[test_counter*time_len:(test_counter + 1)*time_len] = clip_score.squeeze(0)
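                # reconstruction_loss is assumed to give one error per frame of the clip;
                # the clip's errors fill the matching window of the per-video score array,
                # and the final overlapping clip only contributes its tail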
                if (frame_num + test_counter) > len_dataset:
                    temp = test_counter + frame_num - len_dataset
                    scores[test_counter:len_dataset] = clip_score[temp:]
                else:
                    scores[test_counter:(frame_num +
                                         test_counter)] = clip_score

                test_counter += 1

                if sn == random_video_sn and (clip_sn in vis_range):
                    vis_objects = OrderedDict({
                        'memae_eval_clip':
                        test_target.detach(),
                        'memae_eval_clip_hat':
                        output.detach()
                    })
                    tensorboard_vis_images(
                        vis_objects,
                        tb_writer,
                        global_steps,
                        normalize=self.engine.normalize.param['val'])

                if test_counter >= test_iters:
                    # scores[:frame_num-1]=(scores[frame_num-1],) # fix the bug: TypeError: can only assign an iterable
                    smax = max(scores)
                    smin = min(scores)
                    # normal_scores = np.array([(1.0 - np.divide(s-smin, smax)) for s in scores])
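                    # min-max normalise the errors and invert them, so larger
                    # reconstruction errors map to lower normality scores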
                    normal_scores = (1.0 - torch.div(
                        scores - smin, smax - smin)).detach().cpu().numpy()
                    normal_scores = np.clip(normal_scores, 0, None)
                    score_records.append(normal_scores)
                    print(f'finish test video set {video_name}')
                    break

        self.engine.pkl_path = save_score_results(
            self.engine.config,
            self.engine.logger,
            verbose=self.engine.verbose,
            config_name=self.engine.config_name,
            current_step=current_step,
            time_stamp=self.engine.kwargs["time_stamp"],
            score=score_records)
        results = self.engine.evaluate_function(
            self.engine.pkl_path, self.engine.logger, self.engine.config,
            self.engine.config.DATASET.score_type)
        self.engine.logger.info(results)
        tb_writer.add_text('memae: AUC of ROC curve', f'auc is {results.auc}',
                           global_steps)
        return results.auc
Example #2
0
    def evaluate(self, current_step):
        '''
        Evaluate the results of the model.
        The evaluation metric may change (e.g. accuracy, mAP, ...),
        or other methods provided by the official implementation can be called here.
        '''
        # self.trainer.set_requires_grad(self.trainer.A, False)
        # self.trainer.set_requires_grad(self.trainer.B, False)
        # self.trainer.set_requires_grad(self.trainer.C, False)
        # self.trainer.set_requires_grad(self.trainer.Detector, False)
        # self.trainer.A.eval()
        # self.trainer.B.eval()
        # self.trainer.C.eval()
        # self.trainer.Detector.eval()
        self.engine.set_all(False)
        frame_num = self.engine.config.DATASET.test_clip_length
        tb_writer = self.engine.kwargs['writer_dict']['writer']
        global_steps = self.engine.kwargs['writer_dict']['global_steps_{}'.format(self.engine.kwargs['model_type'])]
        score_records = []
        # psnr_records = []
        total = 0
        random_video_sn = torch.randint(0, len(self.engine.test_dataset_keys), (1,))
        # random_video_sn = 0
        # load the trained one-vs-rest SVM classifier once for the whole evaluation
        self.engine.ovr_model = joblib.load(self.engine.ovr_model_path)
        for sn, video_name in enumerate(self.engine.test_dataset_keys):
            # _temp_test_folder = os.path.join(self.testing_data_folder, dir)
            # need to improve
            # dataset = AvenueTestOld(_temp_test_folder, clip_length=frame_num)
            dataset = self.engine.test_dataset_dict[video_name]
            len_dataset = dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            test_counter = 0
            # feature_record = []
        
            data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
            scores = np.empty(shape=(len_dataset,),dtype=np.float32)
            # for test_input, _ in data_loader:
            random_frame_sn = torch.randint(0, len_dataset,(1,))
            for frame_sn, (test_input, anno, meta) in enumerate(data_loader):
                feature_record_object = []
                future = test_input[:, :, 2, :, :].cuda()
                current = test_input[:, :, 1, :, :].cuda()
                past = test_input[:, :, 0, :, :].cuda()
                bboxs = get_batch_dets(self.engine.Detector, current)
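                # bboxs holds the detected object boxes for each clip in the batch;
                # empty detections are replaced by a single dummy box below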
                for index, bbox in enumerate(bboxs):
                    if bbox.numel() == 0:
                        bbox = bbox.new_zeros([1,4])
                        # print('NO objects')
                        # continue
                    current_object, _ = multi_obj_grid_crop(current[index], bbox)
                    future_object, _ = multi_obj_grid_crop(future[index], bbox)
                    future2current = torch.stack([future_object, current_object], dim=1)

                    past_object, _ = multi_obj_grid_crop(past[index], bbox)
                    current2past = torch.stack([current_object, past_object], dim=1)

                    _, _, A_input = frame_gradient(future2current)
                    A_input = A_input.sum(1)
                    _, _, C_input = frame_gradient(current2past)
                    C_input = C_input.sum(1)
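                    # A, B and C encode forward motion gradients, object appearance and
                    # backward motion gradients respectively; temp_* are presumably their
                    # reconstructions (used only for visualisation below)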
                    A_feature, temp_a, _ = self.engine.A(A_input)
                    B_feature, temp_b, _ = self.engine.B(current_object)
                    C_feature, temp_c, _ = self.engine.C(C_input)

                    if sn == random_video_sn and frame_sn == random_frame_sn:
                        vis_objects = OrderedDict({
                            'eval_oc_input_a': A_input.detach(),
                            'eval_oc_output_a': temp_a.detach(),
                            'eval_oc_input_b': current_object.detach(),
                            'eval_oc_output_b':  temp_b.detach(),
                            'eval_oc_input_c': C_input.detach(),
                            'eval_oc_output_c': temp_c.detach(),
                        })
                        tensorboard_vis_images(vis_objects, tb_writer, global_steps, normalize=self.engine.normalize.param['val'])

                    A_flatten_feature = A_feature.flatten(start_dim=1)
                    B_flatten_feature = B_feature.flatten(start_dim=1)
                    C_flatten_feature = C_feature.flatten(start_dim=1)
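                    # concatenate the three latent codes of each object and split the batch
                    # back into one feature vector per object for the one-class SVM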
                    ABC_feature = torch.cat([A_flatten_feature, B_flatten_feature, C_flatten_feature], dim=1).detach()
                    ABC_feature_s = torch.chunk(ABC_feature, ABC_feature.size(0), dim=0)

                    for abc_f in ABC_feature_s:
                        temp = abc_f.squeeze(0).cpu().numpy()
                        feature_record_object.append(temp)
                
                predict_input = np.array(feature_record_object)
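                # score every detected object with the one-vs-rest SVM and aggregate the
                # per-object decision values into a single frame-level score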
                g_i = self.engine.ovr_model.decision_function(predict_input)  # SVM decision value of each object in the frame
                frame_score = oc_score(g_i)

                # test_psnr = psnr_error(g_output, test_target)
                # test_psnr = test_psnr.tolist()
                scores[test_counter+frame_num-1] = frame_score

                test_counter += 1
                total+=1
                if test_counter >= test_iters:
                    scores[:frame_num-1]=scores[frame_num-1]
                    score_records.append(scores)
                    print(f'finish test video set {video_name}')
                    break
        
        self.engine.pkl_path = save_score_results(
            self.engine.config,
            self.engine.logger,
            verbose=self.engine.verbose,
            config_name=self.engine.config_name,
            current_step=current_step,
            time_stamp=self.engine.kwargs["time_stamp"],
            score=score_records)
        results = self.engine.evaluate_function(self.engine.pkl_path, self.engine.logger, self.engine.config)
        self.engine.logger.info(results)
        tb_writer.add_text('AUC of ROC curve', f'AUC is {results.auc:.5f}',global_steps)
        return results.auc
Example #3
0
    def evaluate(self, current_step):
        """STAE evaluation method. 

        Evaluate the model base on some methods.
        Args:
            current_step: The current step at present
        Returns:
            results: The magnitude of the method based on this evaluation metric
        """
        # Set basic things
        self.engine.set_all(False)  # eval mode
        tb_writer = self.engine.kwargs['writer_dict']['writer']
        global_steps = self.engine.kwargs['writer_dict'][
            'global_steps_{}'.format(self.engine.kwargs['model_type'])]
        frame_num = self.engine.config.DATASET.val.sampled_clip_length
        score_records = []
        # num_videos = 0
        random_video_sn = torch.randint(0, len(self.engine.val_dataset_keys),
                                        (1, ))

        # calc the score for the test dataset
        for sn, video_name in enumerate(self.engine.val_dataset_keys):
            # num_videos += 1
            # need to improve
            dataloader = self.engine.val_dataloaders_dict[
                'general_dataset_dict'][video_name]
            len_dataset = dataloader.dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            # test_iters = len_dataset // clip_step
            test_counter = 0

            vis_range = range(int(len_dataset * 0.5),
                              int(len_dataset * 0.5 + 5))

            scores = np.empty(shape=(len_dataset, ), dtype=np.float32)
            for clip_sn, (test_input, anno, meta) in enumerate(dataloader):
                test_input = test_input.cuda()
                # test_target = data[:,:,16:,:,:].cuda()
                time_len = test_input.shape[2]
                output, _ = self.engine.STAE(test_input)
                clip_score = reconstruction_loss(output, test_input)
                clip_score = clip_score.tolist()

                if (frame_num + test_counter) > len_dataset:
                    temp = test_counter + frame_num - len_dataset
                    scores[test_counter:len_dataset] = clip_score[temp:]
                else:
                    scores[test_counter:(frame_num +
                                         test_counter)] = clip_score

                test_counter += 1

                if sn == random_video_sn and (clip_sn in vis_range):
                    vis_objects = OrderedDict({
                        'stae_eval_clip':
                        test_input.detach(),
                        'stae_eval_clip_hat':
                        output.detach()
                    })
                    tensorboard_vis_images(
                        vis_objects,
                        tb_writer,
                        global_steps,
                        normalize=self.engine.normalize.param['val'])

                if test_counter >= test_iters:
                    # scores[:frame_num-1]=(scores[frame_num-1],) # fix the bug: TypeError: can only assign an iterable
                    smax = max(scores)
                    smin = min(scores)
                    normal_scores = np.array([
                        (1.0 - np.divide(s - smin, smax - smin)) for s in scores
                    ])
                    normal_scores = np.clip(normal_scores, 0, None)
                    score_records.append(normal_scores)
                    logger.info(f'Finish testing the video:{video_name}')
                    break

        # Compute the metrics based on the model's results
        self.engine.pkl_path = save_score_results(
            score_records,
            self.engine.config,
            self.engine.logger,
            verbose=self.engine.verbose,
            config_name=self.engine.config_name,
            current_step=current_step,
            time_stamp=self.engine.kwargs["time_stamp"])
        results = self.engine.evaluate_function.compute(
            {'val': self.engine.pkl_path})
        self.engine.logger.info(results)

        # Write the metric into the tensorboard
        tb_writer.add_text(
            f'{self.engine.config.MODEL.name}: AUC of ROC curve',
            f'auc is {results.avg_value}', global_steps)

        return results.avg_value
Example #4
0
    def evaluate(self, current_step):
        '''
        Evaluate the results of the model.
        The evaluation metric may change (e.g. accuracy, mAP, ...),
        or other methods provided by the official implementation can be called here.
        '''
        # self.trainer.set_requires_grad(self.trainer.F, False)
        # self.trainer.set_requires_grad(self.trainer.G, False)
        # self.trainer.set_requires_grad(self.trainer.D, False)
        # self.trainer.G.eval()
        # self.trainer.D.eval()
        # self.trainer.F.eval()
        self.engine.set_all(False)  # eval mode
        tb_writer = self.engine.kwargs['writer_dict']['writer']
        global_steps = self.engine.kwargs['writer_dict'][
            'global_steps_{}'.format(self.engine.kwargs['model_type'])]
        frame_num = self.engine.config.DATASET.val.clip_length
        psnr_records = []
        score_records = []
        # total = 0

        # for dirs in video_dirs:
        random_video_sn = torch.randint(0, len(self.engine.val_dataset_keys),
                                        (1, ))
        for sn, video_name in enumerate(self.engine.val_dataset_keys):

            # need to improve
            dataset = self.engine.val_dataset_dict[video_name]
            len_dataset = dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            test_counter = 0

            data_loader = DataLoader(dataset=dataset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=1)

            psnrs = np.empty(shape=(len_dataset, ), dtype=np.float32)
            scores = np.empty(shape=(len_dataset, ), dtype=np.float32)
            vis_range = range(int(len_dataset * 0.5),
                              int(len_dataset * 0.5 + 5))

            for frame_sn, (test_input, anno, meta) in enumerate(data_loader):
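                # the first clip_length-1 frames are folded into the channel dimension as the
                # generator input; the last frame of the clip is the prediction target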
                test_target = test_input[:, :, -1, :, :].cuda()
                test_input = test_input[:, :, :-1, :, :].reshape(
                    test_input.shape[0], -1, test_input.shape[-2],
                    test_input.shape[-1]).cuda()

                g_output = self.engine.G(test_input)
                test_psnr = psnr_error(g_output.detach(),
                                       test_target,
                                       hat=True)
                test_psnr = test_psnr.tolist()
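                # the PSNR between the predicted and the real future frame is stored directly
                # as the (un-normalised) score of frame test_counter + frame_num - 1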
                psnrs[test_counter + frame_num - 1] = test_psnr
                scores[test_counter + frame_num - 1] = test_psnr

                test_counter += 1
                # total+=1
                if sn == random_video_sn and (frame_sn in vis_range):
                    vis_objects = OrderedDict({
                        'anopred_eval_frame':
                        test_target.detach(),
                        'anopred_eval_frame_hat':
                        g_output.detach()
                    })
                    tensorboard_vis_images(
                        vis_objects,
                        tb_writer,
                        global_steps,
                        normalize=self.engine.normalize.param['val'])

                if test_counter >= test_iters:
                    psnrs[:frame_num - 1] = psnrs[frame_num - 1]
                    scores[:frame_num - 1] = (scores[frame_num - 1], )
                    smax = max(scores)
                    smin = min(scores)
                    normal_scores = np.array(
                        [np.divide(s - smin, smax - smin) for s in scores])
                    normal_scores = np.clip(normal_scores, 0, None)
                    psnr_records.append(psnrs)
                    score_records.append(normal_scores)
                    # print(f'finish test video set {video_name}')
                    break

        self.engine.pkl_path = save_score_results(
            self.engine.config,
            self.engine.logger,
            verbose=self.engine.verbose,
            config_name=self.engine.config_name,
            current_step=current_step,
            time_stamp=self.engine.kwargs["time_stamp"],
            score=score_records,
            psnr=psnr_records)
        results = self.engine.evaluate_function(
            self.engine.pkl_path, self.engine.logger, self.engine.config,
            self.engine.config.DATASET.score_type)
        self.engine.logger.info(results)
        tb_writer.add_text('anopred: AUC of ROC curve', f'auc is {results.auc}',
                           global_steps)
        return results.auc
Example #5
0
    def evaluate(self, current_step):
        """AnoPCN evaluation method. 

        Evaluate the model base on some methods.
        Args:
            current_step: The current step at present
        Returns:
            results: The magnitude of the method based on this evaluation metric
        """
        # Set basic things
        self.engine.set_all(False)
        tb_writer = self.engine.kwargs['writer_dict']['writer']
        global_steps = self.engine.kwargs['writer_dict'][
            'global_steps_{}'.format(self.engine.kwargs['model_type'])]

        frame_num = self.engine.config.DATASET.test_clip_length
        # psnr_records=[]
        score_records = []
        total = 0

        # for dirs in video_dirs:
        random_video_sn = torch.randint(0, len(self.engine.test_dataset_keys),
                                        (1, ))

        for sn, video_name in enumerate(self.engine.test_dataset_keys):
            # need to improve
            # dataset = self.engine.test_dataset_dict[video_name]
            dataloader = self.engine.val_dataloaders_dict[
                'general_dataset_dict'][video_name]
            len_dataset = dataloader.dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            test_counter = 0

            # data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
            # psnrs = np.empty(shape=(len_dataset,),dtype=np.float32)
            scores = np.empty(shape=(len_dataset, ), dtype=np.float32)
            vis_range = range(int(len_dataset * 0.5),
                              int(len_dataset * 0.5 + 5))
            for frame_sn, (test_input, anno, meta) in enumerate(dataloader):
                test_target = test_input[:, :, -1, :, :].cuda()
                test_input = test_input[:, :, :-1, :, :].cuda()

                _, g_output = self.engine.G(test_input, test_target)
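                # g_output is presumably the predicted last frame; its PSNR against the
                # ground-truth frame serves as the per-frame normality score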
                test_psnr = psnr_error(g_output, test_target, hat=False)
                test_psnr = test_psnr.tolist()
                # psnrs[test_counter+frame_num-1]=test_psnr
                scores[test_counter + frame_num - 1] = test_psnr

                if sn == random_video_sn and (frame_sn in vis_range):
                    vis_objects = OrderedDict({
                        'anopcn_eval_frame':
                        test_target.detach(),
                        'anopcn_eval_frame_hat':
                        g_output.detach()
                    })
                    # vis_objects['anopcn_eval_frame'] = test_target.detach()
                    # vis_objects['anopcn_eval_frame_hat'] = g_output.detach()
                    tensorboard_vis_images(
                        vis_objects,
                        tb_writer,
                        global_steps,
                        normalize=self.engine.normalize.param['val'])
                test_counter += 1
                total += 1

                if test_counter >= test_iters:
                    # psnrs[:frame_num-1]=psnrs[frame_num-1]
                    scores[:frame_num - 1] = (scores[frame_num - 1], )
                    smax = max(scores)
                    smin = min(scores)
                    normal_scores = np.array(
                        [np.divide(s - smin, smax - smin) for s in scores])
                    normal_scores = np.clip(normal_scores, 0, None)
                    # psnr_records.append(psnrs)
                    score_records.append(normal_scores)
                    logger.info(f'finish test video set {video_name}')
                    break

        # Compute the metrics based on the model's results
        self.engine.pkl_path = save_score_results(
            score_records,
            self.engine.config,
            self.engine.logger,
            verbose=self.engine.verbose,
            config_name=self.engine.config_name,
            current_step=current_step,
            time_stamp=self.engine.kwargs["time_stamp"])
        results = self.engine.evaluate_function.compute(
            {'val': self.engine.pkl_path})
        self.engine.logger.info(results)

        # Write the metric into the tensorboard
        tb_writer.add_text(
            f'{self.engine.config.MODEL.name}: AUC of ROC curve',
            f'auc is {results.avg_value}', global_steps)

        return results.auc
Example #6
0
    def evaluate(self, current_step):
        """AMC evaluation method. 
        
        Evaluate the model base on some methods.

        Args:
            current_step: The current step at present
        Returns:
            results: The magnitude of the method based on this evaluation metric
        """
        # Set basic things
        self.engine.set_all(False)
        tb_writer = self.engine.kwargs['writer_dict']['writer']
        global_steps = self.engine.kwargs['writer_dict'][
            'global_steps_{}'.format(self.engine.kwargs['model_type'])]
        frame_num = self.engine.config.DATASET.val.sampled_clip_length
        # psnr_records=[]
        score_records = []
        # score_records_w=[]
        w_dict = OrderedDict()
        # total = 0

        # calc the weight for the training set
        w_video_dict = self.engine.train_dataloaders_dict['w_dataset_dict']
        w_video_names = self.engine.train_dataloaders_dict[
            'w_dataset_dict'].keys()

        for video_name in w_video_names:
            # dataset = self.engine.test_dataset_dict_w[video_name]
            data_loader = w_video_dict[video_name]
            len_dataset = data_loader.dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            test_counter = 0

            scores = [0.0 for i in range(len_dataset)]

            for data, _, _ in data_loader:
                input_data_test = data[:, :, 0, :, :].cuda()
                target_test = data[:, :, 1, :, :].cuda()
                output_flow_G, output_frame_G = self.engine.G(input_data_test)
                gtFlowEstim = torch.cat([input_data_test, target_test], 1)
                gtFlow_vis, gtFlow = flow_batch_estimate(
                    self.engine.F,
                    gtFlowEstim,
                    self.engine.normalize.param['val'],
                    output_format=self.engine.config.DATASET.optical_format,
                    optical_size=self.engine.config.DATASET.optical_size)
                diff_appe, diff_flow = simple_diff(target_test, output_frame_G,
                                                   gtFlow, output_flow_G)
                # patch_score_appe, patch_score_flow, _, _ = find_max_patch(diff_appe, diff_flow)
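                # diff_appe / diff_flow are per-pixel appearance and optical-flow differences;
                # find_max_patch reduces each map to a patch-level score (the maximum-error patch)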
                patch_score_appe, patch_score_flow = find_max_patch(
                    diff_appe, diff_flow)
                scores[test_counter + frame_num -
                       1] = [patch_score_appe, patch_score_flow]
                test_counter += 1
                # print(test_counter)
                if test_counter >= test_iters:
                    # pad the first frame_num-1 entries with the first computed score pair
                    scores[:frame_num - 1] = [scores[frame_num - 1]] * (frame_num - 1)
                    scores = torch.tensor(scores)
                    frame_w = torch.mean(scores[:, 0])
                    flow_w = torch.mean(scores[:, 1])
                    w_dict[video_name] = [len_dataset, frame_w, flow_w]
                    logger.info(
                        f'Finish calc the scores of training set {video_name} in step:{current_step}'
                    )
                    break
        wf, wi = calc_w(w_dict)
        # wf , wi = 1.0, 1.0
        tb_writer.add_text('weight of train set',
                           f'w_f:{wf:.3f}, w_i:{wi:.3f}', global_steps)
        logger.info(f'wf:{wf}, wi:{wi}')

        # calc the score for the test dataset
        num_videos = 0
        random_video_sn = torch.randint(0, len(self.engine.test_dataset_keys),
                                        (1, ))

        for sn, video_name in enumerate(self.engine.test_dataset_keys):
            num_videos += 1
            # need to improve
            dataloader = self.engine.val_dataloaders_dict[
                'general_dataset_dict'][video_name]
            len_dataset = dataloader.dataset.pics_len
            test_iters = len_dataset - frame_num + 1
            test_counter = 0

            vis_range = range(int(len_dataset * 0.5),
                              int(len_dataset * 0.5 + 5))
            # psnrs = np.empty(shape=(len_dataset,),dtype=np.float32)
            scores = np.empty(shape=(len_dataset, ), dtype=np.float32)

            for frame_sn, (data, anno, meta) in enumerate(dataloader):
                test_input = data[:, :, 0, :, :].cuda()
                test_target = data[:, :, 1, :, :].cuda()

                g_output_flow, g_output_frame = self.engine.G(test_input)
                gt_flow_esti_tensor = torch.cat([test_input, test_target], 1)
                flow_gt_vis, flow_gt = flow_batch_estimate(
                    self.engine.F,
                    gt_flow_esti_tensor,
                    self.engine.normalize.param['val'],
                    output_format=self.engine.config.DATASET.optical_format,
                    optical_size=self.engine.config.DATASET.optical_size)
                # test_psnr = psnr_error(g_output_frame, test_target)
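                # amc_score fuses the appearance and motion errors with the learned weights
                # wf and wi into a single per-frame anomaly score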
                score, _, _ = amc_score(test_target, g_output_frame, flow_gt,
                                        g_output_flow, wf, wi)
                # test_psnr = test_psnr.tolist()
                score = score.tolist()
                # psnrs[test_counter+frame_num-1]=test_psnr
                scores[test_counter + frame_num - 1] = score
                test_counter += 1

                if sn == random_video_sn and (frame_sn in vis_range):
                    temp = vis_optical_flow(
                        g_output_flow.detach(),
                        output_format=self.engine.config.DATASET.
                        optical_format,
                        output_size=(g_output_flow.shape[-2],
                                     g_output_flow.shape[-1]),
                        normalize=self.engine.normalize.param['val'])
                    vis_objects = OrderedDict({
                        'amc_eval_frame':
                        test_target.detach(),
                        'amc_eval_frame_hat':
                        g_output_frame.detach(),
                        'amc_eval_flow':
                        flow_gt_vis.detach(),
                        'amc_eval_flow_hat':
                        temp
                    })
                    tensorboard_vis_images(
                        vis_objects,
                        tb_writer,
                        global_steps,
                        normalize=self.engine.normalize.param['val'])

                if test_counter >= test_iters:
                    # psnrs[:frame_num-1]=psnrs[frame_num-1]
                    scores[:frame_num - 1] = (
                        scores[frame_num - 1],
                    )  # fix the bug: TypeError: can only assign an iterable
                    smax = max(scores)
                    normal_scores = np.array(
                        [np.divide(s, smax) for s in scores])
                    normal_scores = np.clip(normal_scores, 0, None)
                    # psnr_records.append(psnrs)
                    score_records.append(normal_scores)
                    logger.info(f'Finish test video set {video_name}')
                    break

        # Compute the metrics based on the model's results
        self.engine.pkl_path = save_score_results(
            score_records,
            self.engine.config,
            self.engine.logger,
            verbose=self.engine.verbose,
            config_name=self.engine.config_name,
            current_step=current_step,
            time_stamp=self.engine.kwargs["time_stamp"])
        results = self.engine.evaluate_function.compute(
            {'val': self.engine.pkl_path})
        self.engine.logger.info(results)

        # Write the metric into the tensorboard
        tb_writer.add_text(
            f'{self.engine.config.MODEL.name}: AUC of ROC curve',
            f'auc is {results.auc}', global_steps)
        return results.auc