Example #1
    def test_video_anomaly_detection(self):
        # type: () -> None
        """
        Actually performs tests.
        """

        c, t, h, w = self.dataset.raw_shape

        # Load the checkpoint
        self.model.load_w(self.checkpoint)

        # Prepare a table to show results
        vad_table = self.empty_table

        # Set up container for novelty scores from all test videos
        global_llk = []
        global_rec = []
        global_ns = []
        global_y = []

        # Get accumulators
        results_accumulator_llk = ResultsAccumulator(time_steps=t)
        results_accumulator_rec = ResultsAccumulator(time_steps=t)

        # Start iteration over test videos
        for cl_idx, video_id in enumerate(self.dataset.test_videos):

            # Run the test
            self.dataset.test(video_id)
            loader = DataLoader(self.dataset,
                                collate_fn=self.dataset.collate_fn)

            # Build score containers
            sample_llk = np.zeros(shape=(len(loader) + t - 1, ))
            sample_rec = np.zeros(shape=(len(loader) + t - 1, ))
            sample_y = self.dataset.load_test_sequence_gt(video_id)
            for i, (x, y) in tqdm(enumerate(loader),
                                  desc=f'Computing scores for {self.dataset}'):
                #                x = x.to('cuda')
                x = x.to('cpu')

                x_r, z, z_dist = self.model(x)

                self.loss(x, x_r, z, z_dist)

                # Feed results accumulators
                results_accumulator_llk.push(self.loss.autoregression_loss)
                results_accumulator_rec.push(self.loss.reconstruction_loss)
                sample_llk[i] = results_accumulator_llk.get_next()
                sample_rec[i] = results_accumulator_rec.get_next()

            # Get last results
            while results_accumulator_llk.results_left != 0:
                index = (-results_accumulator_llk.results_left)
                sample_llk[index] = results_accumulator_llk.get_next()
                sample_rec[index] = results_accumulator_rec.get_next()

            min_llk, max_llk, min_rec, max_rec = self.compute_normalizing_coefficients(
                sample_llk, sample_rec)

            # Compute the normalized scores and novelty score
            sample_llk = normalize(sample_llk, min_llk, max_llk)
            sample_rec = normalize(sample_rec, min_rec, max_rec)
            sample_ns = novelty_score(sample_llk, sample_rec)

            # Update global scores (used for global metrics)
            global_llk.append(sample_llk)
            global_rec.append(sample_rec)
            global_ns.append(sample_ns)
            global_y.append(sample_y)

            try:
                # Compute AUROC for this video
                this_video_metrics = [
                    roc_auc_score(sample_y, sample_llk),  # likelihood metric
                    roc_auc_score(sample_y,
                                  sample_rec),  # reconstruction metric
                    roc_auc_score(sample_y, sample_ns)  # novelty score
                ]
                vad_table.add_row([video_id] + this_video_metrics)
            except ValueError:
                # This happens for sequences in which all frames are abnormal
                # Skipping this row in the table (the sequence will still count for global metrics)
                continue

        # Compute global AUROC and print table
        global_llk = np.concatenate(global_llk)
        global_rec = np.concatenate(global_rec)
        global_ns = np.concatenate(global_ns)
        global_y = np.concatenate(global_y)
        global_metrics = [
            roc_auc_score(global_y, global_llk),  # likelihood metric
            roc_auc_score(global_y, global_rec),  # reconstruction metric
            roc_auc_score(global_y, global_ns)  # novelty score
        ]
        vad_table.add_row(['avg'] + list(global_metrics))
        print(vad_table)

        # Save table
        with open(self.output_file, mode='w') as f:
            f.write(str(vad_table))
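
The ResultsAccumulator used above converts clip-level losses into frame-level scores: every pushed loss covers t consecutive frames, and a frame's score is built from all overlapping clips that contain it. Below is a minimal sketch of that idea, assuming the real class averages the overlapping contributions; the repository's implementation may differ in detail.

import numpy as np

class AveragingAccumulator:
    """Hypothetical stand-in for ResultsAccumulator: each pushed clip loss
    covers `time_steps` consecutive frames, and a frame's score is the mean
    of the losses of all clips covering it."""

    def __init__(self, time_steps):
        self._buffer = np.zeros(time_steps)
        self._counts = np.zeros(time_steps)

    def push(self, clip_loss):
        # the new clip contributes to the next `time_steps` frame slots
        self._buffer += clip_loss
        self._counts += 1

    def get_next(self):
        # pop the oldest frame's averaged score and shift the window
        score = self._buffer[0] / self._counts[0]
        self._buffer = np.append(self._buffer[1:], 0.0)
        self._counts = np.append(self._counts[1:], 0.0)
        return score

    @property
    def results_left(self):
        # frames that still hold pending contributions (at most time_steps - 1)
        return int(np.count_nonzero(self._counts))
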
    def test_one_class_classification(self):
        # type: () -> None
        """
        Actually performs tests.
        """

        # Prepare a table to show results
        oc_table = self.empty_table

        # Set up container for metrics from all classes
        all_metrics = []

        # Start iteration over classes
        for cl_idx, cl in enumerate(self.dataset.test_classes):

            # Load the checkpoint
            self.model.load_w(join(self.checkpoints_dir, f'{cl}.pkl'))

            # First we need a run on validation, to compute
            # normalizing coefficient of the Novelty Score (Eq.9)
            min_llk, max_llk, min_rec, max_rec = self.compute_normalizing_coefficients(
                cl)

            # Run the actual test
            self.dataset.test(cl)
            loader = DataLoader(self.dataset)

            sample_llk = np.zeros(shape=(len(loader), ))
            sample_rec = np.zeros(shape=(len(loader), ))
            sample_y = np.zeros(shape=(len(loader), ))
            for i, (x, y) in tqdm(enumerate(loader),
                                  desc=f'Computing scores for {self.dataset}'):
                x = x.to(device)

                x_r, z, z_dist = self.model(x)

                self.loss(x, x_r, z, z_dist)

                sample_llk[i] = -self.loss.autoregression_loss
                sample_rec[i] = -self.loss.reconstruction_loss
                sample_y[i] = y.item()

            # Normalize scores
            sample_llk = normalize(sample_llk, min_llk, max_llk)
            sample_rec = normalize(sample_rec, min_rec, max_rec)

            # Compute the normalized novelty score
            sample_ns = novelty_score(sample_llk, sample_rec)

            # Compute AUROC for this class
            this_class_metrics = [
                roc_auc_score(sample_y, sample_llk),  # likelihood metric
                roc_auc_score(sample_y, sample_rec),  # reconstruction metric
                roc_auc_score(sample_y, sample_ns)  # novelty score
            ]
            oc_table.add_row([cl_idx] + this_class_metrics)

            all_metrics.append(this_class_metrics)

        # Compute average AUROC and print table
        all_metrics = np.array(all_metrics)
        avg_metrics = np.mean(all_metrics, axis=0)
        oc_table.add_row(['avg'] + list(avg_metrics))
        print(oc_table)

        # Save table
        with open(self.output_file, mode='w') as f:
            f.write(str(oc_table))
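
The helpers normalize and novelty_score are assumed here to be the min-max scaling and the score combination of Eq. 9 mentioned in the comments. A minimal sketch under that assumption; the repository's helpers may guard against degenerate ranges or weight the two terms differently.

def normalize(samples, sample_min, sample_max):
    # min-max scaling into [0, 1], with coefficients estimated on validation data
    return (samples - sample_min) / (sample_max - sample_min)

def novelty_score(sample_llk, sample_rec):
    # assumed combination: the sum of the two normalized contributions
    return sample_llk + sample_rec
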
    def test_video_anomaly_detection(self):
        # type: () -> None
        """
        Actually performs tests.
        """

        c, t, h, w = self.dataset.raw_shape

        # Load the checkpoint
        self.model.load_w(self.checkpoint)

        # Prepare a table to show results
        vad_table = self.empty_table

        # Set up container for novelty scores from all test videos
        global_llk = []
        global_rec = []
        global_ns = []
        global_y = []

        # Get accumulators: what for? They recover frame-level scores from clip-level scores
        results_accumulator_llk = ResultsAccumulator(time_steps=t)
        results_accumulator_rec = ResultsAccumulator(time_steps=t)

        cnt_step = 0 # global_step
        with SummaryWriter(log_dir="summary/test_{0}".format(
                Config.output_file.split('.')[0]),
                comment="{}".format(Config.dataset_name)) as writer:
            # Start iteration over test videos
            for cl_idx, video_id in enumerate(self.dataset.test_videos):
                # test_videos contains the directory names TestXXX (XXX: 001~012); each directory
                # holds all frames of one video, so each name stands for one video, i.e. video_id

                # Run the test
                self.dataset.test(video_id) # sets up cur_video_frames (actually all clips of the whole video),
                # cur_video_gt and cur_len (actually the number of clips)
                loader = DataLoader(self.dataset,
                                    num_workers=Config.num_workers,
                                    shuffle=Config.shuffle,
                                    batch_size=Config.batch_size,
                                    collate_fn=self.dataset.collate_fn) # build a temporary loader
                # this is inference, so there is effectively no batching (batch_size == 1)
                # collate_fn: its actual role: TODO

                # Build score containers
                sample_llk = np.zeros(shape=(len(loader) + t - 1,))
                sample_rec = np.zeros(shape=(len(loader) + t - 1,))
                # print("len(loader): ", len(loader)) # len(self.batch_sampler)
                # the loader enumerates every item of the Dataset, and len(dataset) ==
                # num_frames - t + 1, i.e. all (overlapping) clips, so the frame count is recovered as
                # len(loader) + t - 1 (see the quick arithmetic check after this method)
                # print("len(loader) + t - 1: ", len(loader) + t - 1)
                sample_y = self.dataset.load_test_sequence_gt(video_id) # (n_frames,)
                # print("len(sample_y): ", len(sample_y))
                # in practice, (len(loader) + t - 1) == len(sample_y)
                for i, (x, y) in tqdm(enumerate(loader), desc=f'Computing scores for {self.dataset}'):
                    #
                    cnt_step = cnt_step + 1

                    x = x.to(device)

                    x_r, z, z_dist = self.model(x)

                    ttloss = self.loss(x, x_r, z, z_dist) # remember: self.loss is an object; calling it
                    # runs forward(), so the object's internal state is updated (i.e. self.loss is modified)
                    total_loss = self.loss.total_loss
                    reconstruction_loss = self.loss.reconstruction_loss
                    autoregression_loss = self.loss.autoregression_loss
                    # write all loss
                    # if cnt_step % Config.plot_every == 0:
                    #     writer.add_scalars("test_loss",
                    #                        {'total_loss': total_loss,
                    #                         'reconstruction_loss': reconstruction_loss,
                    #                         'autoregression_loss': autoregression_loss
                    #                         },
                    #                        cnt_step)

                    # Feed results accumulators: they mimic a queue (push at the tail, pop at the head).
                    # To see how results_accumulator_llk works, step into it with a breakpoint.
                    # Because batch_size == 1, push is called num_clips (== num_frames - t + 1) times,
                    # so (t - 1) frames still have no loss; they are handled by the while loop below
                    results_accumulator_llk.push(self.loss.autoregression_loss)
                    results_accumulator_rec.push(self.loss.reconstruction_loss)
                    sample_llk[i] = results_accumulator_llk.get_next()
                    sample_rec[i] = results_accumulator_rec.get_next()

                # Get last results
                # compute the loss of each of the remaining (t - 1) frames
                while results_accumulator_llk.results_left != 0:
                    index = (- results_accumulator_llk.results_left)
                    sample_llk[index] = results_accumulator_llk.get_next()
                    sample_rec[index] = results_accumulator_rec.get_next()

                min_llk, max_llk, min_rec, max_rec = self.compute_normalizing_coefficients(sample_llk, sample_rec)

                # Compute the normalized scores and novelty score
                sample_llk = normalize(sample_llk, min_llk, max_llk)
                sample_rec = normalize(sample_rec, min_rec, max_rec)
                sample_ns = novelty_score(sample_llk, sample_rec)
                # plot the novelty-score curve
                # print("len of sample_ns:", len(sample_ns))
                fig_novelty_score = plt.figure()
                plt.title('novelty_score of {}'.format(video_id))
                plt.plot(range(len(sample_ns)), sample_ns, color='green',
                                             label='novelty_score')
                plt.xlabel('frames')
                plt.ylabel('novelty_score')
                writer.add_figure('Novelty Score', fig_novelty_score, global_step=cl_idx)

                # Update global scores (used for global metrics)
                global_llk.append(sample_llk)
                global_rec.append(sample_rec)
                global_ns.append(sample_ns)
                global_y.append(sample_y)

                try:
                    # Compute AUROC for this video
                    this_video_metrics = [
                        roc_auc_score(sample_y, sample_llk),  # likelihood metric
                        roc_auc_score(sample_y, sample_rec),  # reconstruction metric
                        roc_auc_score(sample_y, sample_ns)    # novelty score
                    ]
                    vad_table.add_row([video_id] + this_video_metrics)
                except ValueError:
                    # This happens for sequences in which all frames are abnormal
                    # Skipping this row in the table (the sequence will still count for global metrics)
                    continue

            # Compute global AUROC and print table
            global_llk = np.concatenate(global_llk)
            global_rec = np.concatenate(global_rec)
            global_ns = np.concatenate(global_ns)
            global_y = np.concatenate(global_y)
            global_metrics = [
                roc_auc_score(global_y, global_llk),  # likelihood metric
                roc_auc_score(global_y, global_rec),  # reconstruction metric
                roc_auc_score(global_y, global_ns)    # novelty score
            ]
            vad_table.add_row(['avg'] + list(global_metrics))
            print(vad_table)

            # Save table
            with open(self.output_file, mode='w') as f:
                f.write(str(vad_table))
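
The comments above justify the container length len(loader) + t - 1: with clips of t frames taken at stride 1 and batch_size == 1, the dataset has num_frames - t + 1 items, so adding t - 1 recovers the frame count. A quick check of that arithmetic with illustrative numbers:

t = 16          # clip length in frames (illustrative value)
n_frames = 200  # frames in one test video (illustrative value)

n_clips = n_frames - t + 1   # overlapping clips at stride 1
len_loader = n_clips         # batch_size == 1, so one batch per clip
assert len_loader + t - 1 == n_frames
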
Example #4
    def test_joint_for_paramsopt(self, model, loss_fn, val_dataloader, metrics,
                                 params):
        # type: (...) -> dict
        """
        Actually performs tests.
        """
        c, t, h, w = val_dataloader.raw_shape

        # set model to evaluation mode
        model.eval()

        # summary for current eval loop
        # summ = []
        metrics = {}

        # Load the checkpoint
        # self.model.load_w(self.checkpoint)
        # self.ckpt = torch.load(self.checkpoint)
        # self.model.load_state_dict(self.ckpt['net_dict'])
        # self.R = self.ckpt['R']
        # self.c = self.ckpt['c']

        # Prepare a table to show results
        vad_table = self.empty_table

        # Set up container for novelty scores from all test videos
        global_llk = []
        global_rec = []
        global_ns = []
        global_y = []

        # Get accumulators: what for? They recover frame-level scores from clip-level scores
        results_accumulator_llk = ResultsAccumulator(time_steps=t)
        results_accumulator_rec = ResultsAccumulator(time_steps=t)

        cnt_step = 0  # global_step
        with SummaryWriter(
                log_dir="summary/test_deepSVDD/test_{0}_lr={1}_lam_rec={2}_"
                "lam_svdd={3}_code_length={4}".format(self.params.dataset_name,
                                                      self.params.LR,
                                                      self.params.lam_rec,
                                                      self.params.lam_svdd,
                                                      self.params.code_length),
                comment="{}".format(self.params.dataset_name)) as writer:
            # Start iteration over test videos
            for cl_idx, video_id in enumerate(val_dataloader.test_videos):
                # test_videos contains the directory names TestXXX (XXX: 001~012); each directory
                # holds all frames of one video, so each name stands for one video, i.e. video_id

                # Run the test
                val_dataloader.test(
                    video_id)  # sets up cur_video_frames (actually all clips of the whole video),
                # cur_video_gt and cur_len (actually the number of clips)
                loader = DataLoader(
                    val_dataloader,
                    num_workers=1,
                    shuffle=False,
                    batch_size=1,
                    collate_fn=val_dataloader.collate_fn)  # build a temporary loader
                # this is inference, so there is effectively no batching (batch_size == 1)
                # collate_fn: its actual role: TODO

                # Build score containers
                sample_llk = np.zeros(shape=(len(loader) + t - 1, ))
                sample_rec = np.zeros(shape=(len(loader) + t - 1, ))
                # print("len(loader): ", len(loader)) # len(self.batch_sampler)
                # the loader enumerates every item of the Dataset, and len(dataset) ==
                # num_frames - t + 1, i.e. all (overlapping) clips, so the frame count is recovered as
                # len(loader) + t - 1
                # print("len(loader) + t - 1: ", len(loader) + t - 1)
                sample_y = val_dataloader.load_test_sequence_gt(
                    video_id)  # (n_frames,)
                # print("len(sample_y): ", len(sample_y))
                # in practice, (len(loader) + t - 1) == len(sample_y)
                for i, (x, y) in tqdm(
                        enumerate(loader),
                        desc=f'Computing scores for {self.params.dataset_name}'
                ):
                    #
                    cnt_step = cnt_step + 1

                    x = x.to(self.device)

                    x_r, z = model(x)
                    z = z.view(-1, 690, 2 * (self.params.code_length))
                    # print("in 327 line, z.size: ", z.size())

                    ttloss = loss_fn(x, x_r, z)  # remember: loss_fn is an object; calling it
                    # runs forward(), so the object's internal state is updated (i.e. loss_fn is modified)
                    total_loss = loss_fn.total_loss
                    reconstruction_loss = loss_fn.reconstruction_loss
                    deepSVDD_loss = loss_fn.deepSVDD_loss
                    # write all loss
                    # if cnt_step % Config.plot_every == 0:
                    #     writer.add_scalars("test_loss",
                    #                        {'total_loss': total_loss,
                    #                         'reconstruction_loss': reconstruction_loss,
                    #                         'autoregression_loss': autoregression_loss
                    #                         },
                    #                        cnt_step)

                    # Feed results accumulators: they mimic a queue (push at the tail, pop at the head).
                    # To see how results_accumulator_llk works, step into it with a breakpoint.
                    # Because batch_size == 1, push is called num_clips (== num_frames - t + 1) times,
                    # so (t - 1) frames still have no loss; they are handled by the while loop below
                    results_accumulator_llk.push(loss_fn.deepSVDD_loss)
                    results_accumulator_rec.push(loss_fn.reconstruction_loss)
                    sample_llk[i] = results_accumulator_llk.get_next()
                    sample_rec[i] = results_accumulator_rec.get_next()

                # Get last results
                # compute the loss of each of the remaining (t - 1) frames
                while results_accumulator_llk.results_left != 0:
                    index = (-results_accumulator_llk.results_left)
                    sample_llk[index] = results_accumulator_llk.get_next()
                    sample_rec[index] = results_accumulator_rec.get_next()

                min_llk, max_llk, min_rec, max_rec = self.compute_normalizing_coefficients(
                    sample_llk, sample_rec)

                # Compute the normalized scores and novelty score
                sample_llk = normalize(sample_llk, min_llk, max_llk)
                sample_rec = normalize(sample_rec, min_rec, max_rec)
                sample_ns = novelty_score(sample_llk, sample_rec)
                # # 绘制 score-map
                # # print("len of sample_ns:", len(sample_ns))
                # fig_novelty_score = plt.figure()
                # plt.title('novelty_score of {}'.format(video_id))
                # plt.plot(range(len(sample_ns)), sample_ns, color='green',
                #          label='novelty_score')
                # plt.xlabel('frames')
                # plt.ylabel('novelty_score')
                # writer.add_figure('Novelty Score', fig_novelty_score, global_step=cl_idx)

                # Update global scores (used for global metrics)
                global_llk.append(sample_llk)
                global_rec.append(sample_rec)
                global_ns.append(sample_ns)
                global_y.append(sample_y)

                try:
                    # Compute AUROC for this video
                    this_video_metrics = [
                        roc_auc_score(sample_y,
                                      sample_llk),  # likelihood metric
                        roc_auc_score(sample_y,
                                      sample_rec),  # reconstruction metric
                        roc_auc_score(sample_y, sample_ns)  # novelty score
                    ]
                    vad_table.add_row([video_id] + this_video_metrics)
                except ValueError:
                    # This happens for sequences in which all frames are abnormal
                    # Skipping this row in the table (the sequence will still count for global metrics)
                    continue

            # Compute global AUROC and print table
            global_llk = np.concatenate(global_llk)
            global_rec = np.concatenate(global_rec)
            global_ns = np.concatenate(global_ns)
            global_y = np.concatenate(global_y)
            global_metrics = [
                roc_auc_score(global_y, global_llk),  # likelihood metric
                roc_auc_score(global_y, global_rec),  # reconstruction metric
                roc_auc_score(global_y, global_ns)  # novelty score
            ]
            vad_table.add_row(['avg'] + list(global_metrics))
            print(vad_table)

            # # Save table
            # with open(self.output_file, mode='w') as f:
            #     f.write(str(vad_table))
            #     #
            #     # 查看下网络
            #     # model_input = torch.rand([1380, 1, 8, 32, 32])
            #     # writer.add_graph(self.model, input_to_model=model_input)
            # print("ag_auc: ", list(global_metrics)[2])
            metrics['auc'] = list(global_metrics)[2]
            return metrics  # return the average AUC (novelty-score AUROC)
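
Example #4 returns the average novelty-score AUROC in a metrics dict so that an outer hyper-parameter search can rank configurations. A self-contained sketch of such a driver loop; evaluate is a placeholder standing in for a call to test_joint_for_paramsopt, and the grid values are illustrative assumptions.

from itertools import product

def evaluate(params):
    # placeholder for test_joint_for_paramsopt(...): build the model/loss with
    # `params`, run the validation loop above, and return {'auc': ...}
    return {'auc': 0.5 + 0.001 * params['code_length']}

grid = {
    'LR': [1e-3, 1e-4],
    'lam_rec': [1.0],
    'lam_svdd': [0.1, 1.0],
    'code_length': [32, 64],
}

best_auc, best_params = -1.0, None
for values in product(*grid.values()):
    params = dict(zip(grid.keys(), values))
    auc = evaluate(params)['auc']
    if auc > best_auc:
        best_auc, best_params = auc, params

print('best auc:', best_auc, 'with params:', best_params)
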
Example #5
    def test_one_class_classification(self):
        # type: () -> None
        """
        Actually performs tests.
        """

        # Prepare a table to show results
        oc_table = self.empty_table

        # Set up container for metrics from all classes
        all_metrics = []

        # Start iteration over classes
        for cl_idx, cl in enumerate(self.dataset.test_classes):
            if(cl_idx == 2):
                lala = True
                print('cl_idx ' +str(cl_idx))
                print('cl '+ str(cl))
                # Load the checkpoint
                self.model.load_w(join(self.checkpoints_dir, f'{cl}.pkl'))

                # First we need a run on validation, to compute
                # normalizing coefficient of the Novelty Score (Eq.9)
                min_llk, max_llk, min_rec, max_rec = self.compute_normalizing_coefficients(cl)

                # Run the actual test
                self.dataset.test(cl)
                loader = DataLoader(self.dataset)

                sample_llk = np.zeros(shape=(len(loader),))
                sample_rec = np.zeros(shape=(len(loader),))
                sample_y = np.zeros(shape=(len(loader),))
                print("Length of the dataset = " + str(len(self.dataset)))
                
                labels = np.zeros((len(self.dataset)))

                for i, (x, y) in tqdm(enumerate(loader), desc=f'Computing scores for {self.dataset}'):
                    labels[i] = y.item()
                print(labels)
                print(np.where(labels==0))
                labels_inliers = np.zeros((len(np.where(labels==1)[0])))
                labels_outliers = np.zeros((len(np.where(labels==0)[0])))

                zs_in = np.empty((64,len(labels_inliers)))
                zs_out = np.empty((64, len(labels_outliers)))
                print("number of inliers = " + str(len(labels_inliers)))
                print("number of outliers = " + str(len(labels_outliers)))
                count_in = 0
                count_out = 0

                for i, (x, y) in tqdm(enumerate(loader), desc=f'Computing scores for {self.dataset}'):
                    x = x.to('cuda')
                    
                    x_r, z, z_dist = self.model(x) # z_dist has shape torch.Size([1, 100, 64])

                    
                    print(i)
                    # print("y.item() = " + str(y.item()))
                    if(y.item() == 1):
                        zs_in[:,count_in] = z.cpu().numpy()
                        count_in += 1
                    else:
                        zs_out[:,count_out] = z.cpu().numpy()
                        count_out += 1
                    #     print("INLIER")     
                    #     # # print(z_dist_sm.size())
                    #     z_d = z.detach()
                    #     z_d = z_d.view(len(z_d), -1).contiguous()
                    #     idxs_of_bins = torch.clamp(torch.unsqueeze(z_d, dim=1) * 100, min=0,
                    #         max=(100 - 1)).long()
                        
                    #     visualize_instance_on_tensorboard(writer,x,idxs_of_bins,z_d,z_dist,i,inlier=True)
                        
                    # elif(y.item()==0):
                    #     print("OUTLIER")      
                    #     z_d = z.detach()
                    #     z_d = z_d.view(len(z_d), -1).contiguous()
                    #     idxs_of_bins = torch.clamp(torch.unsqueeze(z_d, dim=1) * 100, min=0,
                    #         max=(100 - 1)).long()
                        
                    #     visualize_instance_on_tensorboard(writer,x,idxs_of_bins,z_d,z_dist,i,inlier=False)
                    self.loss(x, x_r, z, z_dist)  # update the loss terms read just below

                    sample_llk[i] = - self.loss.autoregression_loss
                    sample_rec[i] = - self.loss.reconstruction_loss
                    # if(y.item()==1):
                    #     writer.add_scalar('data/reconstruction_eror_in', sample_rec[i],i)
                    #     writer.add_scalar('data/llk_error_in', sample_llk[i],i)
                    # else:
                    #     writer.add_scalar('data/reconstruction_eror_out', sample_rec[i],i)
                    #     writer.add_scalar('data/llk_error_out', sample_llk[i],i)
                    # writer.add_custom_scalars_multilinechart(['data/reconstruction_eror_in', 'data/reconstruction_eror_out'],title='reconstruction error')
                    # writer.add_custom_scalars_multilinechart(['data/llk_error_in', 'data/llk_error_out'], title='llk error')
                    sample_y[i] = y.item()
                np.save('/data/Ponc/zs_2_in.npy',zs_in)
                np.save('/data/Ponc/zs_2_out.npy',zs_out)

                print("WRITING")
                hist, x_axis, _ = plt.hist(zs_in[0, :], bins=100)  # assumed: the inlier codes (`zs` was undefined)
                x_axis = x_axis[:-1]
                hist = hist/np.sum(hist)
                ord_g = 4
                M = generateMoments(hist, ord_g,1)
                magic_q = comb(1+ord_g, 1)

                print(magic_q)
                q_eval = Q(M, x_axis)
                
                plt.subplot(211)
                plt.title("Gaussian Distr. mu=0.5, ss=0.1")
                plt.plot(x_axis, hist)
                plt.subplot(212)
                plt.title("Q(x) with M"+str(ord_g))
                plt.plot(x_axis, q_eval)
                plt.plot(x_axis, magic_q*np.ones(len(x_axis)))
                plt.show()


                # writer.close()  # writer is only created in the commented-out TensorBoard code
                # Normalize scores
                sample_llk = normalize(sample_llk, min_llk, max_llk)
                sample_rec = normalize(sample_rec, min_rec, max_rec)

                # Compute the normalized novelty score
                sample_ns = novelty_score(sample_llk, sample_rec)

                # Compute AUROC for this class
                this_class_metrics = [
                    roc_auc_score(sample_y, sample_llk),  # likelihood metric
                    roc_auc_score(sample_y, sample_rec),  # reconstruction metric
                    roc_auc_score(sample_y, sample_ns)    # novelty score
                ]
                oc_table.add_row([cl_idx] + this_class_metrics)

                all_metrics.append(this_class_metrics)

        # Compute average AUROC and print table
        all_metrics = np.array(all_metrics)
        avg_metrics = np.mean(all_metrics, axis=0)
        oc_table.add_row(['avg'] + list(avg_metrics))
        print(oc_table)

        # Save table
        with open(self.output_file, mode='w') as f:
            f.write(str(oc_table))
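
The method above dumps the inlier and outlier latent codes to .npy files. A short follow-up sketch for inspecting them; the paths are the ones used above, and the per-dimension histogram comparison is only an assumption about how one might look at the saved codes.

import numpy as np
import matplotlib.pyplot as plt

zs_in = np.load('/data/Ponc/zs_2_in.npy')    # shape (64, n_inliers)
zs_out = np.load('/data/Ponc/zs_2_out.npy')  # shape (64, n_outliers)

# compare the first latent dimension of inliers vs. outliers
plt.hist(zs_in[0, :], bins=100, alpha=0.5, density=True, label='inliers')
plt.hist(zs_out[0, :], bins=100, alpha=0.5, density=True, label='outliers')
plt.xlabel('z[0]')
plt.ylabel('density')
plt.legend()
plt.show()
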
    def test_video_anomaly_detection(self, model, flow=None, val=False):
        _, t, _, _ = self.videoshape

        test_loglikelihood = NLL().to(self.cuda)
        test_loglikelihood.eval()
        model.eval()
        if flow is not None:
            flow.eval()
            vad_table = self.empty_table(True)
            flow_input = Static_intensity(self.cuda)
        else:
            vad_table = self.empty_table(False)

        global_rec = []
        global_f = []
        global_y = []

        results_accumulator_rec = ResultsAccumulator(time_steps=t)
        results_accumulator_nll = ResultsAccumulator(time_steps=t)

        if val:
            video_list = self.dataset.val_videos
        else:
            video_list = self.dataset.test_videos

        for cl_idx, video_id in enumerate(video_list):
            self.dataset.test(video_id)
            loader = DataLoader(self.dataset,
                                num_workers=self.workers,
                                collate_fn=self.dataset.collate_fn)
            sample_nll = np.zeros(shape=(len(loader) + t - 1, ))
            sample_rec = np.zeros(shape=(len(loader) + t - 1, ))
            sample_y = self.dataset.load_test_sequence_gt(video_id)

            for i, input in tqdm(enumerate(loader)):
                x = input[0]
                x = x.to(self.cuda)
                if flow is not None:
                    x_r, flow_z = model(x)
                    intensity = flow_input(input[0])
                    flows = torch.cat((flow_z[0], intensity), 1)
                    _, nll, _ = flow(flows.to(self.cuda))
                    # _, nll, _ = flow(flow_z[1].to(self.cuda))
                else:
                    x_r, _ = model(x)
                    nll = torch.Tensor([0.])
                if self.dataname == 'shanghaitech':
                    score = self.test_method(x, input[1], x_r)
                else:
                    score = self.test_method(x, None, x_r)
                results_accumulator_rec.push(
                    self.test_method.reconstruction_loss)
                sample_rec[i] = results_accumulator_rec.get_next()

                test_nll = test_loglikelihood(nll)
                results_accumulator_nll.push(test_nll.item())
                sample_nll[i] = results_accumulator_nll.get_next()

            # Get last results
            while results_accumulator_rec.results_left != 0:
                index = (-results_accumulator_rec.results_left)
                sample_rec[index] = results_accumulator_rec.get_next()
                sample_nll[index] = results_accumulator_nll.get_next()

            min_nll, max_nll, min_rec, max_rec = self.compute_normalizing_coefficients(
                sample_nll, sample_rec)

            # Compute the normalized scores and novelty score
            sample_rec = normalize(sample_rec, min_rec, max_rec)
            if flow is not None:
                sample_nll = normalize(sample_nll, min_nll, max_nll)
                sample_f = novelty_score(sample_nll, sample_rec, self.lamb)
                global_f.append(sample_f)
            global_rec.append(sample_rec)
            global_y.append(sample_y)

            try:
                if flow is not None:
                    # Compute AUROC for this video
                    this_video_metrics = [
                        roc_auc_score(sample_y,
                                      sample_rec),  # reconstruction metric
                        roc_auc_score(sample_y, sample_f),  # flow novelty score
                    ]
                else:
                    this_video_metrics = [roc_auc_score(sample_y, sample_rec)]
                vad_table.add_row([video_id] + list(this_video_metrics))
            except ValueError:
                # This happens for sequences in which all frames are abnormal
                # Skipping this row in the table (the sequence will still count for global metrics)
                continue

        # Compute global AUROC and print table
        global_rec = np.concatenate(global_rec)
        global_y = np.concatenate(global_y)
        if flow is not None:
            global_f = np.concatenate(global_f)

        if flow is not None:
            global_metrics = [
                roc_auc_score(global_y, global_rec),  # reconstruction metric
                roc_auc_score(global_y, global_f),  # flow novelty score
            ]
        else:
            global_metrics = [roc_auc_score(global_y, global_rec)]
        vad_table.add_row(['avg'] + list(global_metrics))
        print(vad_table)
        # Save table
        if val:
            with open(self.output_file_val, mode='a+') as f:
                f.write(str(vad_table))
                f.write('\n')
            return list(global_metrics)
        else:
            with open(self.output_file, mode='a+') as f:
                f.write(str(vad_table))
                f.write('\n')
            return list(global_metrics)
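
In this variant novelty_score takes a weight self.lamb that balances the flow log-likelihood term against the reconstruction term. A minimal sketch of the assumed weighted form; the repository's helper may differ.

def novelty_score(sample_nll, sample_rec, lamb=1.0):
    # assumed weighted combination; with lamb == 1 it reduces to a plain sum
    return lamb * sample_nll + sample_rec
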
    def test_video_anomaly_detection_multiflow(self,
                                               model,
                                               flow1,
                                               flow2,
                                               val=False):
        _, t, _, _ = self.videoshape
        flow_input = Static_intensity(self.cuda)
        test_loglikelihood = NLL().to(self.cuda)
        test_loglikelihood.eval()
        model.eval()
        flow1.eval()
        flow2.eval()
        vad_table = PrettyTable()
        vad_table.field_names = [
            'VIDEO-ID', 'AUROC-Recon', 'AUROC-FlowS', 'AUROC-FlowD',
            'AUROC-Total'
        ]
        vad_table.float_format = '0.4'

        global_rec = []
        global_fa = []
        global_fm = []
        global_ns = []
        global_y = []

        results_accumulator_rec = ResultsAccumulator(time_steps=t)
        results_accumulator_nll1 = ResultsAccumulator(time_steps=t)
        results_accumulator_nll2 = ResultsAccumulator(time_steps=t)

        if val:
            video_list = self.dataset.val_videos
        else:
            video_list = self.dataset.test_videos

        for cl_idx, video_id in enumerate(video_list):
            self.dataset.test(video_id)
            loader = DataLoader(self.dataset,
                                num_workers=self.workers,
                                collate_fn=self.dataset.collate_fn)
            sample_nll1 = np.zeros(shape=(len(loader) + t - 1, ))
            sample_nll2 = np.zeros(shape=(len(loader) + t - 1, ))
            sample_rec = np.zeros(shape=(len(loader) + t - 1, ))
            sample_y = self.dataset.load_test_sequence_gt(video_id)

            for i, input in tqdm(enumerate(loader)):
                x = input[0]
                x = x.to(self.cuda)
                intensity = flow_input(input[0])

                x_r, flow_z = model(x)
                flows = torch.cat((flow_z[0], intensity), 1)
                _, nll1, _ = flow1(flows.to(self.cuda))
                _, nll2, _ = flow2(flow_z[1].to(self.cuda))

                if self.dataname == 'shanghaitech':
                    score = self.test_method(x, input[1], x_r)
                else:
                    score = self.test_method(x, None, x_r)
                results_accumulator_rec.push(
                    self.test_method.reconstruction_loss)
                sample_rec[i] = results_accumulator_rec.get_next()

                test_nll1 = test_loglikelihood(nll1)
                results_accumulator_nll1.push(test_nll1.item())
                sample_nll1[i] = results_accumulator_nll1.get_next()

                test_nll2 = test_loglikelihood(nll2)
                results_accumulator_nll2.push(test_nll2.item())
                sample_nll2[i] = results_accumulator_nll2.get_next()

            # Get last results
            while results_accumulator_rec.results_left != 0:
                index = (-results_accumulator_rec.results_left)
                sample_rec[index] = results_accumulator_rec.get_next()
                sample_nll1[index] = results_accumulator_nll1.get_next()
                sample_nll2[index] = results_accumulator_nll2.get_next()

            min_nll1, max_nll1, min_nll2, max_nll2 = self.compute_normalizing_coefficients(
                sample_nll1, sample_nll2)
            sample_nll = (
                (max_nll1 - min_nll1) /
                (max_nll1 - min_nll1 + max_nll2 - min_nll2)) * sample_nll1 + (
                    (max_nll2 - min_nll2) /
                    (max_nll1 - min_nll1 + max_nll2 - min_nll2)) * sample_nll2
            # sample_nll = sample_nll1+sample_nll2
            min_nll, max_nll, min_rec, max_rec = self.compute_normalizing_coefficients(
                sample_nll, sample_rec)

            # Compute the normalized scores and novelty score
            sample_rec = normalize(sample_rec, min_rec, max_rec)
            sample_nll1 = normalize(sample_nll1, min_nll1, max_nll1)
            sample_nll2 = normalize(sample_nll2, min_nll2, max_nll2)
            sample_nll = normalize(sample_nll, min_nll, max_nll)
            sample_ns = novelty_score(sample_nll, sample_rec, self.lamb)
            sample_fa = novelty_score(sample_nll1, sample_rec, self.lamb)
            sample_fm = novelty_score(sample_nll2, sample_rec, self.lamb)
            global_ns.append(sample_ns)
            global_rec.append(sample_rec)
            global_y.append(sample_y)
            global_fa.append(sample_fa)
            global_fm.append(sample_fm)

            try:
                this_video_metrics = [
                    roc_auc_score(sample_y,
                                  sample_rec),  # reconstruction metric
                    roc_auc_score(sample_y, sample_fa),  # FlowS novelty score
                    roc_auc_score(sample_y, sample_fm),  # FlowD novelty score
                    roc_auc_score(sample_y, sample_ns)  # novelty score
                ]
                vad_table.add_row([video_id] + list(this_video_metrics))
            except ValueError:
                # This happens for sequences in which all frames are abnormal
                # Skipping this row in the table (the sequence will still count for global metrics)
                continue
        # Compute global AUROC and print table
        global_ns = np.concatenate(global_ns)
        global_fa = np.concatenate(global_fa)
        global_fm = np.concatenate(global_fm)
        global_rec = np.concatenate(global_rec)
        global_y = np.concatenate(global_y)
        global_metrics = [
            roc_auc_score(global_y, global_rec),  # reconstruction metric
            roc_auc_score(global_y, global_fa),  # FlowS novelty score
            roc_auc_score(global_y, global_fm),  # FlowD novelty score
            roc_auc_score(global_y, global_ns),  # novelty score
        ]
        vad_table.add_row(['avg'] + list(global_metrics))
        print(vad_table)

        # Save table
        if val:
            with open(self.output_file_val, mode='a+') as f:
                f.write(str(vad_table))
                f.write('\n')
            return list(global_metrics)
        else:
            with open(self.output_file, mode='a+') as f:
                f.write(str(vad_table))
                f.write('\n')
            return list(global_metrics)
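
Before the final normalization, the multi-flow method above blends the two NLL streams with weights proportional to each stream's dynamic range, so the stream with the wider range dominates the combined score. A small numeric check of that formula, assuming compute_normalizing_coefficients returns the per-array minima and maxima:

import numpy as np

sample_nll1 = np.array([0.2, 0.8, 1.4])    # illustrative FlowS NLLs
sample_nll2 = np.array([3.0, 9.0, 15.0])   # illustrative FlowD NLLs

min1, max1 = sample_nll1.min(), sample_nll1.max()
min2, max2 = sample_nll2.min(), sample_nll2.max()

span = (max1 - min1) + (max2 - min2)
sample_nll = ((max1 - min1) / span) * sample_nll1 \
           + ((max2 - min2) / span) * sample_nll2

# FlowD has the larger range (12 vs. 1.2), so it carries roughly 91% of the weight
print(sample_nll)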