Example #1
    def evaluate(self, model_path):
        self.model.eval()
        out_dict = {}
        eval_arr = []
        table = PrettyTable()
        # restore the trained weights before evaluating
        checkpoint = torch.load(model_path)
        self.model.load_state_dict(checkpoint)
        table.title = 'Eval result'
        table.field_names = ['ID', 'Precision', 'Recall', 'F-score']
        table.float_format = '1.3'

        inference_time = []
        with h5py.File(self.config.data_path, 'r') as data_file:
            for feature, label, idx in tqdm(self.test_dataset,
                                            desc='Evaluate',
                                            ncols=80,
                                            leave=False):
                if self.config.gpu:
                    feature = feature.cuda()

                # time a single forward pass for this video
                start = time.time()
                pred_score = self.model(feature.unsqueeze(0)).squeeze(0)
                inference_time.append(time.time() - start)

                # keep the probability of the positive (keyframe) class
                pred_score = torch.softmax(pred_score, dim=0)[1]
                video_info = data_file['video_' + str(idx)]
                pred_score, pred_selected, pred_summary = eval.select_keyshots(
                    video_info, pred_score)
                true_summary_arr = video_info['user_summary'][()]
                # average precision/recall/F-score over every user reference summary
                eval_res = [
                    eval.eval_metrics(pred_summary, true_summary)
                    for true_summary in true_summary_arr
                ]
                eval_res = np.mean(eval_res, axis=0).tolist()

                eval_arr.append(eval_res)
                table.add_row([idx] + eval_res)

                out_dict[idx] = {
                    'pred_score': pred_score,
                    'pred_selected': pred_selected,
                    'pred_summary': pred_summary
                }

        eval_mean = np.mean(eval_arr, axis=0).tolist()
        table.add_row(['mean'] + eval_mean)
        tqdm.write(str(table))
        print('Mean inference time: {:.4f}s'.format(np.mean(inference_time)))
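
All three examples call eval.eval_metrics, which is not shown here. For keyshot evaluation this is commonly a per-frame overlap between two binary summaries; a minimal, self-contained sketch under that assumption (not the project's exact code):

    import numpy as np

    def eval_metrics_sketch(pred_summary, true_summary):
        # pred_summary / true_summary: binary per-frame indicator vectors
        pred = np.asarray(pred_summary, dtype=bool)
        true = np.asarray(true_summary, dtype=bool)
        overlap = (pred & true).sum()
        precision = overlap / (pred.sum() + 1e-8)
        recall = overlap / (true.sum() + 1e-8)
        fscore = 2 * precision * recall / (precision + recall + 1e-8)
        return [float(precision), float(recall), float(fscore)]
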
Example #2
    def evaluate(self, epoch_i):
        self.model.eval()
        out_dict = {}
        eval_arr = []
        table = PrettyTable()
        table.title = 'Eval result of epoch {}'.format(epoch_i)
        table.field_names = ['ID', 'Precision', 'Recall', 'F-score']
        table.float_format = '1.3'

        with h5py.File(self.config.data_path, 'r') as data_file:
            for feature, label, idx in tqdm(self.test_dataset,
                                            desc='Evaluate',
                                            ncols=80,
                                            leave=False):
                if self.config.gpu:
                    feature = feature.cuda()
                pred_score = self.model(feature.unsqueeze(0)).squeeze(0)
                # keep the probability of the positive (keyframe) class
                pred_score = torch.nn.functional.softmax(pred_score, dim=0)[1]
                # idx arrives as a 0-dim tensor; extract the plain integer id once
                # instead of regex-parsing its repr (e.g. "tensor(5)")
                video_idx = str(int(idx))
                video_info = data_file['video_' + video_idx]
                pred_score, pred_selected, pred_summary = eval.select_keyshots(
                    video_info, pred_score)
                true_summary_arr = video_info['user_summary'][()]
                eval_res = [
                    eval.eval_metrics(pred_summary, true_summary)
                    for true_summary in true_summary_arr
                ]
                eval_res = np.mean(eval_res, axis=0).tolist()

                eval_arr.append(eval_res)
                table.add_row([idx] + eval_res)

                out_dict[video_idx] = {
                    'pred_score': pred_score,
                    'pred_selected': pred_selected,
                    'pred_summary': pred_summary
                }

        score_save_path = self.config.score_dir + '/epoch-{}.json'.format(
            epoch_i)
        with open(score_save_path, 'w') as f:
            tqdm.write('Save score at {}'.format(str(score_save_path)))
            json.dump(out_dict, f)
        eval_mean = np.mean(eval_arr, axis=0).tolist()
        table.add_row(['mean'] + eval_mean)
        tqdm.write(str(table))
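
Example #2 additionally persists the per-video predictions to score_dir/epoch-{n}.json. A minimal reader sketch for that file (the path below is hypothetical; the layout follows the json.dump above):

    import json

    with open('scores/epoch-5.json') as f:  # hypothetical path
        scores = json.load(f)
    for video_id, result in scores.items():
        print(video_id, result['pred_selected'][:5], len(result['pred_summary']))
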
Example #3
    def evaluate(self, epoch_i):
        out_dict = {}
        eval_arr = []
        table = PrettyTable()
        table.title = 'Evaluation Result of epoch {}'.format(epoch_i)
        table.field_names = ['ID', 'Precision', 'Recall', 'F-Score']
        table.float_format = '1.5'

        with h5py.File(self.config.data_path, 'r') as data_file:
            for feature, label, index in tqdm(self.test_dataset,
                                              desc='Evaluate',
                                              ncols=90,
                                              leave=False):

                # Keras-style forward pass; features reshaped to (batch, 320, 1024)
                pred_score = self.model.predict(feature.reshape(-1, 320, 1024))
                video_info = data_file['video_' + str(index)]
                pred_score, pred_selected, pred_summary = eval.select_keyshots(
                    video_info, pred_score)
                true_summary_arr = video_info['user_summary'][:]
                eval_res = [
                    eval.eval_metrics(pred_summary, true_summary)
                    for true_summary in true_summary_arr
                ]
                eval_res = np.mean(eval_res, axis=0).tolist()

                eval_arr.append(eval_res)
                table.add_row([index] + eval_res)

                out_dict[str(index)] = {
                    'pred_score': pred_score,
                    'pred_selected': pred_selected,
                    'pred_summary': pred_summary
                }

        score_save_path = self.config.score_dir + '/epoch-{}.json'.format(
            epoch_i)
        with open(score_save_path, 'w') as f:
            tqdm.write('Save score at {}'.format(str(score_save_path)))
            json.dump(out_dict, f)
        eval_mean = np.mean(eval_arr, axis=0).tolist()
        table.add_row(['mean'] + eval_mean)
        tqdm.write(str(table))
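
eval.select_keyshots is also not shown. In this line of work it typically converts frame scores into shot scores and selects shots under a length budget (often around 15% of the video) with a 0/1 knapsack. A simplified, self-contained sketch under those assumptions (names and the budget are illustrative, not the project's code):

    import numpy as np

    def select_keyshots_sketch(frame_scores, shot_bounds, n_frames, budget=0.15):
        # frame_scores: per-frame importance; shot_bounds: list of (start, end) frame ranges
        frame_scores = np.asarray(frame_scores, dtype=float)
        shot_scores = [frame_scores[s:e].mean() for s, e in shot_bounds]
        lengths = [e - s for s, e in shot_bounds]
        cap = int(n_frames * budget)
        # 0/1 knapsack: maximize total shot score within the frame budget
        dp = np.zeros((len(shot_bounds) + 1, cap + 1))
        for i in range(1, len(shot_bounds) + 1):
            for c in range(cap + 1):
                dp[i, c] = dp[i - 1, c]
                if lengths[i - 1] <= c:
                    dp[i, c] = max(dp[i, c],
                                   dp[i - 1, c - lengths[i - 1]] + shot_scores[i - 1])
        # backtrack to recover which shots were selected
        selected, c = [], cap
        for i in range(len(shot_bounds), 0, -1):
            if dp[i, c] != dp[i - 1, c]:
                selected.append(i - 1)
                c -= lengths[i - 1]
        # binary per-frame summary, as consumed by eval_metrics above
        summary = np.zeros(n_frames, dtype=int)
        for i in selected:
            s, e = shot_bounds[i]
            summary[s:e] = 1
        return summary

    # e.g. select_keyshots_sketch(scores, [(0, 40), (40, 90)], n_frames=90)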