# Example #1 (scraped marker; original: "Beispiel #1", score 0)
def evaluation_captions(result):
    """Score predicted captions against references and return the BLEU-1 score.

    Parameters
    ----------
    result : dict
        Maps image_id -> {'Pred Sent': {idx: sentence, ...},
                          'Real Sent': {idx: sentence, ...}}.

    Side effects
    ------------
    Prints the full metric dict and appends it to ./results/BLEUS.txt
    (the directory is assumed to exist — TODO confirm with caller).

    Returns
    -------
    The 'Bleu_1' entry of the dict produced by calculate_metrics.
    """
    datasetGTS = {'annotations': []}
    datasetRES = {'annotations': []}

    for i, image_id in enumerate(result):
        # The per-image sentence dicts are keyed by sentence index; joining
        # .values() preserves the original key-iteration order.
        pred_sent = '. '.join(result[image_id]['Pred Sent'].values())
        # Empty reference sentences are dropped so they don't produce
        # consecutive '. ' separators in the joined caption.
        real_sent = '. '.join(
            sent for sent in result[image_id]['Real Sent'].values() if sent)
        # image_id is replaced by the enumeration index i in both datasets,
        # which keeps ground truth and results aligned by position.
        datasetGTS['annotations'].append({'image_id': i, 'caption': real_sent})
        datasetRES['annotations'].append({'image_id': i, 'caption': pred_sent})

    eva_scores = calculate_metrics(range(len(result)), datasetGTS, datasetRES)
    print(eva_scores)
    # BUG FIX: the original called f.writelines(str(eva_scores)), which only
    # works because writelines() iterates the string's characters; write()
    # expresses the intent directly and emits a single trailing newline.
    with open('./results/BLEUS.txt', 'a') as f:
        f.write(str(eva_scores) + '\n')
    return eva_scores['Bleu_1']
# Example #2 (scraped marker; original: "Beispiel #2", score 0)
    # NOTE(review): fragment of a CLI entry point — the enclosing definition
    # and the `parser = argparse.ArgumentParser()` line are outside this view.
    # --result_path: JSON file mapping image_id -> {'Pred Sent', 'Real Sent'}.
    parser.add_argument(
        '--result_path',
        type=str,
        default='./report_v4_models/v4/20210405-09:05/results/debug.json')
    args = parser.parse_args()

    # load_json is defined elsewhere; presumably returns the dict of
    # per-image predicted/reference sentence dicts — TODO confirm.
    test = load_json(args.result_path)
    # COCO-style containers consumed by calculate_metrics:
    # ground-truth annotations vs. model-result annotations.
    datasetGTS = {'annotations': []}
    datasetRES = {'annotations': []}

    for i, image_id in enumerate(test):
        # Collect the predicted sentences (dict keyed by sentence index)
        # in key-iteration order and join them into one caption string.
        array = []
        for each in test[image_id]['Pred Sent']:
            array.append(test[image_id]['Pred Sent'][each])
        pred_sent = '. '.join(array)

        # Same for the reference sentences, skipping empty ones so the
        # joined caption has no consecutive '. ' separators.
        array = []
        for each in test[image_id]['Real Sent']:
            sent = test[image_id]['Real Sent'][each]
            if len(sent) != 0:
                array.append(sent)
        real_sent = '. '.join(array)
        # The enumeration index i stands in for image_id in both datasets,
        # keeping GTS and RES entries aligned by position.
        datasetGTS['annotations'].append({'image_id': i, 'caption': real_sent})
        datasetRES['annotations'].append({'image_id': i, 'caption': pred_sent})

    rng = range(len(test))
    result_dict = calculate_metrics(rng, datasetGTS, datasetRES)
    print("\nIU-XRAY Test Dataset Evaluation Metrics & Scores:")
    # Rounding is for display only; result_dict itself is left unchanged.
    for key in result_dict.keys():
        print(key, round(result_dict[key], 5))
# Example #3 (scraped marker; original: "Beispiel #3", score 0)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # --result_path: JSON file mapping image_id -> {'Pred Sent', 'Real Sent'}
    # sentence dicts (each keyed by sentence index).
    parser.add_argument('--result_path',
                        type=str,
                        default='./results/clean_test.json')
    args = parser.parse_args()

    # load_json is defined elsewhere in this project.
    test = load_json(args.result_path)
    # COCO-style containers consumed by calculate_metrics.
    datasetGTS = {'annotations': []}
    datasetRES = {'annotations': []}

    for i, image_id in enumerate(test):
        # Joining .values() preserves the original key-iteration order.
        pred_sent = '. '.join(test[image_id]['Pred Sent'].values())
        # Drop empty reference sentences before joining.
        real_sent = '. '.join(
            sent for sent in test[image_id]['Real Sent'].values() if sent)
        # The enumeration index i stands in for image_id, keeping GTS and
        # RES entries aligned by position.
        datasetGTS['annotations'].append({'image_id': i, 'caption': real_sent})
        datasetRES['annotations'].append({'image_id': i, 'caption': pred_sent})

    rng = range(len(test))
    # BUG FIX: original used a Python 2 print statement
    # (`print calculate_metrics(...)`), a SyntaxError under Python 3.
    print(calculate_metrics(rng, datasetGTS, datasetRES))
    def generate_hier_with_spatial(self, epoch_id, Cider_max):
        """Generate hierarchical captions on the validation set and score them.

        Samples sentence-by-sentence captions from the dual-view
        (frontal/lateral) model for every validation batch, pairs them with
        the ground-truth sentences, computes caption metrics, and persists
        the per-image results as JSON when the CIDEr score beats Cider_max.

        Args:
            epoch_id: identifier forwarded to self.__save_json (used for
                naming the saved results — TODO confirm).
            Cider_max: best CIDEr seen so far; results are saved only when
                this run's CIDEr exceeds it.

        Returns:
            Tuple (Bleu_1, Bleu_2, Bleu_3, Bleu_4, CIDEr, METEOR, ROUGE_L)
            taken from the dict returned by calculate_metrics.
        """
        # Switch both sub-models to eval mode (disables dropout/BN updates).
        self.extractor.eval()
        self.multi_rnn_model.eval()

        # image-prefix -> {'Pred Sent': {...}, 'Real Sent': {...}}
        results = {}

        # NOTE(review): `probs` from the loader is unpacked but never used.
        for k, (frontal_images, lateral_images, image_names, captions,
                probs) in enumerate(self.val_data_loader):
            frontal_images = self._to_var(frontal_images, requires_grad=False)
            lateral_images = self._to_var(lateral_images, requires_grad=False)

            # Per-view spatial feature maps (V_*) and global features (v_g_*).
            V_frontal, v_g_frontal, V_lateral, v_g_lateral = self.extractor.forward(
                frontal_images, lateral_images)

            # image_names entries appear to be 1-tuples like ('<prefix>_...',);
            # the part before the first '_' keys all per-image dicts — verify.
            pred_sentences = {}
            real_sentences = {}
            for image_name in image_names:
                image_prefix = image_name[0].split('_')[0]
                pred_sentences[image_prefix] = {}
                real_sentences[image_prefix] = {}

            # sampled_captions: indexed [batch, sentence, token] by the loops
            # below — TODO confirm against multi_rnn_model.sample.
            sampled_captions = self.multi_rnn_model.sample(
                v_g_frontal, V_frontal, v_g_lateral, V_lateral)
            # self.args.s_max: number of sentences sampled per image.
            for i in range(self.args.s_max):

                # Token ids of the i-th sampled sentence for the whole batch.
                sampled_ids = sampled_captions[:, i, :]

                # NOTE(review): `id` shadows the builtin here and below.
                for id, array in zip(image_names, sampled_ids):
                    image_prefix = id[0].split('_')[0]
                    pred_sentences[image_prefix][i] = self.__vec2sent(
                        array.cpu().detach().numpy())

            # Decode ground-truth sentences; sent[1:] skips the first token
            # (presumably a start-of-sentence marker — TODO confirm).
            for id, array in zip(image_names, captions):
                image_prefix = id[0].split('_')[0]
                for i, sent in enumerate(array):
                    real_sentences[image_prefix][i] = self.__vec2sent(sent[1:])

            for image_name in image_names:
                id = image_name[0].split('_')[0]
                results[id] = {
                    'Pred Sent': pred_sentences[id],
                    'Real Sent': real_sentences[id]
                }

        # COCO-style containers consumed by calculate_metrics.
        datasetGTS = {'annotations': []}
        datasetRES = {'annotations': []}

        for i, image_id in enumerate(results):
            # Join predicted sentences (dict keyed by sentence index)
            # in key-iteration order into one caption string.
            array = []
            for each in results[image_id]['Pred Sent']:
                array.append(results[image_id]['Pred Sent'][each])
            pred_sent = '. '.join(array)

            # Same for references, skipping empty sentences.
            array = []
            for each in results[image_id]['Real Sent']:
                sent = results[image_id]['Real Sent'][each]
                if len(sent) != 0:
                    array.append(sent)
            real_sent = '. '.join(array)
            # Enumeration index i stands in for the image prefix, keeping
            # GTS and RES entries aligned by position.
            datasetGTS['annotations'].append({
                'image_id': i,
                'caption': real_sent
            })
            datasetRES['annotations'].append({
                'image_id': i,
                'caption': pred_sent
            })

        rng = range(len(results))

        evaluations = calculate_metrics(rng, datasetGTS, datasetRES)
        print(type(evaluations))
        print(evaluations)
        # Persist per-image results only when this epoch improves on the
        # best CIDEr seen so far.
        if evaluations['CIDEr'] > Cider_max:
            self.__save_json(results, epoch_id)
        return evaluations['Bleu_1'], evaluations['Bleu_2'], evaluations[
            'Bleu_3'], evaluations['Bleu_4'], evaluations[
                'CIDEr'], evaluations['METEOR'], evaluations['ROUGE_L']