Example #1
def nn_metrics():
    image_root = eval_generation.determine_image_pattern('birds_fg', '')
    vocab_file = '%s/%s.txt' % (eval_generation.determine_vocab_folder('birds_fg', ''),
                                'CUB_vocab_noUNK')
    vocab = open_txt(vocab_file)
    # ground-truth annotations for the test split
    anno_path = eval_generation.determine_anno_path('birds_fg', 'test')
    sg = eval_generation.build_sequence_generator(anno_path, 100, image_root,
                                                  vocab=vocab, max_words=50)

    caption_experiment = eval_generation.CaptionExperiment(sg=sg)
    caption_experiment.score_generation(
        json_filename='generated_sentences/nearest_neighbor_baseline.json')
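These snippets rely on a few small I/O helpers (open_txt, read_json, save_json) that are defined elsewhere in the module; a minimal sketch of what they are assumed to do:

import json

def open_txt(path):
    # one entry (e.g. one vocabulary word) per line
    with open(path) as f:
        return [line.strip() for line in f]

def read_json(path):
    with open(path) as f:
        return json.load(f)

def save_json(obj, path):
    with open(path, 'w') as f:
        json.dump(obj, f)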
Example #2
def eval_class_meteor(tag):
    # Make the references for each image be all reference sentences from its class.

    gen_annotations = 'generated_sentences/birds_fg_test/gve_models/%s/beam1/generation_result.json' % tag

    if not os.path.isdir('cider_scores'):
        os.mkdir('cider_scores')

    dataset = 'birds_fg'
    split = 'test'
    # Sentences from val or test could be used instead; this metric can be thought of as a
    # measure between a generated sentence and the nearest-neighbor class in the reference set.
    gt_comp = 'train_noCub'

    image_root = eval_generation.determine_image_pattern(dataset, split)
    vocab_file = 'data/vocab.txt'
    vocab = open_txt(vocab_file)

    #combine gt annotations for each class
    anno_path_ref = eval_generation.determine_anno_path(dataset, gt_comp)
    ref_annotations = read_json(anno_path_ref)
    gen_annotations = read_json(gen_annotations)

    # Build a dict mapping each image to its reference captions (used as the tf-idf corpus).
    tfidf_dict = {}
    for a in ref_annotations['annotations']:
        im = a['image_id']
        if im not in tfidf_dict:
            tfidf_dict[im] = []
        tfidf_dict[im].append({
            'caption': a['caption'],
            'id': a['image_id'],
            'image_id': a['image_id']
        })

    # Group all reference annotations by class (the class id comes from the image path prefix).
    gt_class_annotations = {}
    for a in ref_annotations['annotations']:
        cl = int(a['image_id'].split('/')[0].split('.')[0]) - 1
        if cl not in gt_class_annotations:
            gt_class_annotations[cl] = {'all_images': []}
        gt_class_annotations[cl]['all_images'].append({
            'caption': a['caption'],
            'id': a['image_id'],
            'image_id': a['image_id']
        })

    # Build 200 per-class "test" sets, each holding only the generated captions for images
    # of that class.
    gen_class_annotations = {}
    for a in gen_annotations:
        cl = int(a['image_id'].split('/')[0].split('.')[0]) - 1
        im = a['image_id']
        if cl not in gen_class_annotations:
            gen_class_annotations[cl] = {}
        if im not in gen_class_annotations[cl]:
            gen_class_annotations[cl][im] = []
        gen_class_annotations[cl][im].append({
            'caption': a['caption'],
            'id': a['image_id'],
            'image_id': a['image_id']
        })

    # The tokenizer expects a dict mapping each id to a list of caption dicts.
    t = time.time()
    tokenizer = PTBTokenizer()
    tfidf_dict = tokenizer.tokenize(tfidf_dict)
    for key in gt_class_annotations:
        gt_class_annotations[key] = tokenizer.tokenize(
            gt_class_annotations[key])
    for key in gen_class_annotations:
        gen_class_annotations[key] = tokenizer.tokenize(
            gen_class_annotations[key])
    print "Time for tokenization: %f." % (time.time() - t)

    # Score each class's generated captions against the references of every ground-truth
    # class; keys are suffixed with the gt class id so the pairings stay distinct.
    score_dict = {}
    for cl in sorted(gen_class_annotations.keys()):
        gts = {}
        gen = {}
        t = time.time()
        for cl_gt in sorted(gt_class_annotations.keys()):
            for im in sorted(gen_class_annotations[cl].keys()):
                gen[im + ('_%d' % cl_gt)] = gen_class_annotations[cl][im]
                gts[im + ('_%d' % cl_gt)] = gt_class_annotations[cl_gt]['all_images']
        scores, im_ids = compute_cider(gen, gts)
        for s, ii in zip(scores, im_ids):
            score_dict[ii] = s
        print "Class %s took %f s." % (cl, time.time() - t)

    with open('cider_scores/cider_score_dict_%s.p' % tag, 'wb') as f:
        pkl.dump(score_dict, f)
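Despite its name, eval_class_meteor scores classes with CIDEr through compute_cider, which is not shown in this snippet. A minimal sketch of it, assuming it wraps the standard pycocoevalcap Cider scorer (the real helper may additionally seed the scorer's document frequencies from tfidf_dict):

from pycocoevalcap.cider.cider import Cider

def compute_cider(gen, gts):
    # gen and gts map each key to a list of tokenized caption strings,
    # which is the shape the PTBTokenizer output above already has.
    scorer = Cider()
    _, per_image_scores = scorer.compute_score(gts, gen)
    # per-image scores come back in the order the scorer iterated the keys
    return per_image_scores, list(gts.keys())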
Example #3
def shuffle_captions(args):
    #read gt captions
    image_root = eval_generation.determine_image_pattern(
        args.dataset_name, args.split_name)
    anno_path = eval_generation.determine_anno_path(args.dataset_name,
                                                    args.split_name)
    # Build a reduced ground-truth set (anno_path is re-pointed at it below): all but one
    # caption per image become the references, and the held-out caption is kept separately
    # as a pseudo "generated" caption.
    gt_captions = read_json(anno_path)
    gt_captions_small = {'type': 'captions', 'images': [], 'annotations': []}
    gt_gen_captions = []

    im_to_captions = {}
    for a in gt_captions['annotations']:
        if a['image_id'] in im_to_captions:
            im_to_captions[a['image_id']].append(a['caption'])
        else:
            im_to_captions[a['image_id']] = [a['caption']]

    count = 0
    for image_id in im_to_captions.keys():
        gt_caps = im_to_captions[image_id][1:]
        new_gt_a = [{
            'caption': gc,
            'id': count + i,
            'image_id': image_id
        } for i, gc in enumerate(gt_caps)]
        count += len(gt_caps)
        new_gt_i = {'id': image_id, 'file_name': image_id}
        gt_captions_small['annotations'].extend(new_gt_a)
        gt_captions_small['images'].append(new_gt_i)

        new_val_a = {
            'image_id': image_id,
            'caption': im_to_captions[image_id][0]
        }
        gt_gen_captions.append(new_val_a)

    save_json(gt_captions_small, 'tmp_gt_json.json')
    anno_path = os.getcwd() + '/tmp_gt_json.json'

    vocab_file = '%s/%s.txt' % (eval_generation.determine_vocab_folder(
        args.dataset_name, args.split_name), args.vocab)
    vocab = open_txt(vocab_file)

    sg = eval_generation.build_sequence_generator(anno_path,
                                                  100,
                                                  image_root,
                                                  vocab=vocab,
                                                  max_words=50)

    def compute_metrics(json_filename):
        caption_experiment = eval_generation.CaptionExperiment(sg=sg)
        caption_experiment.score_generation(json_filename=json_filename)

    def no_shuffle(val_generated):
        save_json(val_generated, 'tmp_json_out.json')
        compute_metrics('tmp_json_out.json')
        os.remove('tmp_json_out.json')

    def shuffle_all(gen_caps):
        # Shuffle captions across all images, ignoring class.
        all_caps = [g['caption'] for g in gen_caps]
        random.shuffle(all_caps)
        val_generated = []
        for count, key in enumerate(im_to_captions.keys()):
            val_generated.append({'image_id': key, 'caption': all_caps[count]})
        save_json(val_generated, 'tmp_json_out.json')
        compute_metrics('tmp_json_out.json')
        os.remove('tmp_json_out.json')

    def shuffle_classes(gen_caps):
        # Shuffle captions only among images of the same class.
        val_classes = open_txt(bird_dataset_path +
                               'zero_shot_splits/valclasses.txt')
        class_captions = {}
        for g in gen_caps:
            c = g['image_id'].split('/')[0]
            if c in class_captions:
                class_captions[c].append(g['caption'])
            else:
                class_captions[c] = [copy.deepcopy(g['caption'])]
        for c in class_captions:
            random.shuffle(class_captions[c])
        count_classes = {}
        for c in class_captions:
            count_classes[c] = 0

        val_generated = []
        for g in gen_caps:
            c = g['image_id'].split('/')[0]
            class_caption = class_captions[c][count_classes[c]]
            count_classes[c] += 1
            val_generated.append({
                'image_id': g['image_id'],
                'caption': class_caption
            })
        save_json(val_generated, 'tmp_json_out.json')
        compute_metrics('tmp_json_out.json')
        os.remove('tmp_json_out.json')

    # Shuffle experiments on ground-truth captions.
    print "Running shuffle experiments: No shuffle gt captions..."
    no_shuffle(gt_gen_captions)

    print "Running shuffle experiments: Randomly shuffle within class..."
    shuffle_classes(gt_gen_captions)

    print "Running shuffle experiments: Randomly shuffle gt captions..."
    shuffle_all(gt_gen_captions)

    gen_captions = read_json(
        'generated_sentences/birds_from_scratch_zsSplit_freezeConv_iter_20000.generation_result.json'
    )

    print "Running shuffle experiments: No shuffle lrcn captions..."
    no_shuffle(gen_captions)

    print "Running shuffle experiments: Randomly shuffle within class..."
    shuffle_classes(gen_captions)

    print "Running shuffle experiments: Randomly shuffle lrcn captions..."
    shuffle_all(gen_captions)
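shuffle_captions expects an args object with dataset_name, split_name, and vocab attributes; a hypothetical driver along these lines (the defaults are illustrative assumptions, not taken from the source):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', default='birds_fg')   # assumed default
    parser.add_argument('--split_name', default='val')          # assumed default
    parser.add_argument('--vocab', default='CUB_vocab_noUNK')   # assumed default
    args = parser.parse_args()
    shuffle_captions(args)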