def language_eval(pred, split, params):
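    """Score generated referring expressions against the REFER annotations:
    standard metrics via RefEvaluation (with CIDEr-r for refgta) plus the
    cross-reference CIDEr score via CrossEvaluation.  `pred` is a list of
    prediction dicts carrying at least 'ref_id' and 'sent'; returns the
    overall metric dict."""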
    sys.path.insert(0, osp.join('pyutils', 'refer2'))
    if 'refcoco' in params['dataset']:
        image_root = params['coco_image_root']
    else:
        image_root = params['gta_image_root']
    from refer import REFER
    refer = REFER(params['data_root'],
                  image_root,
                  params['dataset'],
                  params['splitBy'],
                  old_version=False)

    sys.path.insert(0, osp.join('pyutils', 'refer2', 'evaluation'))
    from refEvaluation import RefEvaluation
    eval_cider_r = params['dataset'] == 'refgta'
    refEval = RefEvaluation(refer, pred, eval_cider_r=eval_cider_r)
    refEval.evaluate()
    overall = {}
    for metric, score in refEval.eval.items():
        overall[metric] = score
    print(overall)
    from crossEvaluation import CrossEvaluation
    ceval = CrossEvaluation(refer, pred)
    ceval.cross_evaluate()
    ceval.make_ref_to_evals()
    ref_to_evals = ceval.ref_to_evals
    ceval.Xscore('CIDEr')
    return overall
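
# Usage sketch for language_eval (hypothetical `pred_path`; the json is the one
# the generation step writes under cache/lang/<dataset>_<splitBy>/):
#
#   preds = json.load(open(pred_path))['predictions']  # [{ref_id, beams, sent}]
#   overall = language_eval(preds, params['split'], params)
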
def main(params):
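    """Rerank beam-searched referring expressions: load the cached beams and
    confusion scores, compute unary and pairwise potentials, solve the joint
    beam selection as a binary ILP (bilp, via CPLEX), then evaluate the
    reranked sentences with RefEvaluation and CrossEvaluation."""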

    # set up lambda
    if params['dataset'] == 'refcoco':
        lambda1 = 5  # 5
        lambda2 = 5
    elif params['dataset'] == 'refcoco+':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refcocog':
        lambda1 = 5
        lambda2 = 5
    else:
        raise ValueError('No such dataset option: ' + params['dataset'])

    # load generated sentences
    data_path = osp.join(
        'cache/lang', params['dataset'] + '_' + params['splitBy'],
        params['model_id'] + '_' + params['split'] + '_beam' +
        str(params['beam_size']) + '.json')
    data = json.load(open(data_path))
    data = data[
        'predictions']  # [{ref_id, beams, sent}], and beams = [{ppl, sent, logp}]

    # load confusion score, i.e., img_to_ref_confusion, img_to_ref_ids
    info_path = osp.join(
        'cache/lang', params['dataset'] + '_' + params['splitBy'],
        params['model_id'] + '_' + params['split'] + '_beam' +
        str(params['beam_size']) + '_confusion.json')
    info = json.load(open(info_path))
    img_to_ref_confusion = info[
        'img_to_ref_confusion']  # careful, key in string format
    img_to_ref_ids = info['img_to_ref_ids']  # careful, key in string format

    # make ref_to_beams
    ref_to_beams = {item['ref_id']: item['beams'] for item in data}

    # add ref_id to each beam
    for ref_id, beams in ref_to_beams.items():
        for beam in beams:
            beam['ref_id'] = ref_id  # make up ref_id in beam

    # compute unary potential, img_to_ref_unary
    # process one image at a time
    Res = []
    for image_id in img_to_ref_confusion:
        # ref_ids and confusion matrices for this image
        img_ref_ids = img_to_ref_ids[image_id]
        ref_to_confusion = img_to_ref_confusion[image_id]
        # compute unary potential for each ref_id
        for ref_id in img_ref_ids:
            confusion = ref_to_confusion[str(
                ref_id)]  # (beam_size, #img_ref_ids)
            beams = ref_to_beams[ref_id]  # [{ppl, sent, logp}] of beam_size
            compute_unary(ref_id, beams, confusion, img_ref_ids, lambda1,
                          lambda2)

        # build index mappings between (ref_id, beam) pairs and flat positions
        ref_beam_to_ix, ix_to_ref_beam, all_beams = make_index(
            img_ref_ids, ref_to_beams)

        # compute pairwise potentials
        pairwise_ref_beam_ids = compute_pairwise(img_ref_ids, ref_to_beams)

        # call cplex
        res = bilp(img_ref_ids, ref_to_beams, all_beams, pairwise_ref_beam_ids,
                   ref_beam_to_ix)
        Res += res

    # evaluate: set up REFER and the evaluators here, mirroring language_eval
    # above (this main() never builds `refer` itself)
    sys.path.insert(0, osp.join('pyutils', 'refer2'))
    sys.path.insert(0, osp.join('pyutils', 'refer2', 'evaluation'))
    from refer import REFER
    from refEvaluation import RefEvaluation
    from crossEvaluation import CrossEvaluation
    refer = REFER(params['data_root'], params['coco_image_root'],
                  params['dataset'], params['splitBy'], old_version=False)
    refEval = RefEvaluation(refer, Res)
    refEval.evaluate()
    overall = {}
    for metric, score in refEval.eval.items():
        overall[metric] = score
    print(overall)

    if params['write_result'] > 0:
        file_name = params['model_id'] + '_' + params[
            'split'] + '_beamrerank.json'
        result_path = osp.join('cache', 'lang',
                               params['dataset'] + '_' + params['splitBy'],
                               file_name)
        refToEval = refEval.refToEval
        for res in Res:
            ref_id, sent = res['ref_id'], res['sent']
            refToEval[ref_id]['sent'] = sent
        with open(result_path[:-5] + '_out.json', 'w') as outfile:
            json.dump({'overall': overall, 'refToEval': refToEval}, outfile)

    # CrossEvaluation takes as input [{ref_id, sent}]
    ceval = CrossEvaluation(refer, Res)
    ceval.cross_evaluate()
    ceval.make_ref_to_evals()
    ref_to_evals = ceval.ref_to_evals  # ref_to_evals = {ref_id: {ref_id: {method: score}}}

    # compute cross score
    xcider = ceval.Xscore('CIDEr')
Example #3
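# NOTE: this fragment is not self-contained; it assumes `params`, `result_path`
# (the prediction json written by the generation step) and a REFER instance
# `refer` have already been created, e.g. as in language_eval above.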
# load predictions
Res = json.load(open(result_path, 'r'))['predictions']  # [{'ref_id', 'sent'}]

# regular evaluate
sys.path.insert(0, osp.join('pyutils', 'refer2', 'evaluation'))
from refEvaluation import RefEvaluation
refEval = RefEvaluation(refer, Res)
refEval.evaluate()
overall = {}
for metric, score in refEval.eval.items():
    overall[metric] = score
print(overall)

if params['write_result'] > 0:
    refToEval = refEval.refToEval
    for res in Res:
        ref_id, sent = res['ref_id'], res['sent']
        refToEval[ref_id]['sent'] = sent
    with open(result_path[:-5] + '_out.json', 'w') as outfile:
        json.dump({'overall': overall, 'refToEval': refToEval}, outfile)

# cross evaluate
from crossEvaluation import CrossEvaluation
ceval = CrossEvaluation(refer, Res)
ceval.cross_evaluate()
ceval.make_ref_to_evals()
ref_to_evals = ceval.ref_to_evals  # ref_to_evals = {ref_id: {ref_id: {method: score}}}
# compute cross score
ceval.Xscore('CIDEr')
Example #4
def main(params):
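    """Rerank cached beam candidates using listener scores: a visual encoder,
    a (possibly attention-based) language encoder and a CCA joint embedding
    (Chainer, on GPU) produce the per-image confusion matrices, which feed the
    unary/pairwise potentials of a binary ILP solved via CPLEX; the reranked
    sentences are then evaluated with RefEvaluation and CrossEvaluation."""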
    target_save_dir = osp.join(params['save_dir'],'prepro', params['dataset']+'_'+params['splitBy'])
    
    if params['old']:
        params['data_json'] = 'old'+params['data_json']
        params['data_h5'] = 'old'+params['data_h5']
        params['image_feats_h5'] = 'old'+params['image_feats_h5']
        params['ann_feats_h5'] = 'old'+params['ann_feats_h5']
        params['id'] = 'old'+params['id']
        
    with open(target_save_dir+params["split"]+'_'+params['id']+str(params['beam_width'])+'.json') as f:
        data =  json.load(f)
    ref_to_beams = {item['ref_id']: item['beam'] for item in data}
    
    # add ref_id to each beam
    for ref_id, beams in ref_to_beams.items():
        for beam in beams:  
            beam['ref_id'] = ref_id  # make up ref_id in beam
            
    loader = DataLoader(params)
    featsOpt = {'ann':osp.join(target_save_dir, params['ann_feats_h5']),
                'img':osp.join(target_save_dir, params['image_feats_h5'])}
    loader.loadFeats(featsOpt) 
    chainer.config.train = False
    chainer.config.enable_backprop = False
    
    gpu_id = params['gpu_id']
    cuda.get_device(gpu_id).use()
    xp = cuda.cupy
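    # visual encoder, (attention-based) language encoder and CCA joint
    # embedding: calc_confusion uses them to score every beam against every
    # candidate region in the image, giving the (beam_size, #img_ref_ids)
    # confusion matrices used below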
    
    ve = VisualEncoder().to_gpu(gpu_id)
    if 'attention' in params['id']:
        le = LanguageEncoderAttn(len(loader.ix_to_word)).to_gpu(gpu_id)
    else:
        le = LanguageEncoder(len(loader.ix_to_word)).to_gpu(gpu_id)
    cca = CcaEmbedding().to_gpu(gpu_id)
    serializers.load_hdf5(params['model_root']+params['dataset']+'_'+params['splitBy']+'/'+params['id']+"ve.h5", ve)
    serializers.load_hdf5(params['model_root']+params['dataset']+'_'+params['splitBy']+'/'+params['id']+"le.h5", le)
    serializers.load_hdf5(params['model_root']+params['dataset']+'_'+params['splitBy']+'/'+params['id']+"cca.h5", cca)        
    
    img_to_ref_ids, img_to_ref_confusion = calc_confusion(loader, data, ref_to_beams, ve, le, cca, params, xp)
    
    sys.path.insert(0, osp.join('pyutils', 'refer2'))
    sys.path.insert(0, osp.join('pyutils', 'refer2', 'evaluation'))
    from refer import REFER
    from refEvaluation import RefEvaluation
    from crossEvaluation import CrossEvaluation
    refer = REFER(params['data_root'], params['dataset'], params['splitBy'], old_version=params['old'])
    
    if params['dataset'] == 'refcoco':
        lambda1 = 5  
        lambda2 = 5
    elif params['dataset'] == 'refcoco+':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refcocog':
        lambda1 = 5
        lambda2 = 5
    else:
        raise ValueError('No such dataset option: ' + params['dataset'])
        

    # compute unary potential, img_to_ref_unary
    # process one image at a time
    Res = []
    for image_id in img_to_ref_confusion:
        # ref_ids and confusion matrices for this image
        img_ref_ids = img_to_ref_ids[image_id]
        ref_to_confusion = img_to_ref_confusion[image_id]
        # compute unary potential for each ref_id
        for ref_id in img_ref_ids:
            confusion = ref_to_confusion[ref_id]  # (beam_size, #img_ref_ids)
            beams = ref_to_beams[ref_id]  # [{ppl, sent, logp}] of beam_size
            compute_unary(ref_id, beams, confusion, img_ref_ids, lambda1, lambda2)

        # build index mappings between (ref_id, beam) pairs and flat positions
        ref_beam_to_ix, ix_to_ref_beam, all_beams = make_index(img_ref_ids, ref_to_beams)

        # compute pairwise potentials
        pairwise_ref_beam_ids = compute_pairwise(img_ref_ids, ref_to_beams)

        # call cplex
        res = bilp(img_ref_ids, ref_to_beams, all_beams, pairwise_ref_beam_ids, ref_beam_to_ix, loader)
        Res += res
    # evaluate
    refEval = RefEvaluation(refer, Res)
    refEval.evaluate()
    overall = {}
    for metric, score in refEval.eval.items():
        overall[metric] = score
    print(overall)

    if params['write_result'] > 0:
        file_name = params['model_id']+'_'+params['split']+'_beamrerank.json'
        result_path = osp.join('cache', 'lang', params['dataset']+'_'+params['splitBy'], file_name)
        refToEval = refEval.refToEval
        for res in Res:
            ref_id, sent = res['ref_id'], res['sent']
            refToEval[ref_id]['sent'] = sent
        with open(result_path[:-5] + '_out.json', 'w') as outfile:
            json.dump({'overall': overall, 'refToEval': refToEval}, outfile)

    # CrossEvaluation takes as input [{ref_id, sent}]
    ceval = CrossEvaluation(refer, Res)
    ceval.cross_evaluate()
    ceval.make_ref_to_evals()
    ref_to_evals = ceval.ref_to_evals  # ref_to_evals = {ref_id: {ref_id: {method: score}}}

    # compute cross score
    xcider = ceval.Xscore('CIDEr')
Example #5
def main(params):
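    """Same reranking pipeline as the previous example, but the confusion
    scores come from a pretrained ListenerReward model, and the refgta
    dataset (evaluated with CIDEr-r) is supported alongside the RefCOCO
    variants."""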
    target_save_dir = osp.join(params['save_dir'], 'prepro',
                               params['dataset'] + '_' + params['splitBy'])
    model_dir = osp.join(params['save_dir'], 'model',
                         params['dataset'] + '_' + params['splitBy'])

    if params['old']:
        params['data_json'] = 'old' + params['data_json']
        params['data_h5'] = 'old' + params['data_h5']
        params['image_feats'] = 'old' + params['image_feats']
        params['ann_feats'] = 'old' + params['ann_feats']
        params['id'] = 'old' + params['id']

    if params['dataset'] in ['refcoco', 'refcoco+', 'refcocog']:
        global_shapes = (224, 224)
        image_root = params['coco_image_root']
    elif params['dataset'] == 'refgta':
        global_shapes = (480, 288)
        image_root = params['gta_image_root']

    with open(target_save_dir + params["split"] + '_' + params['id'] +
              params['id2'] + str(params['beam_width']) + '.json') as f:
        data = json.load(f)
    ref_to_beams = {item['ref_id']: item['beam'] for item in data}

    # add ref_id to each beam
    for ref_id, beams in ref_to_beams.items():
        for beam in beams:
            beam['ref_id'] = ref_id  # make up ref_id in beam

    loader = DataLoader(params)
    featsOpt = {
        'sp_ann': osp.join(target_save_dir, params['sp_ann_feats']),
        'ann_input': osp.join(target_save_dir, params['ann_feats']),
        'img': osp.join(target_save_dir, params['image_feats']),
        'shapes': osp.join(target_save_dir, params['ann_shapes'])
    }
    loader.loadFeats(featsOpt)
    loader.shuffle('train')
    loader.loadFeats(featsOpt)
    chainer.config.train = False
    chainer.config.enable_backprop = False

    gpu_id = params['gpu_id']
    cuda.get_device(gpu_id).use()
    xp = cuda.cupy
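    # ListenerReward: pretrained listener/reward model; calc_confusion uses it
    # to score every beam against the candidate regions of each image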

    rl_crit = ListenerReward(len(loader.ix_to_word),
                             global_shapes=global_shapes).to_gpu(gpu_id)
    serializers.load_hdf5(osp.join(model_dir, params['id'] + ".h5"), rl_crit)
    #serializers.load_hdf5(osp.join(model_dir, "attn_rank.h5"), rl_crit)
    img_to_ref_ids, img_to_ref_confusion = calc_confusion(
        loader, data, ref_to_beams, rl_crit, params, xp)

    sys.path.insert(0, osp.join('pyutils', 'refer2'))
    sys.path.insert(0, osp.join('pyutils', 'refer2', 'evaluation'))
    from refer import REFER
    from refEvaluation import RefEvaluation
    from crossEvaluation import CrossEvaluation
    refer = REFER(params['data_root'],
                  image_root,
                  params['dataset'],
                  params['splitBy'],
                  old_version=params['old'])

    if params['dataset'] == 'refcoco':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refcoco+':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refcocog':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refgta':
        lambda1 = 5
        lambda2 = 5
    else:
        raise ValueError('No such dataset option: ' + params['dataset'])

    # compute unary potential, img_to_ref_unary
    # process one image at a time
    Res = []
    for image_id in img_to_ref_confusion:
        # ref_ids and confusion matrices for this image
        img_ref_ids = img_to_ref_ids[image_id]
        ref_to_confusion = img_to_ref_confusion[image_id]
        # compute unary potential for each ref_id
        for ref_id in img_ref_ids:
            confusion = ref_to_confusion[ref_id]  # (beam_size, #img_ref_ids)
            beams = ref_to_beams[ref_id]  # [{ppl, sent, logp}] of beam_size
            compute_unary(ref_id, beams, confusion, img_ref_ids, lambda1,
                          lambda2)

        # build index mappings between (ref_id, beam) pairs and flat positions
        ref_beam_to_ix, ix_to_ref_beam, all_beams = make_index(
            img_ref_ids, ref_to_beams)

        # compute pairwise potentials
        pairwise_ref_beam_ids = compute_pairwise(img_ref_ids, ref_to_beams)

        # call cplex
        res = bilp(img_ref_ids, ref_to_beams, all_beams, pairwise_ref_beam_ids,
                   ref_beam_to_ix, loader)
        Res += res
    # evaluate
    eval_cider_r = params['dataset'] == 'refgta'
    refEval = RefEvaluation(refer, Res, eval_cider_r=eval_cider_r)
    refEval.evaluate()
    overall = {}
    for metric, score in refEval.eval.items():
        overall[metric] = score
    print(overall)

    if params['write_result'] > 0:
        refToEval = refEval.refToEval
        for res in Res:
            ref_id, sent = res['ref_id'], res['sent']
            refToEval[ref_id]['sent'] = sent
        with open(params['id'] + params['id2'] + '_out.json', 'w') as outfile:
            json.dump({'overall': overall, 'refToEval': refToEval}, outfile)

    # CrossEvaluation takes as input [{ref_id, sent}]
    ceval = CrossEvaluation(refer, Res)
    ceval.cross_evaluate()
    ceval.make_ref_to_evals()
    ref_to_evals = ceval.ref_to_evals  # ref_to_evals = {ref_id: {ref_id: {method: score}}}

    # compute cross score
    xcider = ceval.Xscore('CIDEr')