Example #1
import argparse
from datetime import datetime

# initialization, evaluation, boolean_string and conf are assumed to come
# from the surrounding project (e.g. its config and model modules).
def modeltest():
    # This function is used for testing the Flask app.
    parser = argparse.ArgumentParser(description='Test')
    parser.add_argument(
        '--iter',
        default='-1',
        type=int,
        help='iter: iteration of the checkpoint to load. Default: -1')
    parser.add_argument(
        '--batch_size',
        default='1',
        type=int,
        help='batch_size: batch size for parallel test. Default: 1')
    parser.add_argument(
        '--cache',
        default=False,
        type=boolean_string,
        help='cache: if set as TRUE all the test data will be loaded at once'
        ' before the transforming start. Default: FALSE')
    opt = parser.parse_args()
    m = initialization(conf, test=opt.cache)[0]

    # load model checkpoint of iteration opt.iter
    print('Loading the model...')
    m.load()
    print('Transforming...')
    time = datetime.now()

    probe = m.transform('probe', opt.batch_size)
    gallery = m.transform('gallery', opt.batch_size)

    return evaluation(probe, gallery)
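
The --cache flag relies on a boolean_string helper that the snippet does not define. A minimal sketch of such an argparse type converter (the project's actual implementation may differ):

def boolean_string(s):
    # argparse passes the raw command-line token; accept only TRUE/FALSE
    # (case-insensitive) and convert it to a real bool.
    if s.upper() not in {'TRUE', 'FALSE'}:
        raise ValueError('Not a valid boolean string')
    return s.upper() == 'TRUE'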
Example #2
import json
import torch
# corpus_bleu is assumed to come from NLTK's bleu_score module.
from nltk.translate.bleu_score import corpus_bleu

# model, dataloader_val, dataset, dataset_val and utils are globals supplied
# by the surrounding training script.
def validate(opt):
    model.eval()
    
    data_iter_val = iter(dataloader_val)

    references = []
    hypotheses = []

    num_show = 0
    predictions = []
    
    # for step in range(len(dataloader_val)):  # full validation set (3551 batches)
    for step in range(2):  # debug run: evaluate only the first two batches
        imgid, img, proposals, gt_seqs, input_seqs, cat_embeds, \
                imgid_, img_, gt_bboxs_, gt_seq_, cat_embeds_, num = next(data_iter_val)
        
        # Trim padding: keep at most the largest per-sample count in this
        # batch (each column of `num` holds the counts for one tensor).
        proposals = proposals[:, :max(int(max(num[:, 0])), 1), :]
        gt_seqs = gt_seqs[:, :max(int(max(num[:, 1])), 1), :]
        cat_embeds = cat_embeds[:, :max(int(max(num[:, 2])), 1), :]
        gt_bboxs_ = gt_bboxs_[:, :max(int(max(num[:, 3])), 1), :]
        cat_embeds_ = cat_embeds_[:, :max(int(max(num[:, 4])), 1), :]
        
        # Length of each ground-truth sequence = its count of non-zero
        # (non-pad) tokens.
        gt_seqs_lens = torch.LongTensor([[len(seq.nonzero()) for seq in batch] for batch in gt_seqs]).unsqueeze(2)  # (batch, 5, 1)
        
        # Re-initialize the decoder input, seeding position 0 with <sos>.
        input_seqs = torch.zeros(opt.batch_size, opt.seq_length+1).long()
        input_seqs[:, 0] = dataset.word_idx['<sos>']
        
        if opt.iscuda:
            img = img.cuda()
            proposals = proposals.cuda()
            gt_seqs = gt_seqs.cuda()
            input_seqs = input_seqs.cuda()
            cat_embeds = cat_embeds.cuda()
            img_ = img_.cuda()
            gt_bboxs_ = gt_bboxs_.cuda()
            gt_seq_ = gt_seq_.cuda()
            cat_embeds_ = cat_embeds_.cuda()
            num = num.cuda()
            gt_seqs_lens = gt_seqs_lens.cuda()

        #eval_opt = {'sample_max':1, 'beam_size': opt.beam_size, 'inference_mode' : True, 'tag_size' : opt.cbs_tag_size}
        input_seqs, show_skeleton = model(img, proposals, gt_seqs, input_seqs, cat_embeds, \
                        img_, gt_bboxs_, gt_seq_, cat_embeds_, num, gt_seqs_lens, 'sample')        
        
        
        '''
        BLEU: references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], 
              hypotheses = [hyp1, hyp2, ...]
        '''

        # References: all ground-truth captions for each image, with <sos>
        # and pad (0) tokens dropped.
        for i in imgid:
            img_caps = dataset_val.coco_cap[i]
            references.append(list(
                        map(lambda c: [dataset.idx_word[str(w)] for w in c if w not in [dataset.word_idx['<sos>'], 0]],
                        img_caps)))
                
        # Hypotheses: truncate each generated sequence at its <eos> token
        # (keeping <eos>), then drop <sos> and pad (0) tokens.
        input_seqs = input_seqs.tolist()
        for i, c in enumerate(input_seqs):
            input_seqs[i] = input_seqs[i][1:(c.index(dataset.word_idx['<eos>'])+1)] if dataset.word_idx['<eos>'] in c\
                            else input_seqs[i][1:]
            hypotheses.append([dataset.idx_word[str(w)] for w in input_seqs[i] if w not in [dataset.word_idx['<sos>'], 0]])

        assert len(references) == len(hypotheses)
        
        
        '''
        COCOeval entry format: {'image_id': int,
                                'caption': str}
        (the original COCO caption evaluation toolkit targets Python 2.7)
        '''
        
        # Collect this batch's predictions in COCO result format (the [-10:]
        # slice appears to assume a batch size of 10).
        for k, sent in enumerate(hypotheses[-10:]):
            sent = ' '.join(sent) if sent[-1] != '<eos>' else ' '.join(sent[:-1])
            entry = {'image_id': int(imgid[k]), 'caption': sent}
            predictions.append(entry)
            if num_show < 20:  # show the first 20 predictions
                print('image %s: %s' % (imgid[k], entry['caption']))
                num_show += 1

        if step % 100 == 0:
            print('step: %d / %d' %(step+1, len(dataloader_val)))
                

    print('Total images to be evaluated: %d' % (len(predictions)))
    
    # BLEU-4 scores
    bleu4 = corpus_bleu(references, hypotheses)
    
    # COCOeval score
    json.dump(predictions, open(opt.val_res_dir + 'val_res.json', 'w'))
    lang_stats = utils.evaluation(predictions, opt.val_cap_path, opt.val_res_dir+'val_res.json')


    return bleu4, lang_stats
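
For reference, the list-of-lists layout described in the docstring above is exactly what NLTK's corpus_bleu consumes; a tiny self-contained illustration (the token lists are invented):

from nltk.translate.bleu_score import corpus_bleu

# One image with two reference captions, and one hypothesis for it.
references = [[['a', 'cat', 'sits', 'on', 'the', 'mat'],
               ['a', 'cat', 'is', 'on', 'the', 'mat']]]
hypotheses = [['a', 'cat', 'sits', 'on', 'the', 'mat']]
print('%.4f' % corpus_bleu(references, hypotheses))  # default weights give BLEU-4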
Example #3
import argparse
from datetime import datetime

# As in Example #1, initialization, evaluation, boolean_string and conf are
# assumed to come from the surrounding project.
parser = argparse.ArgumentParser(description='Test')
parser.add_argument(
    '--iter',
    default='-1',
    type=int,
    help='iter: iteration of the checkpoint to load. Default: -1')
parser.add_argument(
    '--batch_size',
    default='1',
    type=int,
    help='batch_size: batch size for parallel test. Default: 1')
parser.add_argument(
    '--cache',
    default=False,
    type=boolean_string,
    help='cache: if set as TRUE all the test data will be loaded at once'
    ' before the transforming start. Default: FALSE')
opt = parser.parse_args()

m = initialization(conf, test=opt.cache)[0]

# load model checkpoint of iteration opt.iter
print('Loading the model...')
m.load()
print('Transforming...')
time = datetime.now()

probe = m.transform('probe', opt.batch_size)
gallery = m.transform('gallery', opt.batch_size)

evaluation(probe, gallery)
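
Note that the string defaults above ('-1', '1') still produce ints: argparse parses a string default as if it came from the command line, applying the type callable. A quick standalone check:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--iter', default='-1', type=int)
opt = p.parse_args([])  # no CLI args, so the default is used
assert opt.iter == -1 and isinstance(opt.iter, int)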
Example #4
# load model checkpoint of iteration opt.iter
print('Loading the model of iteration %d...' % opt.iter)
m.load(opt.iter)
# Loads the model parameters saved at the given iteration.

print('Transforming...')
time = datetime.now()
test = m.transform('test', opt.batch_size)
# opt.batch_size defaults to 1.
# The returned `test` tuple contains, in order:
#   feature: ndarray of shape (num_samples, 62*255),
#   view_list, seq_type_list, label_list.

print('Evaluating...')
acc = evaluation(test, conf['data'])
# The cuda_dist function used by evaluation is not explicitly imported here,
# yet no error is raised: evaluation resolves it in its own module's namespace.

print('Evaluation complete. Cost:', datetime.now() - time)

# Print rank-1 accuracy of the best model
# e.g.
# ===Rank-1 (Include identical-view cases)===
# NM: 95.405,     BG: 88.284,     CL: 72.041
for i in range(1):  # only rank-1 is printed
    print('===Rank-%d (Include identical-view cases)===' % (i + 1))
    print('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (np.mean(
        acc[0, :, :, i]), np.mean(acc[1, :, :, i]), np.mean(acc[2, :, :, i])))

# Print rank-1 accuracy of the best model, excluding identical-view cases
# e.g.
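
The excluded-view output is cut off above. As a hedged sketch continuing the snippet, evaluations of this kind typically zero the diagonal of each view-by-view accuracy matrix before averaging; de_diag is an assumed helper name, and an 11x11 view grid is assumed:

import numpy as np

def de_diag(acc_matrix, each_angle=False):
    # Drop the diagonal (probe view == gallery view), then average the
    # remaining 10 gallery views per probe view.
    result = np.sum(acc_matrix - np.diag(np.diag(acc_matrix)), 1) / 10.0
    if not each_angle:
        result = np.mean(result)
    return result

for i in range(1):
    print('===Rank-%d (Exclude identical-view cases)===' % (i + 1))
    print('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
        de_diag(acc[0, :, :, i]),
        de_diag(acc[1, :, :, i]),
        de_diag(acc[2, :, :, i])))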