Example #1
def main():
    from vocab import Vocabulary
    import evaluation
    #evaluation.evalrank("runs/coco_vse++/model_best.pth.tar", data_path="data", split="test")
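    # evalrank loads the checkpoint at the given path and prints the retrieval
    # metrics (R@1/R@5/R@10, median and mean rank) for the requested split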
    evaluation.evalrank("runs/coco_vse++_vse/model_best.pth.tar",
                        data_path="data",
                        split="test")
Example #2
def main_test():
    global args
    args = parser.parse_args()

    # transfer_test switches evalrank to the cross-dataset evaluation variant,
    # which additionally requires a concept vocabulary (concept_path)
    if not args.transfer_test:
        evaluation.evalrank(model_path=args.model_path, data_path=args.data_path, data_name=args.data_name,
                            data_name_vocab=args.data_name_vocab, split=args.split, VSE_model=CVSE)
    else:
        evaluation.evalrank(model_path=args.model_path, data_path=args.data_path, data_name=args.data_name,
                            data_name_vocab=args.data_name_vocab, split="test", VSE_model=CVSE,
                            concept_path=args.concept_path, transfer_test=True)
Example #3
def main():
    # Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path',
                        default='./data/',
                        help='path to datasets')
    parser.add_argument('--model_path',
                        default='./data/',
                        help='path to model')
    parser.add_argument('--split', default='test', help='val/test')
    parser.add_argument('--gpuid', default='0', type=str, help='gpuid')
    parser.add_argument('--fold5', action='store_true', help='fold5')
    opts = parser.parse_args()

    device_id = opts.gpuid
    print("use GPU:", device_id)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)
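    # with CUDA_VISIBLE_DEVICES set, the chosen GPU is remapped to index 0,
    # hence the hard-coded device id below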
    device_id = 0
    torch.cuda.set_device(0)
    # load model and options
    checkpoint = torch.load(opts.model_path)
    opt = checkpoint['opt']
    opt.loss_verbose = False
    opt.split = opts.split
    opt.data_path = opts.data_path
    opt.fold5 = opts.fold5

    # load vocabulary used by the model
    vocab = deserialize_vocab(
        os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
    opt.vocab_size = len(vocab)

    # construct model
    model = SCAN(opt)
    model.cuda()
    model = nn.DataParallel(model)

    # load model state
    model.load_state_dict(checkpoint['model'])

    print('Loading dataset')
    data_loader = data.get_test_loader(opt.split, opt.data_name, vocab,
                                       opt.batch_size, opt.workers, opt)

    print(opt)
    print('Computing results...')

    evaluation.evalrank(model.module,
                        data_loader,
                        opt,
                        split=opt.split,
                        fold5=opt.fold5)
Example #4
def test(run=run):
    with open('%s_params_%s.pkl' % (saveto, run), 'rb') as f:
        model_options = pkl.load(f)
    with open('%s.dictionary_%s.pkl' % (saveto, run), 'rb') as f:
        worddict = pkl.load(f)
    model = LIUMCVC_Encoder(model_options)
    model.load_state_dict(torch.load('%s_model_%s.pkl' % (saveto, run)))
    model = model.cuda()

    # bundle everything evalrank expects: options, trained model, vocabulary
    best_model = {}
    best_model['options'] = model_options
    best_model['en_cn_model'] = model
    best_model['worddict'] = worddict

    evalrank(best_model, data, split='dev')
    evalrank(best_model, data, split='test')
Example #5
def execute(input_string, n=1000):
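    # rank the query string against the precomputed image features and return
    # the top-n retrieval results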
    final_top_n = eval.evalrank(
        input_string,
        img_feature,
        n,
        "/scan/SCAN/runs/coco_scan/log/model_best.pth.tar",
        data_path="/scan/SCAN/data",
        split="test",
        fold5=False)

    return final_top_n
Example #6
def execute(input_string, n=1000):
    final_top_n = eval.evalrank(input_string,
                                n,
                                "runs/coco_scan/log/model_best.pth.tar",
                                data_path="./data",
                                split="test",
                                fold5=False)

    # package the ranked result as JSON and POST it to the local result service
    ids = dict(result=final_top_n)
    jsonFile = json.dumps(ids)
    requests.post("http://localhost:5000/getScanResult",
                  data={'Results': jsonFile})
Example #7
def main(args):
    run = args.run
    checkpoint = args.checkpoint
    data_path = args.data_path

    folders = glob.glob("{}{}/seed*/".format(args.model_path, run))
    nr_runs = len(folders)

    # accumulators for image-to-text (r*) and text-to-image (ri*) retrieval
    # metrics, averaged over all seeds at the end
    r1 = 0
    r5 = 0
    r10 = 0
    medr = 0
    meanr = 0

    ri1 = 0
    ri5 = 0
    ri10 = 0
    medri = 0
    meanri = 0

    for i in range(nr_runs):
        print("Evaluating seed{}".format(str(i+1)))
        model_path = "{}{}/seed{}/checkpoint/{}".format(args.model_path, run, i+1, checkpoint )
        # plot_path = "{}{}/seed{}checkpoint/".format(args.plot_path,  run)
        rt, rti, r, ri = evaluation.evalrank(model_path, run, data_path=args.data_path, split="test", vocab_path=args.vocab_path)
        r1 += r[0]
        r5 += r[1]
        r10 += r[2]
        medr += r[3]
        meanr += r[4]

        ri1 += ri[0]
        ri5 += ri[1]
        ri10 += ri[2]
        medri += ri[3]
        meanri += ri[4]

    r1 = r1 / nr_runs
    r5 = r5 / nr_runs
    r10 = r10 / nr_runs
    medr = medr / nr_runs
    meanr = meanr / nr_runs

    ri1 = ri1 / nr_runs
    ri5 = ri5 / nr_runs
    ri10 = ri10 / nr_runs
    medri = medri / nr_runs
    meanri = meanri / nr_runs

    print("AVERAGE Image to text: {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}".format( r1, r5, r10, medr, meanr))
    print("AVERAGE Text to image: {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}".format(ri1, ri5, ri10, medri, meanri))
Example #8
def main(opt, current_config):
    model_checkpoint = opt.checkpoint

    checkpoint = torch.load(model_checkpoint)
    print('Checkpoint loaded from {}'.format(model_checkpoint))
    loaded_config = checkpoint['config']

    if opt.size == "1k":
        fold5 = True
    elif opt.size == "5k":
        fold5 = False
    else:
        raise ValueError('Test split size not recognized!')

    # Override some mandatory things in the configuration (paths)
    loaded_config['dataset']['images-path'] = current_config['dataset'][
        'images-path']
    loaded_config['dataset']['data'] = current_config['dataset']['data']
    loaded_config['image-model'][
        'pre-extracted-features-root'] = current_config['image-model'][
            'pre-extracted-features-root']

    evaluation.evalrank(loaded_config, checkpoint, split="test", fold5=fold5)
Example #9
def main(opt, current_config):
    model_checkpoint = opt.checkpoint

    # load on GPU when available, otherwise map tensors onto the CPU
    if opt.gpu:
        checkpoint = torch.load(model_checkpoint)
    else:
        checkpoint = torch.load(model_checkpoint,
                                map_location=torch.device("cpu"))

    print('Checkpoint loaded from {}'.format(model_checkpoint))
    loaded_config = checkpoint['config']

    if opt.size == "1k":
        fold5 = True
    elif opt.size == "5k":
        fold5 = False
    else:
        raise ValueError('Test split size not recognized!')

    # Override some mandatory things in the configuration (paths)
    if current_config is not None:
        loaded_config['dataset']['images-path'] = current_config['dataset'][
            'images-path']
        loaded_config['dataset']['data'] = current_config['dataset']['data']
        loaded_config['image-model'][
            'pre-extracted-features-root'] = current_config['image-model'][
                'pre-extracted-features-root']
        loaded_config['training']['bs'] = current_config['training']['bs']

    evaluation.evalrank(loaded_config,
                        checkpoint,
                        split="test",
                        fold5=fold5,
                        eval_t2i=opt.t2i,
                        eval_i2t=opt.i2t)
Example #10
from vocab import Vocabulary
import evaluation
import numpy

DATA_PATH = '/hdd2/niluthpol/VTT/vsepp_data/'
RUN_PATH = '/home/niluthpol/VTT/models/'

shared_space = 'both'  # 'both' | 'object_text' | 'activity_text'; default: 'both'

evaluation.evalrank(RUN_PATH + "msrvtt_object_text/model_best.pth.tar",
                    RUN_PATH + "msrvtt_activity_text/model_best.pth.tar",
                    data_path=DATA_PATH,
                    split="test",
                    shared_space=shared_space)
Example #11
def main():
    # Hyper Parameters
    opt = opts.parse_opt()

    device_id = opt.gpuid
    device_count = len(str(device_id).split(","))
    #assert device_count == 1 or device_count == 2
    print("use GPU:", device_id, "GPUs_count", device_count, flush=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device_id)
    device_id = 0
    torch.cuda.set_device(0)

    # Load Vocabulary Wrapper
    vocab = deserialize_vocab(os.path.join(opt.vocab_path, '%s_vocab.json' % opt.data_name))
    opt.vocab_size = len(vocab)

    # Load data loaders
    train_loader, val_loader = data.get_loaders(
        opt.data_name, vocab, opt.batch_size, opt.workers, opt)

    # Construct the model
    model = SCAN(opt)
    model.cuda()
    model = nn.DataParallel(model)

    # Loss and Optimizer
    criterion = ContrastiveLoss(opt=opt, margin=opt.margin, max_violation=opt.max_violation)
    mse_criterion = nn.MSELoss(reduction="mean")  # "batchmean" is only valid for KLDivLoss
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate)

    # optionally resume from a checkpoint
    if not os.path.exists(opt.model_name):
        os.makedirs(opt.model_name)
    start_epoch = 0
    best_rsum = 0

    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            print("=> loaded checkpoint '{}' (epoch {}, best_rsum {})"
                  .format(opt.resume, start_epoch, best_rsum))
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    evalrank(model.module, val_loader, opt)

    print(opt, flush=True)
    
    # Train the Model
    for epoch in range(start_epoch, opt.num_epochs):
        message = "epoch: %d, model name: %s\n" % (epoch, opt.model_name)
        log_file = os.path.join(opt.logger_name, "performance.log")
        logging_func(log_file, message)
        print("model name: ", opt.model_name, flush=True)
        adjust_learning_rate(opt, optimizer, epoch)
        run_time = 0
        for i, (images, captions, lengths, masks, ids, _) in enumerate(train_loader):
            start_time = time.time()
            model.train()

            optimizer.zero_grad()

            if device_count != 1:
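                # replicate images along the batch dimension so each GPU
                # still sees the full image set after DataParallel scatters the batch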
                images = images.repeat(device_count, 1, 1)

            score = model(images, captions, lengths, masks, ids)
            loss = criterion(score)

            loss.backward()
            if opt.grad_clip > 0:
                clip_grad_norm_(model.parameters(), opt.grad_clip)
            optimizer.step()
            run_time += time.time() - start_time
            # validate at every val_step
            if i % 100 == 0:
                log = "epoch: %d; batch: %d/%d; loss: %.4f; time: %.4f" % (epoch, 
                            i, len(train_loader), loss.data.item(), run_time / 100)
                print(log, flush=True)
                run_time = 0
            if (i + 1) % opt.val_step == 0:
                evalrank(model.module, val_loader, opt)

        print("-------- performance at epoch: %d --------" % (epoch))
        # evaluate on validation set
        rsum = evalrank(model.module, val_loader, opt)
        #rsum = -100
        filename = 'model_' + str(epoch) + '.pth.tar'
        # remember best R@ sum and save checkpoint
        is_best = rsum > best_rsum
        best_rsum = max(rsum, best_rsum)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'best_rsum': best_rsum,
            'opt': opt,
        }, is_best, filename=filename, prefix=opt.model_name + '/')
Example #12
File: eval.py Project: fwtan/vsepp
from vocab import Vocabulary
import evaluation
evaluation.evalrank(
    "data/runs/coco_vse++_resnet_restval_finetune/model_best.pth.tar",
    data_path="data/data",
    split="test")
Example #13
from vocab import Vocabulary
import evaluation

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default='$RUN_PATH/coco_vse/model_best.pth.tar', help='path to model')
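# note: argparse will not expand the shell-style $RUN_PATH in this default;
# pass an explicit path or expand it first, e.g. with os.path.expandvars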
parser.add_argument('--data_path', default='data/data', help='path to datasets')
parser.add_argument('--fold5', action='store_true',
                    help='Use fold5')
parser.add_argument('--save_embeddings', action='store_true',
                    help='save_embeddings')
parser.add_argument('--save_csv', default='')

opt_eval = parser.parse_args()

evaluation.evalrank(opt_eval, split='test')
Example #14
import tools, evaluation, os

# Hey Kipster!  For this to work, use a Python virtualenv
# and pip install -r requirements.txt in IF-root;
# you might also need to install numpy or gfortran with your OS package manager

# First, let's make sure the model kinda works
__dirname = os.path.dirname(os.path.realpath(__file__))
model = tools.load_model(__dirname + '/data/coco.npz')
evaluation.evalrank(model, data='coco', split='test')

# Now let's compute sentence vectors for something specific
example_sentences = [
    'black tie women', 'warm winter coat',
    'long dressi gown tuxedo cocktail black_ti'
]
sentence_vectors = tools.encode_sentences(model,
                                          example_sentences,
                                          verbose=True)

print(sentence_vectors.shape)
print(sentence_vectors[0].shape)
Example #15
from vocab import Vocabulary
import evaluation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
evaluation.evalrank(
    "/home/wangzheng/neurltalk/new/SCAN_2loss500_3_0.8_10x/runs/f30k_scan/log/model_best.pth.tar",
    data_path="/data6/wangzheng/data",
    split="test")
Example #16
from vocab import Vocabulary
import evaluation

evaluation.evalrank("models/model_best.pth.tar",
                    data_path="../data/",
                    split="test")
Example #17
from vocab import Vocabulary
import evaluation
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
RUN_PATH = "models1/model_best.pth.tar"
DATA_PATH = "/media/ubuntu/data/chunxiao/"
evaluation.evalrank(RUN_PATH, data_path=DATA_PATH, split="test")
Example #18
from vocab import Vocabulary
import evaluation
import argparse 

parser = argparse.ArgumentParser()

parser.add_argument('--model', '-m')
parser.add_argument('--data_path', '-d', default=None)
parser.add_argument('--data_name', default=None)
parser.add_argument('--split', '-s', default='test')
parser.add_argument('--no_cv', action='store_false')
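# note: with action='store_false', args.no_cv defaults to True, so fold5
# cross-validation is enabled unless --no_cv is passed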
parser.add_argument('--batch_size', type=int, default=64)

args = parser.parse_args()

evaluation.evalrank(
    model_path=args.model,
    data_path=args.data_path,
    data_name=args.data_name,
    split=args.split,
    fold5=args.no_cv,
    batch_size=args.batch_size,
)
Example #19
# -------------------------------------------------------------------------------------
# A Bidirectional Focal Attention Network implementation based on
# https://arxiv.org/abs/1909.11416.
# "Focus Your Attention: A Bidirectional Focal Attention Network for Image-Text Matching"
# Chunxiao Liu, Zhendong Mao, An-An Liu, Tianzhu Zhang, Bin Wang, Yongdong Zhang
#
# Written by Chunxiao Liu, 2019
# -------------------------------------------------------------------------------------
"""testall on MSCOCO"""

from vocab import Vocabulary
import evaluation
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
RUN_PATH = "/userhome/BFAN/models/model_best.pth.tar"
DATA_PATH = "/userhome/"
evaluation.evalrank(RUN_PATH, data_path=DATA_PATH, split="testall", fold5=True)
Example #20
from vocab import Vocabulary
import evaluation
import pickle

evaluation.evalrank("",
                     data_path="./data", split="test", 
                     fold5=True)

"""print (rt,rti)
print(len(rt),len(rti))
dic_now = {}
dic_now["rt_ranks"]=rt[0]
dic_now["rt_top1"]=rt[1]
dic_now["rti_ranks"] =rti[0]
dic_now["rti_top1"]=rti[1]

with open('vsepp' + '.results.pickle', 'wb') as handle:
    pickle.dump(dic_now, handle, protocol=pickle.HIGHEST_PROTOCOL)"""
Example #21
# import evaluation_vsepp
# evaluation_vsepp.evalrank("runs/save_ori_vse/coco_vse++_combine/model_best.pth.tar", data_path='data/', split="test", fold5=False) 


# evaluation.evalrank("runs/coco_vse++_combine_P_S/model_best.pth.tar", data_path='data/', split="test", fold5=False) 

# for i in range(1,17):
# 	print('model:' + str(i) + '\n')
# 	evaluation.evalrank("runs/save_fc_attn/coco_combine_double_GCN_attn_" + str(i) +"/model_best.pth.tar", data_path='../vsepp-master/data_SCAN/', split="test", fold5=False) 
# 	print('\n')
# 	# print('\n')
# 	# evaluation.evalrank("runs/coco_combine_double_GCN_attn/checkpoint.pth.tar", data_path='../vsepp-master/data_SCAN/', split="test", fold5=False) 



# for i in range(1,10):
# 	print('model:' + str(i) + '\n')
# 	evaluation.evalrank("runs/camera/GCN_Attn_" + str(i) +"/model_best.pth.tar", data_path='../vsepp-master/data_SCAN/', split="test", fold5=False) 
# 	print('\n')
# 	# print('\n')
# 	# evaluation.evalrank("runs/coco_combine_double_GCN_attn/checkpoint.pth.tar", data_path='../vsepp-master/data_SCAN/', split="test", fold5=False) 


# evaluate the four Only_Visual_GCN_Attn checkpoints in turn
for i in range(1, 5):
    print('model:' + str(i) + '\n')
    evaluation.evalrank("runs/camera/Only_Visual_GCN_Attn_" + str(i) + "/model_best.pth.tar", data_path='../vsepp-master/data_SCAN/', split="test", fold5=False)
    print('\n')

Example #22
import torch
from vocab import Vocabulary
import evaluation_models
import evaluation

# for flickr
print('\nEvaluation on Flickr30K:')
evaluation.evalrank(
    "/SSD/VSRN_CTC/runs/full_model3_newfeats/model_best.pth.tar",
    data_path='/SSD/Datasets/Flickr30K/data/',
    split="test",
    fold5=False)

# for TextCaps Validation set
print('\nEvaluation on TextCaps Validation set:')
evaluation.evalrank("/SSD/VSRN/runs/full_model3_newfeats/model_best.pth.tar",
                    data_path='/SSD/Datasets/TextCaps/Flickr_Format/',
                    split="dev",
                    fold5=False)

# for NewSplit STACMR set
print('\nEvaluation on STACMR:')
evaluation.evalrank(
    "/SSD/VSRN_CTC/runs/full_model3_newfeats/model_best.pth.tar",
    data_path='/SSD/Datasets/Coco-Text/ST_CMR_testdataset/New_Split/Flickr_Format/',
    split="dev",
    fold5=False)
evaluation.evalrank(
    "/SSD/VSRN_CTC/runs/full_model3_newfeats/model_best.pth.tar",
    data_path=