Example 1
import json

import numpy
# Cosine distance; scipy's implementation is assumed here.
from scipy.spatial.distance import cosine

# `visual`, `dp` (the data provider module), and `phonemes` (the default
# tokenizer) are assumed to be provided by the surrounding project.
def eval_bestimg(modelpath, testpath, tokenize=phonemes):
    rows = [json.loads(line) for line in open(testpath)]
    model = visual.load(path=modelpath)
    scaler = model.scaler
    batcher = model.batcher
    mapper = batcher.mapper
    img_fs = {}
    sent_ids = {}
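    # Build lookup tables over the val/test/restval splits: scaled image
    # features keyed by COCO id, and sentences keyed by sentence id.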
    prov = dp.getDataProvider('coco', root='/home/gchrupala/repos/reimaginet')
    for split in ['val', 'test', 'restval']:
        for img in prov.iterImages(split=split):
            img_fs[img['cocoid']] = scaler.transform([img['feat']])[0]
            for sent in img['sentences']:
                sent_ids[sent['sentid']] = sent

    def response(row):
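        # Embed the stimulus sentence for this row and return the 1-based
        # index of the candidate image closest in cosine distance.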
        sent = sent_ids[row['meta']['id']]
        inputs = list(mapper.transform([tokenize(sent)]))
        pred = model.Visual.predict(batcher.batch_inp(inputs))[0]
        return 1 + numpy.argmin([
            cosine(pred, img_fs[cocoid])
            for cocoid in row['meta']['candidates']
        ])

    preds = numpy.array([response(row) for row in rows])
    target = numpy.array([row['meta']['response'] for row in rows])
    return numpy.mean(preds == target)
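
A minimal sketch of how `eval_bestimg` might be invoked; the file names are placeholders, not from the source:

    accuracy = eval_bestimg(modelpath='model.zip', testpath='test_items.jsonl')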
Example 2
import numpy

# `visual`, `dp` (the data provider module), `phonemes` (the default
# tokenizer), and `ranking` are assumed to be provided by the surrounding project.
def evaluate(dataset='coco',
             datapath='.',
             model_path='model.zip',
             batch_size=128,
             tokenize=phonemes
            ):
    model = visual.load(path=model_path)
    task = model.Visual
    scaler = model.scaler
    batcher = model.batcher
    mapper = batcher.mapper
    prov = dp.getDataProvider(dataset, root=datapath)
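    # Tokenize the validation sentences and map them into the image feature
    # space with the visual pathway of the model.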
    sents_tok = [tokenize(sent) for sent in prov.iterSentences(split='val')]
    predictions = visual.predict_img(model, sents_tok, batch_size=batch_size)
    sents = list(prov.iterSentences(split='val'))
    images = list(prov.iterImages(split='val'))
    img_fs = list(scaler.transform([image['feat'] for image in images]))
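    # Binary relevance matrix: sentence i is relevant to image j iff both
    # share the same image id.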
    correct_img = numpy.array([[sents[i]['imgid'] == images[j]['imgid']
                                for j in range(len(images))]
                               for i in range(len(sents))])
    return ranking(img_fs, predictions, correct_img, ns=(1, 5, 10), exclude_self=False)
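
A minimal sketch of a call using the signature's own defaults; what `ranking` returns (presumably retrieval scores at the cutoffs in `ns`) is defined elsewhere in the project:

    scores = evaluate(dataset='coco', datapath='.', model_path='model.zip', batch_size=128)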
Example 3
def eval_bestimg(modelpath, testpath, tokenize=phonemes):
    rows = [json.loads(line) for line in open(testpath)]
    model = visual.load(path=modelpath)
    scaler = model.scaler
    batcher = model.batcher
    mapper = batcher.mapper
    img_fs = {}
    sent_ids = {}
    prov = dp.getDataProvider('coco', root='/home/gchrupala/repos/reimaginet')
    for split in ['val', 'test', 'restval']:
        for img in prov.iterImages(split=split):
            img_fs[img['cocoid']] = scaler.transform([img['feat']])[0]
            for sent in img['sentences']:
                sent_ids[sent['sentid']] = sent

    def response(row):
        sent = sent_ids[row['meta']['id']]
        inputs = list(mapper.transform([tokenize(sent)]))
        pred = model.Visual.predict(batcher.batch_inp(inputs))[0]
        return 1 + numpy.argmin([cosine(pred, img_fs[cocoid])
                                 for cocoid in row['meta']['candidates']])

    preds = numpy.array([response(row) for row in rows])
    target = numpy.array([row['meta']['response'] for row in rows])
    return numpy.mean(preds == target)
Example 4
def evaluate(dataset='coco',
             datapath='.',
             model_path='model.zip',
             batch_size=128,
             tokenize=phonemes):
    model = visual.load(path=model_path)
    task = model.Visual
    scaler = model.scaler
    batcher = model.batcher
    mapper = batcher.mapper
    prov = dp.getDataProvider(dataset, root=datapath)
    sents_tok = [tokenize(sent) for sent in prov.iterSentences(split='val')]
    predictions = visual.predict_img(model, sents_tok, batch_size=batch_size)
    sents = list(prov.iterSentences(split='val'))
    images = list(prov.iterImages(split='val'))
    img_fs = list(scaler.transform([image['feat'] for image in images]))
    correct_img = numpy.array(
        [[sents[i]['imgid'] == images[j]['imgid'] for j in range(len(images))]
         for i in range(len(sents))])
    return ranking(img_fs,
                   predictions,
                   correct_img,
                   ns=(1, 5, 10),
                   exclude_self=False)