Example #1
def val_batch(atts, words, defns):
    # `m` (the model) and `crit` (the attribute criterion) are module-level globals
    logits = m(defns, words)
    val_loss = torch.sum(crit(logits, atts))
    preds = crit.predict(logits)
    acc_table = evaluate_accuracy(preds, atts.cpu().data.numpy())
    acc_table['loss'] = val_loss.cpu().data.numpy()[None, :]
    return acc_table.T.squeeze()
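
A usage sketch for the snippet above, assuming `m` and `crit` are the module-level model and criterion, and a hypothetical `val_iter` that yields `(atts, words, defns)` batches (these names are assumptions, not from the original source):

m.eval()
tables = []
for atts, words, defns in val_iter:   # hypothetical validation iterator
    tables.append(val_batch(atts, words, defns))
mean_table = sum(tables) / len(tables)  # average per-attribute accuracy and loss
print(mean_table)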
Example #2
def eval(ckpt, use_emb=False):

    # Recommended hyperparameters
    args = ModelConfig(batch_size=64, ckpt=ckpt, dropout=0.5, use_emb=use_emb)

    m = DictionaryModel(dict_field.vocab,
                        output_size=att_crit.input_size,
                        embed_input=args.use_emb,
                        dropout_rate=args.dropout)
    m.load_state_dict(torch.load(args.ckpt)['m_state_dict'])

    if torch.cuda.is_available():
        m.cuda()
        att_crit.cuda()
        # .cuda() is not in-place on tensors, so assign the results back
        train_data.atts_matrix = train_data.atts_matrix.cuda()
        val_data.atts_matrix = val_data.atts_matrix.cuda()
        test_data.atts_matrix = test_data.atts_matrix.cuda()
    m.eval()

    # Don't take the mean until the end
    preds_ = []
    labels_ = []
    for val_b, (atts, words, defns, perm) in enumerate(tqdm(test_iter)):
        preds_.append(att_crit.predict(m(defns, words))[perm])
        labels_.append(atts.data.cpu().numpy()[perm])
    preds = np.concatenate(preds_, 0)
    labels = np.concatenate(labels_, 0)

    acc_table = evaluate_accuracy(preds, labels).T.squeeze()
    return acc_table, preds
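
A short usage sketch; the checkpoint path and output filename below are placeholders, not from the original source:

acc_table, preds = eval('checkpoints/ckpt_10.tar', use_emb=True)
print(acc_table)
np.save('test_preds.npy', preds)  # save raw per-attribute predictions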
Example #3
def val_batch():
    # Same pattern as Example #1, but over the whole validation split at once
    atts = val_data.atts_matrix
    logits = m(val_data.embeds)
    val_loss = torch.sum(crit(logits, atts))
    preds = crit.predict(logits)
    acc_table = evaluate_accuracy(preds, atts.cpu().data.numpy())
    acc_table['loss'] = val_loss.cpu().data.numpy()[None, :]
    return acc_table
Example #4
def defn_to_atts(defn_type, use_emb=False, first_defn=True):

    train_data, val_data, test_data = Attributes.splits(use_defns=True, cuda=False,
                                                        first_defn_at_test=first_defn)
    enc_fn = {'bow': bowize, 'nbow': nbowize}[defn_type]
    if first_defn:
        # We want to oversample
        balanced_train_inds = train_data._balanced_inds

        X_train, Y_train = get_x(train_data, enc_fn, use_emb=use_emb)
        X_train = X_train[balanced_train_inds]
        Y_train = Y_train[balanced_train_inds]
        X_val, Y_val = get_x(val_data, enc_fn, use_emb=use_emb)
        X_test, Y_test = get_x(test_data, enc_fn, use_emb=use_emb)

    else:
        # We want to undersample
        X_train, Y_train = get_stacked_x(train_data, enc_fn, use_emb=use_emb)
        X_val, Y_val = get_stacked_x(val_data, enc_fn, use_emb=use_emb)
        X_test, Y_test = get_stacked_x(test_data, enc_fn, use_emb=use_emb)

    # cross validate
    cs = np.power(10., [-3, -2, -1, 0])
    accs = defaultdict(list)
    for c in cs:
        for d, (dom_name, dom_size) in enumerate(train_data.domains):
            M = LogisticRegression(C=c)
            print("fitting {}".format(d))
            M.fit(X_train, Y_train[:, d])
            s = M.score(X_val, Y_val[:, d])
            accs[d].append(s)

    c_to_use = {d: cs[np.argmax(scores)] for d, scores in accs.items()}
    print("Using c={}, acc of {:.3f} on val".format(
        '\n'.join('{:2d}:{:.3f}'.format(d, c) for d, c in c_to_use.items()),
        np.mean([max(accs[d]) for d in c_to_use.keys()])
    ))

    # -----------------------------------------------
    preds = []
    for d, (dom_name, dom_size) in enumerate(train_data.domains):
        M = LogisticRegression(C=c_to_use[d])
        print("fitting {}".format(d))
        M.fit(X_train, Y_train[:, d])
        s = M.score(X_test, Y_test[:, d])
        print("Score for {} is {}".format(dom_name, s))

        preds.append(M.predict(X_test))

    preds_full = np.array(preds).T
    acc_table = evaluate_accuracy(preds_full, Y_test)

    acc_table.index = ['{}{}({})'.format(defn_type, ' +GloVe' if use_emb else '',
                                         'firstdefn' if first_defn else 'concat')]

    # Note: np.save appends '.npy' when the name lacks it, so the '.pkl' suffix here is misleading
    np.save('{}{}.pkl'.format(defn_type, ' +GloVe' if use_emb else ''), preds_full)
    return acc_table
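
The grid search above picks a separate regularization strength C per attribute domain by validation accuracy. A self-contained sketch of the same pattern on synthetic data (all names and shapes are illustrative, not from the original repo):

from collections import defaultdict
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X_train, X_val = rng.randn(200, 10), rng.randn(50, 10)
Y_train = (rng.rand(200, 3) > 0.5).astype(int)  # 3 binary "domains"
Y_val = (rng.rand(50, 3) > 0.5).astype(int)

cs = np.power(10., [-3, -2, -1, 0])
accs = defaultdict(list)
for c in cs:
    for d in range(Y_train.shape[1]):
        M = LogisticRegression(C=c)
        M.fit(X_train, Y_train[:, d])
        accs[d].append(M.score(X_val, Y_val[:, d]))

c_to_use = {d: cs[int(np.argmax(scores))] for d, scores in accs.items()}
print(c_to_use)  # best C per domain by validation accuracy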
Example #5
def deploy(data):
    embs = data.embeds
    preds = m(embs)
    gt_atts = torch.cat(data.atts_list, 1)
    loss = binary_cross_entropy_with_logits(preds, gt_atts, size_average=True)

    # Now get the test results
    acc_table = evaluate_accuracy(crit.predict(m(data.embeds)),
                                  data.atts_matrix.cpu().data.numpy())
    acc_table['loss'] = loss.cpu().data.numpy()[None,:]
    return acc_table
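
deploy feeds raw logits to binary_cross_entropy_with_logits; size_average=True is the old PyTorch spelling of mean reduction. A minimal standalone equivalent under the current API (shapes are illustrative):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 7)                      # raw model outputs
targets = torch.randint(0, 2, (4, 7)).float()   # binary attribute labels
loss = F.binary_cross_entropy_with_logits(logits, targets, reduction='mean')
print(loss.item())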
Example #6
def emb_to_atts(emb_type):
    assert emb_type in TYPES

    if emb_type != 'glove':
        # Replace the matrices with retrofitted vectors: a pickled word -> vector
        # mapping (newer numpy needs allow_pickle=True to load it)
        vecs = np.load(os.path.join(retrofit_dir, emb_type + '.pkl'))
        X_train = np.stack([vecs[v] for v in train_data.atts_df.index])
        X_val = np.stack([vecs[v] for v in val_data.atts_df.index])
        X_test = np.stack([vecs[v] for v in test_data.atts_df.index])
    else:
        X_train = embeds_train
        X_val = embeds_val
        X_test = embeds_test
    print("train {} val {} test {}".format(X_train.shape, X_val.shape,
                                           X_test.shape))

    # cross validate
    cs = np.power(10., [-3, -2, -1, 0, 1])
    accs = defaultdict(list)
    for c in cs:
        for d, (dom_name, dom_size) in enumerate(train_data.domains):
            M = LogisticRegression(C=c)
            print("fitting {}".format(d))
            M.fit(X_train, Y_train[:, d])
            s = M.score(X_val, Y_val[:, d])
            accs[d].append(s)

    c_to_use = {d: cs[np.argmax(scores)] for d, scores in accs.items()}
    print("Using c={}, acc of {:.3f} on val".format(
        '\n'.join('{:2d}:{:.3f}'.format(d, c) for d, c in c_to_use.items()),
        np.mean([max(accs[d]) for d in c_to_use.keys()])))
    # -----------------------------------------------
    preds = []
    for d, (dom_name, dom_size) in enumerate(train_data.domains):
        M = LogisticRegression(C=c_to_use[d])
        print("fitting {}".format(d))
        M.fit(X_train, Y_train[:, d])
        s = M.score(X_test, Y_test[:, d])
        print("Score for {} is {}".format(dom_name, s))

        preds.append(M.predict(X_test))

    preds_full = np.array(preds).T
    accs = evaluate_accuracy(preds_full, Y_test)
    accs.index = [emb_type]
    return accs, preds_full
Example #7
        # Snippet begins inside the per-batch loop of a larger training script
        start = time.time()
        l = train_batch(b_inds, optimizers=[optimizer])
        train_l.append(l.data[0])

        dur = time.time() - start
        if b % 1000 == 0 and b >= 100:
            print("e{:2d}b{:5d} Cost {:.3f} , {:.3f} s/batch".format(
                epoch,
                b,
                np.mean(train_l),
                dur,
            ),
                  flush=True)
    dur_epoch = time.time() - start_epoch
    print("Duration of epoch was {:.3f}/batch, overall loss was {:.3f}".format(
        dur_epoch / b,
        np.mean(train_l),
    ))

# Now get the test results
acc_table = evaluate_accuracy(crit.predict(m(test_data.embeds)),
                              test_data.atts_matrix.cpu().data.numpy())
acc_table.to_csv(os.path.join(args.save_dir, 'results.csv'))

torch.save(
    {
        'args': args.args,
        'epoch': epoch,
        'm_state_dict': m.state_dict(),
        'optimizer': optimizer.state_dict(),
    }, os.path.join(args.save_dir, 'ckpt_{}.tar'.format(epoch)))
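
The checkpoint dict saved above is what Example #2 reads back via torch.load(args.ckpt)['m_state_dict']. A minimal restore sketch using the same keys; the path is a placeholder, and m and optimizer are assumed to be constructed as in training:

ckpt = torch.load('ckpt_10.tar')                # placeholder path
m.load_state_dict(ckpt['m_state_dict'])         # restore model weights
optimizer.load_state_dict(ckpt['optimizer'])    # restore optimizer state
start_epoch = ckpt['epoch'] + 1                 # resume from the next epoch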
Example #8
"""
This model tries to predict the word from its definition
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from data.attribute_loader import Attributes
from scipy.stats import mode
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score
from lib.attribute_loss import evaluate_accuracy

train_data, val_data, test_data = Attributes.splits(use_defns=False,
                                                    cuda=False)
preds = mode(train_data.atts_matrix.data.numpy()).mode  # most frequent value per attribute, shape (1, n_atts)

print("Always predicting {}".format(preds))

Y_test = test_data.atts_matrix.data.numpy()
acc_table = evaluate_accuracy(
    np.repeat(preds, Y_test.shape[0], axis=0),
    test_data.atts_matrix.data.numpy(),
)
acc_table.to_csv('mfc.csv', float_format='%.2f')
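
The baseline above always predicts each attribute's most frequent training value. The same idea in a few self-contained lines on synthetic data (evaluate_accuracy is replaced by plain mean accuracy for illustration):

import numpy as np
from scipy.stats import mode

rng = np.random.RandomState(0)
Y_train = (rng.rand(100, 5) > 0.3).astype(int)   # synthetic binary attributes
Y_test = (rng.rand(20, 5) > 0.3).astype(int)

# Per-column most frequent value; keepdims=True preserves the (1, 5) shape
# that np.repeat expects (older scipy versions kept this shape by default).
mfc = mode(Y_train, axis=0, keepdims=True).mode
preds = np.repeat(mfc, Y_test.shape[0], axis=0)
print("per-attribute accuracy:", (preds == Y_test).mean(axis=0))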