Example No. 1
def evaluate_model(evalparams):

    torch.manual_seed(evalparams.seed)
    random.seed(1234)
    if evalparams.cpu:
        evalparams.cuda = False
    elif evalparams.cuda:
        torch.cuda.manual_seed(evalparams.seed)

    # load opt
    print(evalparams.model_dir, evalparams.model)
    #     model_file = evalparams.model_dir + "/" + evalparams.model
    model_file = 'best_model.pt'
    print("Loading model from {}".format(model_file))
    opt = torch_utils.load_config(model_file)
    model = RelationModel(opt)
    model.load(model_file)

    # load vocab
    vocab_file = evalparams.model_dir + '/vocab.pkl'
    vocab = Vocab(vocab_file, load=True)
    assert opt[
        'vocab_size'] == vocab.size, "Vocab size must match that in the saved model."

    # load data
    data_file = opt['data_dir'] + '/{}.json'.format(evalparams.dataset)
    print("Loading data from {} with batch size {}...".format(
        data_file, opt['batch_size']))
    batch = DataLoader(data_file,
                       opt['batch_size'],
                       opt,
                       vocab,
                       evaluation=True)

    helper.print_config(opt)
    id2label = dict([(v, k) for k, v in constant.LABEL_TO_ID.items()])

    predictions = []
    all_probs = []
    for i, b in enumerate(batch):
        preds, probs, _ = model.predict(b)
        predictions += preds
        all_probs += probs
    predictions = [id2label[p] for p in predictions]
    p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)

    # save probability scores
    if len(evalparams.out) > 0:
        helper.ensure_dir(os.path.dirname(evalparams.out))
        with open(evalparams.out, 'wb') as outfile:
            pickle.dump(all_probs, outfile)
        print("Prediction scores saved to {}.".format(evalparams.out))

    print("Evaluation ended.")

    return (batch.gold(), predictions, model)
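
# --- Hypothetical call site (not part of the original example) ---
# A minimal sketch of how evaluate_model might be driven. The field names on
# the namespace are inferred from the attribute reads above; the paths and
# values are placeholders, not the project's real defaults.
from argparse import Namespace

evalparams = Namespace(
    model_dir='saved_models/00',  # placeholder directory
    model='best_model.pt',
    dataset='test',               # evaluates on <data_dir>/test.json
    out='eval_probs.pkl',         # where the probability scores get pickled
    seed=1234,
    cpu=False,
    cuda=True,
)
gold_labels, predicted_labels, loaded_model = evaluate_model(evalparams)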
Example No. 2
def get_biased_model_class_probs(args):
    # load opt
    model_file = args.model_dir + "/" + args.model
    print("Loading model from {}".format(model_file))
    opt = torch_utils.load_config(model_file)
    model = RelationModel(opt)
    model.load(model_file)

    # load vocab
    vocab_file = args.model_dir + "/vocab.pkl"
    vocab = Vocab(vocab_file, load=True)
    assert opt[
        "vocab_size"] == vocab.size, "Vocab size must match that in the saved model."
    opt["vocab_size"] = vocab.size
    emb_file = opt["vocab_dir"] + "/embedding.npy"
    emb_matrix = np.load(emb_file)
    assert emb_matrix.shape[0] == vocab.size
    assert emb_matrix.shape[1] == opt["emb_dim"]

    # load data
    data_file = args.data_dir + "/{}".format(args.data_name)
    print("Loading data from {} with batch size {}...".format(
        data_file, opt["batch_size"]))
    batch = DataLoader(data_file,
                       opt["batch_size"],
                       opt,
                       vocab,
                       evaluation=True)

    # helper.print_config(opt)
    id2label = dict([(v, k) for k, v in constant.LABEL_TO_ID.items()])

    all_probs = []
    for i, b in enumerate(batch):
        preds, probs, _ = model.predict(b)
        all_probs.append(probs)
    return all_probs
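
# --- Hypothetical call site (not part of the original example) ---
# Sketch of invoking get_biased_model_class_probs; the argument names
# (model_dir, model, data_dir, data_name) mirror the reads above, while the
# concrete paths are placeholders.
from argparse import Namespace

bias_args = Namespace(
    model_dir='saved_models/biased',  # placeholder
    model='best_model.pt',
    data_dir='dataset',               # placeholder
    data_name='train.json',
)
biased_batch_probs = get_biased_model_class_probs(bias_args)
# One entry per batch, each holding the per-example class probabilities.
print("collected probabilities for {} batches".format(len(biased_batch_probs)))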
Example No. 3
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        loss = model.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                    opt['num_epoch'], loss, duration, current_lr))

    # eval on dev
    print("Evaluating on dev set...")
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = model.predict(batch)
        predictions += preds
        dev_loss += loss
    predictions = [id2label[p] for p in predictions]
    current_dev_metrics, _ = scorer.score(dev_batch.gold(), predictions)
    dev_f1 = current_dev_metrics['f1']

    train_loss = train_loss / train_batch.num_examples * opt[
        'batch_size']  # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".
          format(epoch, train_loss, dev_loss, dev_f1))
    file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(
        epoch, train_loss, dev_loss, dev_f1))

    print("Evaluating on test set...")
Example No. 4
vocab_file = args.model_dir + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
assert opt['vocab_size'] == vocab.size, "Vocab size must match that in the saved model."

# load data
data_file = opt['data_dir'] + '/{}.json'.format(args.dataset)
print("Loading data from {} with batch size {}...".format(data_file, opt['batch_size']))
batch = DataLoader(data_file, opt['batch_size'], opt, vocab, evaluation=True)

helper.print_config(opt)
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])

predictions = []
all_probs = []
for i, b in enumerate(batch):
    preds, probs, _ = model.predict(b)
    predictions += preds
    all_probs += probs
predictions = [id2label[p] for p in predictions]
p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)

# save probability scores
if len(args.out) > 0:
    helper.ensure_dir(os.path.dirname(args.out))
    with open(args.out, 'wb') as outfile:
        pickle.dump(all_probs, outfile)
    print("Prediction scores saved to {}.".format(args.out))

print("Evaluation ended.")

Example No. 5
            print(
                format_str.format(
                    datetime.now(),
                    global_step,
                    max_steps,
                    epoch,
                    opt["num_epoch"],
                    loss,
                    duration,
                    current_lr,
                ))

    # eval on dev
    print("Evaluating on dev set...")
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = model.predict(batch)
        predictions += preds
        dev_loss += loss
    predictions = [id2label[p] for p in predictions]
    dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)

    train_loss = (train_loss / train_batch.num_examples * opt["batch_size"]
                  )  # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_examples * opt["batch_size"]
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".
          format(epoch, train_loss, dev_loss, dev_f1))
    file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(
        epoch, train_loss, dev_loss, dev_f1))

    # save
    model_file = model_save_dir + "/checkpoint_epoch_{}.pt".format(epoch)
Example No. 6
def train_unbiased_model(args, biased_batch_probs):
    # make opt
    opt = vars(args)
    opt["num_class"] = len(constant.LABEL_TO_ID)

    # load vocab
    vocab_file = opt['vocab_dir'] + '/vocab.pkl'
    vocab = Vocab(vocab_file, load=True)
    opt['vocab_size'] = vocab.size
    emb_file = opt['vocab_dir'] + '/embedding.npy'
    emb_matrix = np.load(emb_file)
    assert emb_matrix.shape[0] == vocab.size
    assert emb_matrix.shape[1] == opt['emb_dim']

    # load data
    print("Loading data from {} with batch size {}...".format(
        opt["data_dir"], opt["batch_size"]))
    train_batch = DataLoader(
        opt["data_dir"] + "/" + args.data_name,
        opt["batch_size"],
        opt,
        vocab,
        evaluation=False,
    )
    dev_batch = DataLoader(opt["data_dir"] + "/dev.json",
                           opt["batch_size"],
                           opt,
                           vocab,
                           evaluation=True)

    model_id = opt["id"] if len(opt["id"]) > 1 else "0" + opt["id"]
    model_save_dir = opt["save_dir"] + "/" + model_id
    opt["model_save_dir"] = model_save_dir
    helper.ensure_dir(model_save_dir, verbose=True)

    # save config
    helper.save_config(opt, model_save_dir + "/config.json", verbose=True)
    vocab.save(model_save_dir + "/vocab.pkl")
    file_logger = helper.FileLogger(
        model_save_dir + "/" + opt["log"],
        header="# epoch\ttrain_loss\tdev_loss\tdev_f1")

    # print model info
    helper.print_config(opt)

    # model
    model = RelationModel(opt, emb_matrix=emb_matrix)

    id2label = dict([(v, k) for k, v in constant.LABEL_TO_ID.items()])
    dev_f1_history = []
    current_lr = opt["lr"]

    global_step = 0
    global_start_time = time.time()
    format_str = (
        "{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}"
    )
    max_steps = len(train_batch) * opt["num_epoch"]

    # start training
    for epoch in range(1, opt["num_epoch"] + 1):
        train_loss = 0
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = model.update(batch,
                                torch.tensor(biased_batch_probs[i]).cuda())
            train_loss += loss
            if global_step % opt["log_step"] == 0:
                duration = time.time() - start_time
                print(
                    format_str.format(
                        datetime.now(),
                        global_step,
                        max_steps,
                        epoch,
                        opt["num_epoch"],
                        loss,
                        duration,
                        current_lr,
                    ))

        # eval on dev
        print("Evaluating on dev set...")
        predictions = []
        dev_loss = 0
        for i, batch in enumerate(dev_batch):
            preds, _, loss = model.predict(batch)
            predictions += preds
            dev_loss += loss
        predictions = [id2label[p] for p in predictions]
        dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)

        f = open("label.txt", "w+")
        f.write(str(dev_batch.gold()))
        f.close()

        train_loss = (train_loss / train_batch.num_examples * opt["batch_size"]
                      )  # avg loss per batch
        dev_loss = dev_loss / dev_batch.num_examples * opt["batch_size"]
        print(
            "epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}"
            .format(epoch, train_loss, dev_loss, dev_f1))
        file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(
            epoch, train_loss, dev_loss, dev_f1))

        # save
        model_file = model_save_dir + "/checkpoint_epoch_{}.pt".format(epoch)
        model.save(model_file, epoch)
        if epoch == 1 or dev_f1 > max(dev_f1_history):
            copyfile(model_file, model_save_dir + "/best_model.pt")
            print("new best model saved.")
        if epoch % opt["save_epoch"] != 0:
            os.remove(model_file)

        # lr schedule
        if (len(dev_f1_history) > 10 and dev_f1 <= dev_f1_history[-1]
                and opt["optim"] in ["sgd", "adagrad"]):
            current_lr *= opt["lr_decay"]
            model.update_lr(current_lr)

        dev_f1_history += [dev_f1]
        print("")

    print("Training ended with {} epochs.".format(epoch))
Example No. 7
    train_loss = 0
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        loss = model.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                    opt['num_epoch'], loss, duration, current_lr))

    # eval on dev
    print("Evaluating on dev set...")
    predictions = []
    for i, batch in enumerate(dev_batch):
        preds, _ = model.predict(batch, test=False)
        predictions += preds
    predictions = [id2label[p] for p in predictions]
    dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)

    train_loss = train_loss / train_batch.num_examples * opt[
        'batch_size']  # avg loss per batch
    print("epoch {}: train_loss = {:.4f}, dev_f1 = {:.4f}".format(epoch,\
            train_loss, dev_f1))

    # save
    model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
    model.save(model_file, epoch)
    if epoch == 1 or dev_f1 > max(dev_f1_history):
        copyfile(model_file, model_save_dir + '/best_model.pt')
        print("new best model saved.")
Example No. 8
# load data
data_file = opt['data_dir'] + '/{}.json'.format(args.dataset)
print("Loading data from {} with batch size {}...".format(
    data_file, opt['batch_size']))
batch = DataLoader(data_file, opt['batch_size'], opt, vocab, evaluation=True)

helper.print_config(opt)
id2label = dict([(v, k) for k, v in constant.LABEL_TO_ID.items()])

predictions = []
all_probs = []
# sum_le6 = 0
# sum_tt = 0
for i, b in enumerate(batch):
    preds, probs, loss_2, loss_41, rel_acc = model.predict(b, test=True)
    predictions += preds
    all_probs += probs
    # sum_le6 += le6
    # sum_tt += tt
predictions = [id2label[p] for p in predictions]
p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)

# save probability scores
if len(args.out) > 0:
    helper.ensure_dir(os.path.dirname(args.out))
    with open(args.out, 'wb') as outfile:
        pickle.dump(all_probs, outfile)
    print("Prediction scores saved to {}.".format(args.out))

print("Evaluation ended.")
Example No. 9
def train_model(vocab_params,
                train_params,
                train_batch,
                dev_batch,
                model_id=-1):
    torch.manual_seed(train_params.seed)
    np.random.seed(train_params.seed)
    random.seed(train_params.seed)

    if train_params.cpu:
        train_params.cuda = False
    elif train_params.cuda:
        torch.cuda.manual_seed(train_params.seed)

    # make opt
    opt = vars(vocab_params)

    print(constant.LABEL_TO_ID)
    print(opt)
    opt['num_class'] = len(constant.LABEL_TO_ID)
    #     Combine all the parameters together
    opt.update(vars(train_params))

    # load vocab
    vocab_file = opt['vocab_dir'] + '/vocab.pkl'
    vocab = Vocab(vocab_file, load=True)
    opt['vocab_size'] = vocab.size
    emb_file = opt['vocab_dir'] + '/embedding.npy'
    emb_matrix = np.load(emb_file)
    assert emb_matrix.shape[0] == vocab.size
    assert emb_matrix.shape[1] == opt['emb_dim']

    if model_id == -1:
        model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
    model_save_dir = opt['save_dir'] + '/' + model_id
    opt['model_save_dir'] = model_save_dir
    helper.ensure_dir(model_save_dir, verbose=True)

    # save config
    helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
    vocab.save(model_save_dir + '/vocab.pkl')
    file_logger = helper.FileLogger(
        model_save_dir + '/' + opt['log'],
        header="# epoch\ttrain_loss\tdev_loss\tdev_f1")

    # print model info
    helper.print_config(opt)

    # model
    model = RelationModel(opt, emb_matrix=emb_matrix)

    id2label = dict([(v, k) for k, v in constant.LABEL_TO_ID.items()])
    dev_f1_history = []
    current_lr = opt['lr']

    global_step = 0
    global_start_time = time.time()
    format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
    max_steps = len(train_batch) * opt['num_epoch']

    # start training
    for epoch in range(1, opt['num_epoch'] + 1):
        train_loss = 0
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = model.update(batch)
            train_loss += loss
            if global_step % opt['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                        opt['num_epoch'], loss, duration, current_lr))

        # eval on dev
        print("Evaluating on dev set...")
        predictions = []
        dev_loss = 0
        for i, batch in enumerate(dev_batch):
            preds, _, loss = model.predict(batch)
            predictions += preds
            dev_loss += loss
        predictions = [id2label[p] for p in predictions]
        dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)

        train_loss = train_loss / train_batch.num_examples * opt[
            'batch_size']  # avg loss per batch
        dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
        print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
                train_loss, dev_loss, dev_f1))
        file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(
            epoch, train_loss, dev_loss, dev_f1))

        # save
        model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
        model.save(model_file, epoch)
        if epoch == 1 or dev_f1 > max(dev_f1_history):
            copyfile(model_file, model_save_dir + '/best_model.pt')
            print("new best model saved.")
        if epoch % opt['save_epoch'] != 0:
            os.remove(model_file)

        # lr schedule
        if len(dev_f1_history) > 10 and dev_f1 <= dev_f1_history[-1] and \
                opt['optim'] in ['sgd', 'adagrad']:
            current_lr *= opt['lr_decay']
            model.update_lr(current_lr)

        dev_f1_history += [dev_f1]
        print("")

    print("Training ended with {} epochs.".format(epoch))
Example No. 10
predictions = []
# all_probs = []
data_visual = []

with torch.no_grad():
    for i, b in enumerate(batch):

        batch_data = batch.data[i]
        batch_size = len(batch_data)
        batch_data = list(zip(*batch_data))
        assert len(batch_data) == 10
        tokens = batch_data[0]
        labels = batch_data[9]

        preds, probs, loss, weights = model.predict(b)

        for j in range(batch_size):
            text = [vocab.id2word[idx] for idx in tokens[j]]
            label = labels[j]
            prediction = preds[j]
            posterior = probs[j]
            attention = weights[j].tolist()
            jason_dict = {
                'text': text,
                'label': label,
                'prediction': prediction,
                'posterior': posterior,
                'attention': attention,
                'id': 'sample{}_{}'.format(i, j)
            }
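            # Presumably each record is accumulated for later inspection; a
            # hedged completion (the append and the output filename below are
            # assumptions, not part of the original snippet):
            data_visual.append(jason_dict)

# After the loop, the records could be dumped for an attention-visualization
# tool, assuming the fields above are already plain Python types.
import json
with open('visualize_attention.json', 'w') as f:
    json.dump(data_visual, f, indent=2)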