Example #1
def run_intent(root: str):
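    """Meta-train a LinearClassifier on BERT intent embeddings with Reptile."""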
    hparams = HyperParams(root=root,
                          steps_outer=500,
                          steps_inner=50,
                          bs_inner=10)
    loaders = {
        s: IntentEmbedBertMetaLoader(hparams, s)
        for s in ["train", "val"]
    }
    net = LinearClassifier(num_in=loaders[Splits.train].embed_size, hp=hparams)
    system = ReptileSystem(hparams, loaders, net)
    system.run_train()
Example #2
def main(test_input_dir, model_filepath, result_save_dir):
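    """Evaluate a trained LinearClassifier on a test set and save the results."""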
    # Create the test dataloader.
    test_dataset = create_dataset(test_input_dir,
                                  num_examples=-1,
                                  num_options=20)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=4,
                                 shuffle=False,
                                 drop_last=True)

    # Create a classifier model.
    logger.info("Load model parameters from {}.".format(model_filepath))
    classifier_model = LinearClassifier.from_pretrained(
        "cl-tohoku/bert-base-japanese-whole-word-masking")
    classifier_model.to(device)

    # map_location handles CPU-only environments as well as GPU ones.
    parameters = torch.load(model_filepath, map_location=device)
    classifier_model.load_state_dict(parameters)

    # Create a directory to save the results in.
    os.makedirs(result_save_dir, exist_ok=True)

    logger.info("Start model evaluation.")
    pred_labels, correct_labels, accuracy = evaluate(classifier_model,
                                                     test_dataloader)
    logger.info("Accuracy: {}".format(accuracy))

    # Save results as text files.
    res_filepath = os.path.join(result_save_dir, "result_eval.txt")
    labels_filepath = os.path.join(result_save_dir, "labels_eval.txt")

    with open(res_filepath, "w") as w:
        w.write("Accuracy: {}\n".format(accuracy))

    with open(labels_filepath, "w") as w:
        for pred_label, correct_label in zip(pred_labels, correct_labels):
            w.write("{} {}\n".format(pred_label, correct_label))

    logger.info("Finished model evaluation.")
Example #3
def run_intent(root: str):
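    """Meta-train a classifier on word-mean intent embeddings with Reptile."""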
    hp = HyperParams(
        root=root,
        bs_inner=1,
        num_shots=5,
        early_stop=True,
        # steps_inner=50,
        steps_inner=1000,
        steps_outer=50,
        # steps_outer=500,
    )
    # loader_class = IntentEmbedBertMetaLoader
    loader_class = IntentEmbedWordMeanMetaLoader
    loaders = {s: loader_class(hp, s) for s in Splits.get_all()}
    load = loaders[Splits.train]
    net = LinearClassifier(num_in=load.embed_size, hp=hp)
    # net = LSTMClassifier(num_in=load.embed_size, hp=hp)
    system = ReptileSystem(hp, loaders, net)
    system.run_train()
Example #4
def main(test_input_dir, bert_model_dir, parameters_dir, test_upper_bound,
         result_save_dir):
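    """Evaluate every saved checkpoint on the test set and save per-checkpoint results."""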
    # Create the test dataloader.
    test_dataset = create_dataset(test_input_dir,
                                  num_examples=-1,
                                  num_options=20)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=4,
                                 shuffle=False,
                                 drop_last=True)

    # Create a classifier model.
    logger.info("Create a classifier model from {}.".format(bert_model_dir))

    if bert_model_dir == "USE_DEFAULT":
        classifier_model = LinearClassifier.from_pretrained(
            "cl-tohoku/bert-base-japanese-whole-word-masking")
    else:
        bert_model_filepath = os.path.join(bert_model_dir, "pytorch_model.bin")

        bert_config = BertConfig.from_pretrained(bert_model_dir)
        classifier_model = LinearClassifier.from_pretrained(
            bert_model_filepath, config=bert_config)

    classifier_model.to(device)

    logger.info("Start model evaluation.")
    for i in range(test_upper_bound):
        parameters = None
        parameters_filepath = os.path.join(parameters_dir,
                                           "checkpoint_{}.pt".format(i + 1))

        logger.info(
            "Load model parameters from {}.".format(parameters_filepath))

        if torch.cuda.is_available():
            parameters = torch.load(parameters_filepath)
        else:
            parameters = torch.load(parameters_filepath,
                                    map_location=torch.device("cpu"))

        classifier_model.load_state_dict(parameters)

        #Create a directory to save the results in.
        os.makedirs(result_save_dir, exist_ok=True)

        pred_labels, correct_labels, accuracy = evaluate(
            classifier_model, test_dataloader)
        logger.info("Accuracy: {}".format(accuracy))

        # Save results as text files.
        res_filepath = os.path.join(result_save_dir,
                                    "result_test_{}.txt".format(i + 1))
        labels_filepath = os.path.join(result_save_dir,
                                       "labels_test_{}.txt".format(i + 1))

        with open(res_filepath, "w") as w:
            w.write("Accuracy: {}\n".format(accuracy))

        with open(labels_filepath, "w") as w:
            for pred_label, correct_label in zip(pred_labels, correct_labels):
                w.write("{} {}\n".format(pred_label, correct_label))

    logger.info("Finished model evaluation.")
Example #5
# Fragment of a larger task-dispatch chain: the model and its training
# function are selected according to constant.task.
    if constant.policy_model != '':
        seq2seq = load_model(
            Seq2Seq(encoder=encoder,
                    decoder=decoder,
                    vocab=train_dataset.lang), constant.policy_model)
        model.encoder = deepcopy(seq2seq.encoder)
        model.decoder = deepcopy(seq2seq.decoder)
        if constant.bi == 'bi':
            model.reduce_state = deepcopy(seq2seq.reduce_state)
        del seq2seq
    train_fn = train_multitask
elif constant.task == 'emotion':
    encoder = RNNEncoder(V=V, D=D, H=H, L=1, embedding=embedding)
    if constant.bi == 'bi':
        H = H * 2
    model = LinearClassifier(encoder=encoder, enc_type='rnn', H=H, C=C)
    train_fn = train_emotion
elif constant.task == 'sentiment':
    if constant.use_bert:
        encoder = BertModel.from_pretrained('bert-base-cased')
        # model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=1)
        model = BinaryClassifier(encoder=encoder, enc_type='bert', H=H)
    else:
        encoder = RNNEncoder(V=V, D=D, H=H, L=1, embedding=embedding)
        if constant.bi == 'bi':
            H = H * 2
        model = BinaryClassifier(encoder=encoder, enc_type='rnn', H=H)
    train_fn = train_sentiment
elif constant.task == 'seq2seq':
    encoder = RNNEncoder(V=V, D=D, H=H, L=1, embedding=embedding)
    decoder = RNNDecoder(V=V, D=D, H=H, L=1, embedding=embedding)
Example #6
def main(batch_size, num_epochs, lr, bert_model_dir_1,
         finetuned_model_filepath_1, train_input_dir_1, dev1_input_dir_1,
         bert_model_dir_2, finetuned_model_filepath_2, train_input_dir_2,
         dev1_input_dir_2, result_save_dir):
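    """Train a DoubleLinearClassifier that combines two fine-tuned LinearClassifier models."""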
    logger.info("batch_size: {} num_epochs: {} lr: {}".format(
        batch_size, num_epochs, lr))

    # Create dataloaders.
    logger.info("Create train dataloader from {}.".format(train_input_dir_1))
    train_dataset_1 = create_dataset(train_input_dir_1,
                                     num_examples=-1,
                                     num_options=20)
    # Seed the shuffling through a torch.Generator for reproducibility.
    train_sampler_1 = RandomSampler(train_dataset_1,
                                    generator=torch.Generator().manual_seed(SEED))
    train_dataloader_1 = DataLoader(train_dataset_1,
                                    batch_size=batch_size,
                                    sampler=train_sampler_1,
                                    drop_last=True)

    logger.info("Create train dataloader from {}.".format(train_input_dir_2))
    train_dataset_2 = create_dataset(train_input_dir_2,
                                     num_examples=-1,
                                     num_options=20)
    train_sampler_2 = RandomSampler(train_dataset_2,
                                    generator=torch.Generator().manual_seed(SEED))
    train_dataloader_2 = DataLoader(train_dataset_2,
                                    batch_size=batch_size,
                                    sampler=train_sampler_2,
                                    drop_last=True)

    logger.info("Create dev1 dataloader from {}.".format(dev1_input_dir_1))
    dev1_dataset_1 = create_dataset(dev1_input_dir_1,
                                    num_examples=-1,
                                    num_options=20)
    dev1_dataloader_1 = DataLoader(dev1_dataset_1,
                                   batch_size=4,
                                   shuffle=False,
                                   drop_last=True)

    logger.info("Create dev1 dataloader from {}.".format(dev1_input_dir_2))
    dev1_dataset_2 = create_dataset(dev1_input_dir_2,
                                    num_examples=-1,
                                    num_options=20)
    dev1_dataloader_2 = DataLoader(dev1_dataset_2,
                                   batch_size=4,
                                   shuffle=False,
                                   drop_last=True)

    # Create a classifier model.
    classifier_model = DoubleLinearClassifier(20)
    classifier_model.to(device)

    if bert_model_dir_1 == "USE_DEFAULT":
        lc_model_1 = LinearClassifier.from_pretrained(
            "cl-tohoku/bert-base-japanese-whole-word-masking")
    else:
        bert_model_filepath = os.path.join(bert_model_dir_1,
                                           "pytorch_model.bin")

        bert_config = BertConfig.from_pretrained(bert_model_dir_1)
        logger.info(bert_config)
        lc_model_1 = LinearClassifier.from_pretrained(bert_model_filepath,
                                                      config=bert_config)

    if bert_model_dir_2 == "USE_DEFAULT":
        lc_model_2 = LinearClassifier.from_pretrained(
            "cl-tohoku/bert-base-japanese-whole-word-masking")
    else:
        bert_model_filepath = os.path.join(bert_model_dir_2,
                                           "pytorch_model.bin")

        bert_config = BertConfig.from_pretrained(bert_model_dir_2)
        logger.info(bert_config)
        lc_model_2 = LinearClassifier.from_pretrained(bert_model_filepath,
                                                      config=bert_config)

    logger.info("Load model parameters from {} and {}.".format(
        finetuned_model_filepath_1, finetuned_model_filepath_2))
    lc_model_1.load_state_dict(
        torch.load(finetuned_model_filepath_1, map_location=device))
    lc_model_2.load_state_dict(
        torch.load(finetuned_model_filepath_2, map_location=device))

    lc_model_1.to(device)
    lc_model_2.to(device)

    # Create an optimizer.
    optimizer = optim.AdamW(classifier_model.parameters(), lr=lr)

    # Create a directory to save the results in.
    os.makedirs(result_save_dir, exist_ok=True)

    logger.info("Start model training.")
    for epoch in range(num_epochs):
        logger.info("===== Epoch {}/{} =====".format(epoch + 1, num_epochs))

        mean_loss = train(classifier_model, lc_model_1, lc_model_2, optimizer,
                          train_dataloader_1, train_dataloader_2)
        logger.info("Mean loss: {}".format(mean_loss))

        # Save model parameters.
        checkpoint_filepath = os.path.join(
            result_save_dir, "checkpoint_{}.pt".format(epoch + 1))
        torch.save(classifier_model.state_dict(), checkpoint_filepath)

        pred_labels, correct_labels, accuracy = evaluate(
            classifier_model, lc_model_1, lc_model_2, dev1_dataloader_1,
            dev1_dataloader_2)
        logger.info("Accuracy: {}".format(accuracy))

        # Save results as text files.
        res_filepath = os.path.join(result_save_dir,
                                    "result_eval_{}.txt".format(epoch + 1))
        labels_filepath = os.path.join(result_save_dir,
                                       "labels_eval_{}.txt".format(epoch + 1))

        with open(res_filepath, "w") as w:
            w.write("Accuracy: {}\n".format(accuracy))

        with open(labels_filepath, "w") as w:
            for pred_label, correct_label in zip(pred_labels, correct_labels):
                w.write("{} {}\n".format(pred_label, correct_label))

    logger.info("Finished model training.")
Example #7
import os
from datetime import datetime
import torch
import torch.optim as optim
from torchvision import datasets
from tools import Parser, data_transformer_with_segmentation
from tools.visualisation import show_images
from models import bounding_box
from models import simple_cnn, alexnet, resnet101_wo_softmax, LinearClassifier

import matplotlib.pyplot as plt

embedding_size = 100

model, input_size = resnet101_wo_softmax(embedding_size)
classifier = LinearClassifier(2 * embedding_size)

# Training settings
args = Parser().parse()
use_cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)

# Create experiment folder
if not os.path.isdir(args.experiment):
    os.makedirs(args.experiment)

path_to_images = os.path.abspath(
    os.path.join(os.curdir, 'bird_dataset', 'train_images'))

# Data initialization and loading
# The source snippet is cut off mid-call; input_size is an assumed argument.
data_transforms = data_transformer_with_segmentation(input_size)
Example #8
def main(batch_size, num_epochs, lr, bert_model_dir, train_input_dir,
         dev1_input_dir, result_save_dir):
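    """Fine-tune a LinearClassifier and evaluate it on dev1 after every epoch."""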
    logger.info("batch_size: {} num_epochs: {} lr: {}".format(
        batch_size, num_epochs, lr))

    # Create dataloaders.
    logger.info("Create train dataloader from {}.".format(train_input_dir))
    train_dataset = create_dataset(train_input_dir,
                                   num_examples=-1,
                                   num_options=4)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  drop_last=True)

    logger.info("Create dev1 dataloader from {}.".format(dev1_input_dir))
    dev1_dataset = create_dataset(dev1_input_dir,
                                  num_examples=-1,
                                  num_options=20)
    dev1_dataloader = DataLoader(dev1_dataset,
                                 batch_size=4,
                                 shuffle=False,
                                 drop_last=True)

    # Create a classifier model.
    logger.info("Create a classifier model from {}.".format(bert_model_dir))

    if bert_model_dir == "USE_DEFAULT":
        classifier_model = LinearClassifier.from_pretrained(
            "cl-tohoku/bert-base-japanese-whole-word-masking")
    else:
        bert_model_filepath = os.path.join(bert_model_dir, "pytorch_model.bin")

        bert_config = BertConfig.from_pretrained(bert_model_dir)
        logger.info(bert_config)
        classifier_model = LinearClassifier.from_pretrained(
            bert_model_filepath, config=bert_config)

    classifier_model.to(device)

    # Create an optimizer and a scheduler.
    optimizer = AdamW(classifier_model.parameters(), lr=lr)
    total_steps = len(train_dataloader) * num_epochs
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=total_steps)

    # Create a directory to save the results in.
    os.makedirs(result_save_dir, exist_ok=True)

    logger.info("Start model training.")
    for epoch in range(num_epochs):
        logger.info("===== Epoch {}/{} =====".format(epoch + 1, num_epochs))

        mean_loss = train(classifier_model, optimizer, scheduler,
                          train_dataloader)
        logger.info("Mean loss: {}".format(mean_loss))

        # Save model parameters.
        checkpoint_filepath = os.path.join(
            result_save_dir, "checkpoint_{}.pt".format(epoch + 1))
        torch.save(classifier_model.state_dict(), checkpoint_filepath)

        pred_labels, correct_labels, accuracy = evaluate(
            classifier_model, dev1_dataloader)
        logger.info("Accuracy: {}".format(accuracy))

        # Save results as text files.
        res_filepath = os.path.join(result_save_dir,
                                    "result_eval_{}.txt".format(epoch + 1))
        labels_filepath = os.path.join(result_save_dir,
                                       "labels_eval_{}.txt".format(epoch + 1))

        with open(res_filepath, "w") as w:
            w.write("Accuracy: {}\n".format(accuracy))

        with open(labels_filepath, "w") as w:
            for pred_label, correct_label in zip(pred_labels, correct_labels):
                w.write("{} {}\n".format(pred_label, correct_label))

    logger.info("Finished model training.")