Example #1
    def __init__(self, doc2vec, dataset):
        """
        :param doc2vec: either a Doc2Vec model or the path to a saved Doc2Vec model
        :param dataset: the training set
        :return: the trained model
        """

        assert isinstance(doc2vec, (Doc2Vec, str))

        if isinstance(doc2vec, str):
            doc2vec = Doc2Vec.load(doc2vec)

        self.doc2vec = doc2vec

        trainset, _, _ = load_dataset(dataset)

        y = trainset.data['y_original']
        X = trainset.data['essay'].apply(
            lambda doc: pd.Series(doc2vec.infer_vector(doc)))

        print('Training Set built')

        self.svr = SVR(C=1.0, epsilon=0.2)
        self.svr.fit(X, y)

        print('SVM trained')
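
The __init__ above only fits the regressor; scoring a new essay would mean inferring its Doc2Vec vector and passing it to the fitted SVR. A minimal sketch of that step, assuming the surrounding object exposes the doc2vec and svr attributes set above (the helper name and the reshape are illustrative, not part of the original code):

import numpy as np

def predict_score(model, essay_tokens):
    # embed the tokenised essay with the same Doc2Vec model used for training
    vec = model.doc2vec.infer_vector(essay_tokens)
    # SVR.predict expects a 2-D array: one row per sample
    return model.svr.predict(np.asarray(vec).reshape(1, -1))[0]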
Example #2
def main(dataset):
    
    dataset_path = os.path.join(ROOT, "data", dataset)
    svr_model_path = os.path.join(SVM_DIR, dataset)
    
    with open(svr_model_path, 'rb') as file:
        svr = pickle.load(file)
    
    trainset, _, testset = load_dataset(dataset_path)

    print('####DATASET####')
    
    print(dataset)
    
    print('#########TRAINING#########')
    
    evaluate(svr, trainset)
    
    print('#########TESTING#########')
    
    evaluate(svr, testset)
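
Example #2 assumes the SVR was already pickled at svr_model_path. A minimal sketch of the complementary save step, using plain pickle (the function name and path handling are assumptions, not the project's code):

import os
import pickle

def save_svr(svr, path):
    # counterpart to the pickle.load above: persist the fitted SVR to disk
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'wb') as file:
        pickle.dump(svr, file)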
Example #3
def train_doc2vec(dataset, epochs=EPOCHS, size=SIZE, alpha=ALPHA, window=WINDOW, outfile=None, workers=WORKERS):
    """
    :param dataset: path to the dataset
    :param epochs: number of epochs to train the Doc2Vec model
    :param size: size of the embeddings for Doc2Vec
    :param alpha: learning rate for Doc2Vec
    :param window: size of window for Doc2Vec
    :param outfile: output file where to store the model (default None = model is just returned but not saved)
    :param workers: number of parallel processes to run
    :return: the trained model
    """
    
    if outfile is not None:
        outdir = os.path.dirname(outfile)
        os.makedirs(outdir, exist_ok=True)
    
    trainset, _, _ = load_dataset(dataset)

    documents = [TaggedDocument(trainset[i].essay, [i]) for i in range(len(trainset))]
    
    # dm=1 for 'Distributed Memory' (PV-DM); dm=0 uses 'Distributed Bag-of-Words', which does not preserve word order
    model = Doc2Vec(vector_size=size, dm=1, alpha=alpha, window=window, min_count=1, workers=workers, seed=42)

    model.build_vocab(documents)

    for epoch in range(epochs):
        print('iteration {0}'.format(epoch))
        model.train(documents,
                    total_examples=model.corpus_count,
                    epochs=model.iter)
        # decrease the learning rate
        model.alpha -= 0.0002
        # fix the learning rate, no decay
        model.min_alpha = model.alpha
    
    if outfile is not None:
        model.save(outfile)
    
    return model
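
Once trained, the Doc2Vec model can embed unseen documents with infer_vector. A short usage sketch, assuming essays are tokenised into lists of strings; the paths and example tokens below are placeholders:

# paths and tokens are made up for illustration
model = train_doc2vec('data/asap', outfile='models/doc2vec/asap.model')
vector = model.infer_vector(['the', 'computer', 'helps', 'students', 'write'])
print(vector.shape)  # (size,) -- one fixed-length embedding per document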
Example #4
def train(config):

    cuda_enabled = 'cuda' in config.device

    print("Starting Training...")

    training_data, validation_data, test_data = load_dataset(config.dataset)

    training_dataloader = DataLoader(training_data, batch_size=config.batch_size,
                                     shuffle=True, collate_fn=elmo_collate)

    validation_dataloader = DataLoader(validation_data, batch_size=VALIDATION_BATCHSIZE, shuffle=False, pin_memory=True,
                                       collate_fn=elmo_collate)
    test_dataloader = DataLoader(test_data, batch_size=VALIDATION_BATCHSIZE, shuffle=False, pin_memory=True,
                                 collate_fn=elmo_collate)
    
    print("Training data loaded...")

    model = EmbeddingGRU(
        input_size=256,
        hidden_size=config.rnn_cell_dim,
        n_layers=config.num_rnn_layers,
        dropout=0.1,
        device=config.device,
    )
    

    writer = tensorboardX.SummaryWriter(os.path.join(os.path.dirname(__file__), f"runs/ELMORUN{time.time()}"))

    outdir = os.path.join(OUTPUT_DIR, config.name)

    os.makedirs(outdir, exist_ok=True)

    outfile_metrics = os.path.join(outdir, "metrics.pickle")
    outfile_metrics_valid = os.path.join(outdir, "metrics_valid.csv")
    outfile_metrics_train = os.path.join(outdir, "metrics_train.csv")
    outfile_metrics_test = os.path.join(outdir, "metrics_test.csv")

    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)

    if cuda_enabled:
        model.cuda(device=config.device)
        criterion.cuda(device=config.device)

    metrics = {
        "train": {
            "rmse": [],
            "pearson": [],
            "spearman": [],
            "kappa": [],
        },
        "valid": {
            "rmse": [],
            "pearson": [],
            "spearman": [],
            "kappa": [],
        },
        "test": {
            "rmse": [],
            "pearson": [],
            "spearman": [],
            "kappa": [],
        }
    }

    for e in range(config.num_epochs):
        print(f"Starting epoch {e}")

        loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(config, criterion, e, model, optimizer, training_dataloader, writer)
        print('| Train Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |'.format(
            loss, pearson, spearman, kappa))
        print('| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.format(aloss, apearson, aspearman))
        metrics["train"] = update_metrics(metrics["train"], loss, pearson, spearman, kappa)
        print("Got here")
        update_writer(writer, e, loss, pearson, spearman, kappa, is_eval=False)
        print("Got here")
        update_csv(outfile_metrics_train, e, loss, pearson, spearman, kappa)
        print("Got here")

        loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(config, criterion, e, model, optimizer, validation_dataloader, writer, is_training=False)
        print('| Valid Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |'.format(
            loss, pearson, spearman, kappa))
        print('| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.format(aloss, apearson, aspearman))
        metrics["valid"] = update_metrics(metrics["valid"], loss, pearson, spearman, kappa)
        print("Got here")

        update_writer(writer, e, loss, pearson, spearman, kappa, is_eval=True)
        print("Got here")
        update_csv(outfile_metrics_valid, e, loss, pearson, spearman, kappa)
        print("Got here")

        update_saved_model(metrics, model, optimizer, e, outdir)
        print("Got here")
        update_metrics_pickle(metrics, outfile_metrics)
        print("Got here")


    print("Finished training")
    print("Evaluating on test set")
    saved_model_dir = os.path.join(OUTPUT_DIR, "ELMO")
    saved_model_path = os.path.join(saved_model_dir, "checkpoint_rmse.pth.tar")
    load_saved_model(saved_model_path, model, config.device)
    loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(config, criterion, 1, model, optimizer, test_dataloader, writer, is_training=False)
    print('| Test Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |'.format(
            loss, pearson, spearman, kappa))
    print('| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.format(aloss, apearson, aspearman))
    update_csv(outfile_metrics_test, 1, loss, pearson, spearman, kappa)
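
update_metrics, update_csv and the other helpers come from elsewhere in the project; a plausible minimal sketch of the first two, given the metrics dict built above (these implementations are assumptions, not the project's code):

import csv

def update_metrics(split_metrics, loss, pearson, spearman, kappa):
    # append this epoch's values to the per-split lists created in the metrics dict
    split_metrics["rmse"].append(loss)
    split_metrics["pearson"].append(pearson)
    split_metrics["spearman"].append(spearman)
    split_metrics["kappa"].append(kappa)
    return split_metrics

def update_csv(path, epoch, loss, pearson, spearman, kappa):
    # append one row per epoch to the corresponding metrics csv
    with open(path, 'a', newline='') as f:
        csv.writer(f).writerow([epoch, loss, pearson, spearman, kappa])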
Example #5
def main(dataset, epochs, lr, batchsize, context_size, error_rate, alpha, **kwargs):
    
    assert 0. <= alpha <= 1.
    assert error_rate > 0 and isinstance(error_rate, int)
    assert context_size > 1 and isinstance(context_size, int)
    
    outdir = os.path.join(OUTPUT_DIR, dataset)
    
    os.makedirs(outdir, exist_ok=True)

    outfile = os.path.join(outdir, OUTFILE_NAME)
    
    training, _, _ = load_dataset(os.path.join(DATASET_DIR, dataset))
    
    training = ContextDateset(training, context_size)
    
    vocab_len = len(training.dict)
    
    model = SSWEModel(vocab_len=vocab_len, context_size=context_size, **kwargs)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    def collate(batch):
        
        X = torch.stack([torch.LongTensor(b.ngram).reshape(-1) for b in batch])
        y = torch.tensor([b.y for b in batch]).reshape(-1)
        
        # add a dimension between the batch dim and the sequence dim for the noisy versions of each sequence
        # (i.e. with the word in the middle changed to a random word)
    
        X = X.unsqueeze(1).expand(-1, error_rate + 1, -1).contiguous()
        
        # find the position of the word in the middle of the sequence
        t = context_size // 2
        
        # change the word in the middle of the sequences to a random word, for each batch and each copy of the original
        # sequence (notice that the first copy is left unchanged)
        
        X[:, 1:, t] = torch.randint(0, vocab_len-1, size=(X.shape[0], error_rate))
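        # e.g. with batchsize=32, error_rate=5 and context_size=7 (hypothetical values), X now has shape
        # (32, 6, 7): slice [:, 0, :] is the clean n-gram and slices [:, 1:, :] are the five corrupted
        # copies whose centre word (position t=3) was replaced by a random vocabulary index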
        
        return X, y
    
    training = DataLoader(training, batch_size=batchsize, shuffle=True, pin_memory=True, collate_fn=collate)

    score_loss = MSELoss()
    
    optimizer = Adam(model.parameters(), lr)
    # optimizer = SGD(model.parameters(), lr)
    
    model.to(device)

    train_losses = []

    for e in range(epochs):

        train_loss = 0

        print('###############################################')
        print('Starting epoch {}'.format(e + 1))
        
        model.train()
    
        for i_batch, batch in enumerate(training):
            if i_batch % 100 == 0:
                print("{}/{}".format(i_batch, len(training)))
            
            x, t = batch            
            x = x.to(device)
            t = t.to(device)
        
            model.zero_grad()
        
            fc, fs = model(x)
                        
            # the loss for the score prediction applies only to the original sequence (i.e. the first one)
            score_l = score_loss(fs[:, 0], t)
            
            # hinge ranking loss for the context: the original n-gram (index 0) should score at least
            # a margin of 1 higher than each of its corrupted copies
            score_c = torch.clamp(1 - fc[:, 0].view(-1, 1) + fc[:, 1:], min=0).mean()
            
            # the final loss is the weighted sum of these 2 losses
            l = (1 - alpha) * score_l + alpha * score_c

            l.backward()
            optimizer.step()
        
            train_loss += l.item() * x.shape[1]
            
            if i_batch % 10 == 0:
                print('| Training loss at {}: {} |'.format(i_batch, (train_loss/(i_batch+1))))
                
            
        train_loss /= len(training)
        train_losses.append(train_loss)
    
        print('| Training loss: {} |'.format(train_loss))
        
        print("Saving Model checkpoint")
        save_model_checkpoint({
            'state_dict': model.state_dict(),
        }, outfile)
            
        print('|\tModel Saved!')
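
The context loss above is a margin-1 hinge ranking loss: the score fc[:, 0] of the clean n-gram should exceed the score of every corrupted copy by at least 1. A tiny self-contained illustration with made-up scores:

import torch

fc = torch.tensor([[2.0, 0.5, 1.5],    # clean n-gram scores 2.0; corrupted copies score 0.5 and 1.5
                   [0.0, 1.0, -2.0]])  # here one corrupted copy (1.0) outranks the clean n-gram (0.0)
hinge = torch.clamp(1 - fc[:, 0].view(-1, 1) + fc[:, 1:], min=0)
print(hinge)         # [[0.0, 0.5], [2.0, 0.0]]
print(hinge.mean())  # 0.625 -- zero only when every margin is at least 1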
Example #6
import os

import torch

from dl4nlt.datasets import load_dataset
# ROOT and CustomLSTM are assumed to be imported from elsewhere in the dl4nlt project

################################################################

# SET THIS PARAMETER
EXP_NAME = '(LSTM;sswe;local_mispelled;0.0001;128;0.4;1;1)'

DATASET = 'local_mispelled'

MODEL_PATH = os.path.join(ROOT, "models/lstm/saved_data", DATASET, EXP_NAME, "model")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


training, _, _ = load_dataset(os.path.join(ROOT, 'data', DATASET))

checkpoint = torch.load(MODEL_PATH, map_location=device)

model = CustomLSTM(device=device, **checkpoint['params'])
model.load_state_dict(checkpoint['state_dict'])

embeddings = model.encoder



# words = ['computer', 'people', 'laptop']
# words = ['computer', '_computer', 'laptop', '_laptop']
# words = ['girl', '_girl', 'woman', '_woman', 'man', '_man']
words = ['student', 'teacher', '_student', '_teacher']
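
The snippet stops after selecting the words, presumably to compare the embeddings of the clean and misspelled variants. A hedged sketch of that comparison, assuming model.encoder is an nn.Embedding and training.dict.word2idx maps tokens to indices (as in Example #7):

import torch.nn.functional as F

idx = torch.tensor([training.dict.word2idx[w] for w in words], device=device)
vecs = F.normalize(embeddings(idx), dim=-1)  # unit-length embedding per word
sims = vecs @ vecs.t()                       # pairwise cosine similarities
for w, row in zip(words, sims.tolist()):
    print(w, [round(s, 3) for s in row])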
Example #7
def train(name, dataset, epochs, lr, batchsize, **kwargs):
    def run_epoch(data, epoch, is_eval=False):
        if is_eval:
            model.eval()
        else:
            model.train()

        all_predictions = []
        all_targets = []

        all_predictions_denorm = []
        all_targets_denorm = []

        for i_batch, batch in enumerate(data):
            x, s, t, l = batch
            x = x.to(device)
            t = t.to(device)

            model.zero_grad()

            y = model(x, l)
            this_loss = torch.sqrt(criterion(y, t))

            if not is_eval:
                this_loss.backward()
                optimizer.step()
                writer.add_scalar('Iteration training loss', this_loss.item(),
                                  epoch * len(data) + i_batch)

            y_denorm = denormalize_vec(s, y.detach(), device)
            t_denorm = denormalize_vec(s, t.detach(), device)
            this_kappa = cohen_kappa_score(t_denorm,
                                           y_denorm,
                                           labels=list(range(0, 61)),
                                           weights="quadratic")
            this_pearson, p_value = pearsonr(t.detach(), y.detach())
            this_spearman, p_value = spearmanr(t.detach(), y.detach())

            if not is_eval:
                writer.add_scalar('Iteration training pearson', this_pearson,
                                  epoch * len(data) + i_batch)
                writer.add_scalar('Iteration training spearman', this_spearman,
                                  epoch * len(data) + i_batch)
                writer.add_scalar('Iteration training kappa', this_kappa,
                                  epoch * len(data) + i_batch)

            all_predictions_denorm += y_denorm.tolist()
            all_targets_denorm += t_denorm.tolist()

            all_predictions += y.tolist()
            all_targets += t.tolist()

        epoch_loss = torch.sqrt(
            criterion(torch.FloatTensor(all_predictions),
                      torch.FloatTensor(all_targets))).item()
        epoch_denorm_loss = torch.sqrt(
            criterion(torch.FloatTensor(all_predictions_denorm),
                      torch.FloatTensor(all_targets_denorm))).item()

        epoch_pearson, _ = pearsonr(all_targets, all_predictions)
        epoch_denorm_pearson, _ = pearsonr(all_targets_denorm,
                                           all_predictions_denorm)

        epoch_spearman, _ = spearmanr(all_targets, all_predictions)
        epoch_denorm_spearman, _ = spearmanr(all_targets_denorm,
                                             all_predictions_denorm)

        epoch_kappa = cohen_kappa_score(all_targets_denorm,
                                        all_predictions_denorm,
                                        labels=list(range(0, 61)),
                                        weights="quadratic")

        return epoch_loss, epoch_pearson, epoch_spearman, epoch_kappa, epoch_denorm_loss, epoch_denorm_pearson, epoch_denorm_spearman

    ##############################################
    ## Data, Model and Optimizer initialization ##
    ##############################################

    outdir = os.path.join(OUTPUT_DIR, dataset, name)

    print('Outdir:', outdir)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    outfile_metrics = os.path.join(outdir, "metrics.pickle")
    outfile_metrics_valid = os.path.join(outdir, "metrics_valid.csv")
    outfile_metrics_train = os.path.join(outdir, "metrics_train.csv")
    outfile_model = os.path.join(outdir, "model")

    os.makedirs(outdir, exist_ok=True)

    writer = SummaryWriter(os.path.join('runs', dataset, name))

    training_set, validation_set, testing_set = load_dataset(
        os.path.join(DATASET_DIR, dataset))

    vocab_len = len(training_set.dict)
    training = DataLoader(training_set,
                          batch_size=batchsize,
                          shuffle=True,
                          pin_memory=True,
                          collate_fn=create_collate())
    validation = DataLoader(validation_set,
                            batch_size=VALIDATION_BATCHSIZE,
                            shuffle=False,
                            pin_memory=True,
                            collate_fn=create_collate())
    testing = DataLoader(testing_set,
                         batch_size=VALIDATION_BATCHSIZE,
                         shuffle=False,
                         pin_memory=True,
                         collate_fn=create_collate())

    if kwargs['embeddings'] == 'sswe':
        kwargs['embeddings'] = os.path.join(SSWE_DIR, dataset, SSWE_FILE)

    model = CustomLSTM(vocab_len=vocab_len,
                       device=device,
                       target_vocab_to_idx=training_set.dict.word2idx,
                       **kwargs)
    model.to(device)

    criterion = MSELoss()
    optimizer = Adam(model.parameters(), lr)

    best_model = model.state_dict()

    ####################################
    ############# TRAINING #############
    ####################################
    start_time = time.time()

    print('\n###############################################')
    print('Starting epoch 0 (Random Guessing)')
    loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(
        validation, 0, is_eval=True)
    print(
        '| Valid Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |\n'
        .format(loss, pearson, spearman, kappa))
    print('| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.
          format(aloss, apearson, aspearman))

    metrics = {
        "train": {
            "rmse": [],
            "pearson": [],
            "spearman": [],
            "kappa": [],
        },
        "valid": {
            "rmse": [],
            "pearson": [],
            "spearman": [],
            "kappa": [],
        }
    }

    for e in range(epochs):
        print('###############################################')
        print('Starting epoch {}'.format(e + 1))

        loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(
            training, e, is_eval=False)
        print(
            '| Train Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |'
            .format(loss, pearson, spearman, kappa))
        print(
            '| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.
            format(aloss, apearson, aspearman))
        metrics["train"] = update_metrics(metrics["train"], loss, pearson,
                                          spearman, kappa)
        update_writer(writer, e, loss, pearson, spearman, kappa, is_eval=False)
        update_csv(outfile_metrics_train, e, loss, pearson, spearman, kappa)

        loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(
            validation, e, is_eval=True)
        print(
            '| Valid Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |'
            .format(loss, pearson, spearman, kappa))
        print(
            '| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.
            format(aloss, apearson, aspearman))
        metrics["valid"] = update_metrics(metrics["valid"], loss, pearson,
                                          spearman, kappa)
        update_writer(writer, e, loss, pearson, spearman, kappa, is_eval=True)
        update_csv(outfile_metrics_valid, e, loss, pearson, spearman, kappa)

        # update_saved_model(metrics, model, optimizer, e, outdir)
        update_metrics_pickle(metrics, outfile_metrics)

        if metrics["valid"]["rmse"][-1] == min(metrics["valid"]["rmse"]):
            best_model = model.state_dict()

        print()

    print("Finished training in {:.1f} minutes ".format(
        (time.time() - start_time) / 60))

    print(
        '###################################################################################'
    )
    print('Starting Testing')
    print(
        '###################################################################################'
    )

    model.load_state_dict(best_model)

    loss, pearson, spearman, kappa, aloss, apearson, aspearman = run_epoch(
        testing, e, is_eval=True)
    print(
        '| Test Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |  Kappa: {:.5f} |'
        .format(loss, pearson, spearman, kappa))
    print('| Denor Loss: {:.5f} |  Pearson: {:.5f} |  Spearman: {:.5f} |\n'.
          format(aloss, apearson, aspearman))

    params = kwargs.copy()
    params.update({
        'vocab_len': vocab_len,
        'target_vocab_to_idx': training_set.dict.word2idx,
    })

    torch.save({'state_dict': best_model, 'params': params}, outfile_model)

    return min(metrics["valid"]["rmse"]), max(
        metrics["valid"]["kappa"]), (loss, pearson, spearman, kappa, aloss,
                                     apearson, aspearman)
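
Quadratic weighted kappa (the kappa reported throughout these examples) penalises disagreements by the square of their distance on the 0-60 score scale, so near-misses cost little. A toy illustration with made-up scores:

from sklearn.metrics import cohen_kappa_score

targets     = [10, 25, 40, 55]
predictions = [12, 25, 35, 60]  # close to the targets but not exact
qwk = cohen_kappa_score(targets, predictions, labels=list(range(0, 61)), weights="quadratic")
print(round(qwk, 3))  # close to 1: the small errors are only mildly penalised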