Example 1
def train_procedure(model, config: Config, epoch: int, train_insts: List[Instance], dev_insts: List[Instance], test_insts: List[Instance], devscore=None, testscore=None):
    optimizer = get_optimizer(config, model)
    random.shuffle(train_insts)
    batched_data = batching_list_instances(config, train_insts, is_soft=False, is_naive=True)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = devscore if devscore is not None else [-1, 0]
    best_test = testscore if testscore is not None else [-1, 0]

    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in tqdm(np.random.permutation(len(batched_data))):
            model.train()
            # positional batch layout: fields 0-4 are the model inputs, field -3 the target
            loss = model(*batched_data[index][0:5], batched_data[index][-3])
            epoch_loss += loss.item()
            # retain_graph=True keeps the autograd graph alive after backward;
            # only needed if the model reuses graph tensors across batches
            loss.backward(retain_graph=True)
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" % (i, epoch_loss, end_time - start_time), flush=True)
        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev", dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test", test_insts)
        if dev_metrics[2] > best_dev[0]:
            print("saving the best model...")
            best_dev[0] = dev_metrics[2]
            best_dev[1] = i
            best_test[0] = test_metrics[2]
            best_test[1] = i
        model.zero_grad()

    return model, best_dev, best_test
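
The `lr_decay` helper called above is not part of this listing. A minimal sketch, assuming the usual epoch-indexed decay for SGD and a `config.lr_decay` attribute (both assumptions, not confirmed by the source):

import torch.optim as optim

def lr_decay(config, optimizer: optim.Optimizer, epoch: int) -> optim.Optimizer:
    # Hypothetical schedule: shrink the base learning rate as epochs grow.
    lr = config.learning_rate / (1 + config.lr_decay * (epoch - 1))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer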
Example 2
def learn_from_insts(config: Config, epoch: int, train_insts, dev_insts,
                     test_insts):
    # train_insts: List[Instance], dev_insts: List[Instance], test_insts: List[Instance], batch_size: int = 1
    model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    model_folder = "model_files"
    res_folder = "results"
    model_name = model_folder + "/lstm_{}_crf_{}_{}_dep_{}_elmo_{}_lr_{}.m".format(
        config.hidden_dim,
        config.dataset, config.train_num, config.context_emb.name,
        config.optimizer.lower(), config.learning_rate)
    res_name = res_folder + "/lstm_{}_crf_{}_{}_dep_{}_elmo_{}_lr_{}.results".format(
        config.hidden_dim,
        config.dataset, config.train_num, config.context_emb.name,
        config.optimizer.lower(), config.learning_rate)
    print("[Info] The model will be saved to: %s" % (model_name))
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(res_folder):
        os.makedirs(res_folder)

    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in np.random.permutation(len(batched_data)):
            model.train()
            (batch_word, batch_wordlen, batch_context_emb,
             batch_char, batch_charlen, batch_label) = batched_data[index]
            loss = model.neg_log_obj(batch_word, batch_wordlen,
                                     batch_context_emb, batch_char,
                                     batch_charlen, batch_label)
            epoch_loss += loss.item()
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)  # optional gradient clipping
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss, end_time - start_time),
              flush=True)

        model.eval()
        dev_metrics = evaluate_model(config, model, dev_batches, "dev",
                                     dev_insts)
        test_metrics = evaluate_model(config, model, test_batches, "test",
                                      test_insts)
        if dev_metrics[2] > best_dev[0]:
            print("saving the best model...")
            best_dev[0] = dev_metrics[2]
            best_dev[1] = i
            best_test[0] = test_metrics[2]
            best_test[1] = i
            torch.save(model.state_dict(), model_name)
            write_results(res_name, test_insts)
        model.zero_grad()

    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_name))
    model.eval()
    evaluate_model(config, model, test_batches, "test", test_insts)
    write_results(res_name, test_insts)
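
The `get_optimizer` factory is likewise not shown. A minimal sketch consistent with how it is used here; the `config.momentum` and `config.l2` attributes are assumptions:

import torch.optim as optim

def get_optimizer(config, model):
    # Hypothetical factory: pick the optimizer named in config.optimizer.
    params = model.parameters()
    name = config.optimizer.lower()
    if name == "sgd":
        return optim.SGD(params, lr=config.learning_rate,
                         momentum=config.momentum, weight_decay=config.l2)
    if name == "adam":
        return optim.Adam(params, lr=config.learning_rate)
    raise ValueError("Unsupported optimizer: " + config.optimizer)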
Example 3
def learn_from_insts(config: Config, epoch: int, train_insts, dev_insts,
                     test_insts):
    # train_insts: List[Instance], dev_insts: List[Instance], test_insts: List[Instance], batch_size: int = 1
    model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    dep_model_name = config.dep_model.name  # descriptive name (not used further in this snippet)
    if config.dep_model == DepModelType.dggcn:
        dep_model_name += '(' + str(config.num_gcn_layers) + "," + str(
            config.gcn_dropout) + "," + str(config.gcn_mlp_layers) + ")"
    model_name = "model_files/gcn_{}_hidden_{}_dataset_{}_{}_context_{}.m".format(
        config.num_gcn_layers, config.hidden_dim, config.dataset, config.affix,
        config.context_emb.name)
    res_name = "results/gcn_{}_hidden_{}_dataset_{}_{}_context_{}.results".format(
        config.num_gcn_layers, config.hidden_dim, config.dataset, config.affix,
        config.context_emb.name)
    print("[Info] The model will be saved to: %s" % (model_name))
    if not os.path.exists("model_files"):
        os.makedirs("model_files")
    if not os.path.exists("results"):
        os.makedirs("results")

    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in np.random.permutation(len(batched_data)):
            model.train()
            (batch_word, batch_wordlen, batch_context_emb, batch_char,
             batch_charlen, adj_matrixs, adjs_in, adjs_out, graphs,
             dep_label_adj, batch_dep_heads, trees, batch_label,
             batch_dep_label, batch_poslabel) = batched_data[index]
            loss = model.neg_log_obj(batch_word, batch_wordlen,
                                     batch_context_emb, batch_char,
                                     batch_charlen, adj_matrixs, adjs_in,
                                     adjs_out, graphs, dep_label_adj,
                                     batch_dep_heads, batch_label,
                                     batch_dep_label, batch_poslabel, trees)
            epoch_loss += loss.item()
            loss.backward()
            if config.dep_model == DepModelType.dggcn:
                # clip the gradient norm to stabilize GCN training
                torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" %
              (i, epoch_loss, end_time - start_time),
              flush=True)

        if i + 1 >= config.eval_epoch:
            model.eval()
            dev_metrics = evaluate(config, model, dev_batches, "dev",
                                   dev_insts)
            if dev_metrics[2] > best_dev[0]:
                test_metrics = evaluate(config, model, test_batches, "test",
                                        test_insts)
                print("saving the best model...")
                best_dev[0] = dev_metrics[2]
                best_dev[1] = i
                best_test[0] = test_metrics[2]
                best_test[1] = i
                torch.save(model.state_dict(), model_name)
                write_results(res_name, test_insts)
            model.zero_grad()

    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_name))
    model.eval()
    evaluate(config, model, test_batches, "test", test_insts)
    write_results(res_name, test_insts)
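
The `evaluate` helper is not included either; the loops above only rely on it returning a triple whose third element is the span-level F-score (hence `dev_metrics[2]`). A minimal sketch of that convention, under a hypothetical helper name:

def compute_f1(num_pred: int, num_gold: int, num_correct: int):
    # Entity-level precision/recall/F1 from span counts; evaluate() is assumed
    # to return this (precision, recall, fscore) triple, so metrics[2] is F1.
    precision = 100.0 * num_correct / num_pred if num_pred > 0 else 0.0
    recall = 100.0 * num_correct / num_gold if num_gold > 0 else 0.0
    fscore = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
    return precision, recall, fscore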
Example 4
def learn_from_insts(config: Config, epoch: int, train_insts):
    # train_insts: List[Instance], dev_insts: List[Instance], test_insts: List[Instance], batch_size: int = 1
    model = SimpleGCN(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)

    model_folder = config.model_folder
    res_folder = "results"
    model_path = f"model_files/{model_folder}/gnn.pt"
    config_path = f"model_files/{model_folder}/config.conf"
    os.makedirs(f"model_files/{model_folder}",
                exist_ok=True)  ## create the model folder; no error if it already exists
    os.makedirs(res_folder, exist_ok=True)
    print(
        f"[Info] The model will be saved to the directory: model_files/{model_folder}"
    )
    ignored_index = -100
    loss_fcn = torch.nn.CrossEntropyLoss(
        ignore_index=ignored_index)  ## positions labeled -100 do not contribute to the loss
    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()

        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        p = 0
        total = 0
        for index in np.random.permutation(len(batched_data)):
            model.train()
            (batch_word, batch_word_len, batch_context_emb, batch_char,
             batch_charlen, adj_matrixs, adjs_in, adjs_out, graphs,
             dep_label_adj, batch_dep_heads, trees, batch_label,
             batch_dep_label) = batched_data[index]
            masked_input, output, masked_index = mask_relations(
                batch_dep_label.clone(),
                probability=0.15,
                config=config,
                ignored_index=ignored_index,
                word_seq_len=batch_word_len)
            adj_matrixs = adj_matrixs.to(config.device)
            batch_word = batch_word if config.complete_tree else None
            # renamed from `input` to avoid shadowing the built-in
            logits = model(adj_matrixs, masked_input,
                           batch_word)  ## (batch_size, sent_len, num_labels)

            ## masked-label accuracy: both sides are filtered down to the masked
            ## positions only (same row-major order), so the comparison aligns
            max_index = logits.cpu().detach().numpy().argmax(axis=2)
            max_index[~masked_index] = ignored_index
            batch_size = max_index.shape[0]
            for idx in range(batch_size):
                max_index[idx, batch_word_len[idx]:] = ignored_index  # drop padding positions
            output_res = output.cpu().detach().numpy()
            p += np.sum(output_res[output_res != ignored_index]
                        == max_index[max_index != ignored_index])
            total += len(output_res[output_res != ignored_index])

            # output: shape(batch_size, sent_len)
            loss = loss_fcn(logits.view(-1, len(config.deplabels)),
                            output.view(-1))
            epoch_loss += loss.item()
            loss.backward()
            # if config.dep_model == DepModelType.dggcn:
            #     torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip) ##clipping the gradient
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print(
            f"Epoch {i}: {epoch_loss:.5f}, Acc: {p*1.0/total*100:.2f}, Time is {end_time-start_time:.2f}s",
            flush=True)

        if i % config.epoch_k == 0:
            # save a checkpoint every k epochs
            print("[Info] Saving the model...")
            torch.save(model.state_dict(), model_path)
            with open(config_path, 'wb') as f:
                pickle.dump(config, f)
            with tarfile.open(
                    f"model_files/{model_folder}/{model_folder}.tar.gz",
                    "w:gz") as tar:
                tar.add(f"model_files/{model_folder}",
                        arcname=os.path.basename(model_folder))
            ## draw and see the embeddings
            # tsne_ak_2d = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3500, random_state=32)
            # embeddings = model.dep_emb.weight.detach().numpy()
            # assert len(embeddings[0]) == config.dep_emb_size
            # embeddings = tsne_ak_2d.fit_transform(embeddings)
            # tsne_plot_2d('Relation embedding', embeddings, a= 0.1, words= config.deplabels, file_name=str(i))

    print("Archiving the last Model...")
    with tarfile.open(f"model_files/{model_folder}/{model_folder}.tar.gz",
                      "w:gz") as tar:
        tar.add(f"model_files/{model_folder}",
                arcname=os.path.basename(model_folder))

    print("Finished archiving the models")
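
`mask_relations` is the core of this self-supervised objective but is not shown. A minimal sketch, assuming BERT-style masking of the dependency labels and that the last id in `config.deplabels` doubles as the mask token (both assumptions):

import torch

def mask_relations(labels, probability, config, ignored_index, word_seq_len):
    # Hypothetical sketch: labels is (batch_size, sent_len). Pick ~probability of
    # the real (non-padding) positions; the returned target keeps labels only at
    # those positions (ignored_index elsewhere), so CrossEntropyLoss scores only
    # the masked slots, while the returned input hides those labels behind mask_id.
    batch_size, sent_len = labels.shape
    valid = torch.arange(sent_len).unsqueeze(0) < word_seq_len.unsqueeze(1)
    masked_index = (torch.rand(batch_size, sent_len) < probability) & valid
    target = labels.clone()
    target[~masked_index] = ignored_index
    masked_input = labels.clone()
    mask_id = len(config.deplabels) - 1  # assumption: reserved mask label id
    masked_input[masked_index] = mask_id
    return masked_input, target, masked_index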
Example 5
def learn_from_insts(config:Config, epoch: int, train_insts, dev_insts, test_insts):
    # train_insts: List[Instance], dev_insts: List[Instance], test_insts: List[Instance], batch_size: int = 1
    if config.pretrain_dep:
        model_path = f"model_files/{config.pdep_model}/{config.pdep_model}.tar.gz"
        predictor = Predictor(model_path)
        model = NNCRF(config, pretrained_dep_model=predictor.model)
    else:
        model = NNCRF(config)
    optimizer = get_optimizer(config, model)
    train_num = len(train_insts)
    print("number of instances: %d" % (train_num))
    print(colored("[Shuffled] Shuffle the training instance ids", "red"))
    random.shuffle(train_insts)

    batched_data = batching_list_instances(config, train_insts)
    dev_batches = batching_list_instances(config, dev_insts)
    test_batches = batching_list_instances(config, test_insts)

    best_dev = [-1, 0]
    best_test = [-1, 0]

    dep_model_name = config.dep_model.name  # descriptive name (not used further in this snippet)
    if config.dep_model == DepModelType.dggcn:
        dep_model_name += '(' + str(config.num_gcn_layers) + "," + str(config.gcn_dropout) + "," + str(
            config.gcn_mlp_layers) + ")"
    model_folder = config.model_folder
    res_folder = "results"
    model_path = f"model_files/{model_folder}/gnn.pt"
    config_path = f"model_files/{model_folder}/config.conf"
    res_path = f"{res_folder}/{model_folder}.res"
    os.makedirs(f"model_files/{model_folder}", exist_ok=True)  ## create the model folder; no error if it already exists
    os.makedirs(res_folder, exist_ok=True)
    print(f"[Info] The model will be saved to the directory: model_files/{model_folder}")

    for i in range(1, epoch + 1):
        epoch_loss = 0
        start_time = time.time()
        model.zero_grad()
        if config.optimizer.lower() == "sgd":
            optimizer = lr_decay(config, optimizer, i)
        for index in np.random.permutation(len(batched_data)):
            model.train()
            (batch_word, batch_wordlen, batch_context_emb, batch_char,
             batch_charlen, adj_matrixs, adjs_in, adjs_out, graphs,
             dep_label_adj, batch_dep_heads, trees, batch_label,
             batch_dep_label) = batched_data[index]
            loss = model.neg_log_obj(batch_word, batch_wordlen, batch_context_emb,
                                     batch_char, batch_charlen, adj_matrixs,
                                     adjs_in, adjs_out, graphs, dep_label_adj,
                                     batch_dep_heads, batch_label,
                                     batch_dep_label, trees)
            epoch_loss += loss.item()
            loss.backward()
            if config.dep_model == DepModelType.dggcn:
                # clip the gradient norm to stabilize GCN training
                torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
            optimizer.step()
            model.zero_grad()

        end_time = time.time()
        print("Epoch %d: %.5f, Time is %.2fs" % (i, epoch_loss, end_time - start_time), flush=True)

        if i + 1 >= config.eval_epoch:
            model.eval()
            dev_metrics = evaluate(config, model, dev_batches, "dev", dev_insts)
            test_metrics = evaluate(config, model, test_batches, "test", test_insts)
            if dev_metrics[2] > best_dev[0]:
                print("saving the best model...")
                best_dev[0] = dev_metrics[2]
                best_dev[1] = i
                best_test[0] = test_metrics[2]
                best_test[1] = i
                torch.save(model.state_dict(), model_path)
                write_results(res_path, test_insts)
            model.zero_grad()

    print("Archiving the best Model...")
    with tarfile.open(f"model_files/{model_folder}/{model_folder}.tar.gz", "w:gz") as tar:
        tar.add(f"model_files/{model_folder}", arcname=os.path.basename(model_folder))
    print("Finished archiving the models")

    print("The best dev: %.2f" % (best_dev[0]))
    print("The corresponding test: %.2f" % (best_test[0]))
    print("Final testing.")
    model.load_state_dict(torch.load(model_path))
    model.eval()
    evaluate(config, model, test_batches, "test", test_insts)
    write_results(res_path, test_insts)
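
The `Predictor` used when `config.pretrain_dep` is set is not shown. A minimal sketch, assuming it unpacks the tar.gz archive written in Example 4 (which contains <name>/config.conf and <name>/gnn.pt) and rebuilds the pretrained SimpleGCN; the archive layout and class body here are assumptions:

import os
import pickle
import tarfile
import torch

class Predictor:
    def __init__(self, archive_path: str):
        # Hypothetical loader: extract next to the archive, then restore the
        # pickled Config and the GCN state dict saved by the pretraining loop.
        extract_dir = os.path.dirname(archive_path)
        name = os.path.basename(archive_path).replace(".tar.gz", "")
        with tarfile.open(archive_path, "r:gz") as tar:
            tar.extractall(path=extract_dir)
        with open(os.path.join(extract_dir, name, "config.conf"), "rb") as f:
            self.config = pickle.load(f)
        self.model = SimpleGCN(self.config)  # SimpleGCN as in Example 4
        self.model.load_state_dict(
            torch.load(os.path.join(extract_dir, name, "gnn.pt"), map_location="cpu"))
        self.model.eval()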