Code Example #1
def train(training_data, word_to_ix, pretrained_weight):
    tag_to_ix = {"K": 0, "o": 1}
    model = LSTMW2VTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix),
                          len(tag_to_ix), pretrained_weight)
    loss_function = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    for epoch in range(EPOCHS):  # We train 300 times; adjust the count to the size of the task.
        i = 0
        for sentence, tags in training_data:
            i += 1
            # Clear accumulated gradients and reset the LSTM hidden state
            # so each sentence starts from a clean history.
            model.zero_grad()
            model.hidden = model.init_hidden()
            # Turn the sentence and its tags into tensors of indices.
            sentence_in = lstm.prepare_sequence(sentence, word_to_ix)
            targets = lstm.prepare_sequence(tags, tag_to_ix)
            # Forward pass, loss, backpropagation, parameter update.
            tag_scores = model(sentence_in)
            loss = loss_function(tag_scores, targets)
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                now = datetime.strftime(datetime.now(), "%m-%d %H:%M:%S")
                print(now, "epoch:", epoch, "num:", i)
        path_name = "./model/lstm_w2v" + str(epoch) + ".pkl"
        print(path_name)
        torch.save(model, path_name)
        print("model has been saved")
Code Example #2
    def report_accuracy(self,
                        data,
                        word_to_ix,
                        tag_to_ix,
                        ix_to_tag,
                        print_data=False):
        """ Reports accuracy with respect to exact match (all tags correct per sentence)
            and total matches (all correctly classified tags).
        """
        # Here we don't need to train, so the code is wrapped in torch.no_grad()
        with torch.no_grad():
            total = 0
            total_correct = 0
            total_exact_correct = 0
            for sentence, tags in data:
                scores = self(prepare_sequence(sentence, word_to_ix))
                out = torch.argmax(scores, dim=1)
                out_tags = [ix_to_tag[ix] for ix in out]
                targets = prepare_sequence(tags, tag_to_ix)
                if print_data:
                    print(sentence, out_tags)

                # Count per-token matches for this sentence.
                correct = 0
                length = len(tags)
                for i in range(length):
                    if out[i] == targets[i]:
                        correct += 1

                total += length
                total_correct += correct
                # A sentence is an exact match only if every tag is correct.
                if correct == length:
                    total_exact_correct += 1

            n = len(data)

            print('Accuracy: %d / %d, %0.4f' %
                  (total_correct, total, total_correct / total))
            print('Exact matches: %d / %d, %0.4f' %
                  (total_exact_correct, n, total_exact_correct / n))
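
A hedged usage sketch, assuming model, training_data, word_to_ix, tag_to_ix, and TAGS are set up as in Code Example #3 (TAGS can serve as ix_to_tag, since tag indices are assigned by enumerate(TAGS)):

model.report_accuracy(training_data, word_to_ix, tag_to_ix, TAGS, print_data=True)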
Code Example #3
def main(train_file):
    # Load the data.
    training_data = read_data(train_file)
    n = len(training_data)

    # Store word -> word_index mapping.
    word_to_ix = {}
    for sent, tags in training_data:
        for word in sent:
            if word not in word_to_ix:
                word_to_ix[word] = len(word_to_ix)

    # Store tag -> tag_index mapping.
    tag_to_ix = {tag: ix for ix, tag in enumerate(TAGS)}

    # Initialize the model.
    model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix),
                       len(tag_to_ix))
    loss_function = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    for epoch in range(EPOCHS):
        for i, (sentence, tags) in enumerate(training_data):
            # Step 1. PyTorch accumulates gradients, so we need to clear
            # them out before each instance.
            model.zero_grad()

            # Also, we need to clear out the hidden state of the LSTM,
            # detaching it from its history on the last instance.
            model.hidden = model.init_hidden()

            # Step 2. Get our inputs ready for the network, that is, turn them into
            # Tensors of word indices.
            sentence_in = prepare_sequence(sentence, word_to_ix)
            targets = prepare_sequence(tags, tag_to_ix)

            # Step 3. Run our forward pass.
            tag_scores = model(sentence_in)

            # Step 4. Compute the loss, gradients, and update the parameters by
            # calling optimizer.step()
            loss = loss_function(tag_scores, targets)
            loss.backward()
            optimizer.step()

            if i % 90 == 0:
                print('Epoch %d, sentence %d/%d, loss: %0.4f' %
                      (epoch + 1, i + 1, n, loss.item()))

    # Report training accuracy
    model.report_accuracy(training_data, word_to_ix, tag_to_ix, TAGS)
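
The LSTMTagger class is not included here. Judging from the init_hidden()/model.hidden handling above and the use of nn.NLLLoss, a plausible sketch following the classic PyTorch sequence-model tutorial (an assumption, not necessarily the authors' exact class):

import torch
import torch.nn as nn
import torch.nn.functional as F

class LSTMTagger(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # One layer, batch size 1: (h_0, c_0) each of shape (1, 1, hidden_dim).
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, self.hidden = self.lstm(
            embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        return F.log_softmax(tag_space, dim=1)  # log-probs pair with NLLLoss

The report_accuracy method from Code Example #2 would be defined on this same class.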
Code Example #4
def test_weight(model, data, word_to_ix):
    """Return the raw per-token tag scores the model assigns to each sentence."""
    scores = []
    with torch.no_grad():  # inference only; no gradient tracking needed
        for sentence, _ in data:
            inputs = lstm.prepare_sequence(sentence, word_to_ix)
            scores.append(model(inputs))
    return scores
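
A hedged usage sketch: since Code Example #1 saves the entire model object with torch.save, torch.load restores it directly (the checkpoint name below is illustrative):

model = torch.load("./model/lstm_w2v0.pkl")
model.eval()
scores = test_weight(model, training_data, word_to_ix)
print(scores[0])  # score rows, one per token of the first sentence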
Code Example #5
def train(model, training_data, word_to_ix, pretrained_weight, start_epoch=0):
    print("load data finished,begin to train")
    tag_to_ix = {"K": 0, "o": 1}
    loss_function = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1)
    # Overwrite the results log on a fresh run, append when resuming.
    mode = "w" if start_epoch == 0 else "a"
    f = open("./output/bilstmw2vresult.txt", mode, encoding="utf-8")
    for epoch in range(start_epoch, EPOCHS):
        i = 0
        for sentence, tags in training_data:
            i += 1
            model.zero_grad()
            model.hidden = model.init_hidden()
            sentence_in = lstm.prepare_sequence(sentence, word_to_ix)
            targets = lstm.prepare_sequence(tags, tag_to_ix)
            tag_scores = model(sentence_in)
            loss = loss_function(tag_scores, targets)
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                now = datetime.strftime(datetime.now(), "%m-%d %H:%M:%S")
                print(now, "epoch:", epoch, "num:", i)
        path_name = "./model/bilstm_w2v" + str(epoch) + ".pkl"
        torch.save(model, path_name)

        # X_train/X_test and y_train/y_test are assumed to be module-level
        # globals holding the evaluation sentences and their gold tag lists.
        yp_train = test(model, X_train, word_to_ix)
        yp_test = test(model, X_test, word_to_ix)
        p_t, r_t, f1_t = lstm.calculate(y_train, yp_train)
        p, r, f1 = lstm.calculate(y_test, yp_test)
        train_result = [p_t, r_t, f1_t]
        test_result = [p, r, f1]
        f.write(str(epoch) + "\t")
        f.write(" ".join(map(str, train_result)))
        f.write("\t")
        f.write(" ".join(map(str, test_result)))
        f.write("\n")
        f.flush()
        print("model has been saved")
        sys.stdout.flush()
    f.close()
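
lstm.calculate is also not shown. A hypothetical reconstruction matching its call signature, computing token-level precision, recall, and F1 with "K" as the positive class (an assumption based on the two-tag scheme used throughout):

def calculate(y_true, y_pred):
    # Hypothetical sketch: token-level P/R/F1, treating "K" as positive.
    tp = fp = fn = 0
    for gold_tags, pred_tags in zip(y_true, y_pred):
        for gold, pred in zip(gold_tags, pred_tags):
            if pred == "K" and gold == "K":
                tp += 1
            elif pred == "K":
                fp += 1
            elif gold == "K":
                fn += 1
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0.0)
    return precision, recall, f1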
Code Example #6
def test(model, data, word_to_ix):
    """Decode each sentence: per token, emit "K" or "o" by the larger score."""
    result = []
    with torch.no_grad():  # inference only; no gradient tracking needed
        for sentence, _ in data:
            inputs = lstm.prepare_sequence(sentence, word_to_ix)
            tag_scores = model(inputs)
            # Each row holds the scores for ("K", "o"); keep the larger tag.
            result.append(["K" if k > o else "o" for k, o in tag_scores])
    return result
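
Because each row of tag_scores holds the two scores in the index order fixed by tag_to_ix = {"K": 0, "o": 1}, the k > o comparison is equivalent to an argmax over dim=1. An equivalent sketch (test_argmax is a hypothetical name):

def test_argmax(model, data, word_to_ix):
    ix_to_tag = ["K", "o"]  # index order from tag_to_ix = {"K": 0, "o": 1}
    result = []
    with torch.no_grad():
        for sentence, _ in data:
            inputs = lstm.prepare_sequence(sentence, word_to_ix)
            out = torch.argmax(model(inputs), dim=1)
            result.append([ix_to_tag[ix] for ix in out])
    return result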