Code example #1
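Per-example training loop: each iteration picks a single sentence/label pair, runs the encoder over it one token at a time, and backpropagates an L1 loss between the final output and the label.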
import torch
import torch.optim as optim

# `pp` (the preprocessing module) and findTrainExample are project helpers
# defined elsewhere; the same imports are assumed in the examples below.

def train(encoder,
          train_sentences,
          train_labels,
          w2i,
          n_iters=1,
          learning_rate=0.001):

    optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)

    for i in range(n_iters):
        # Pick one training pair; the sentence becomes a tensor of word
        # indices, the label a 1x1 float tensor.
        sentence, label = findTrainExample(train_sentences, train_labels)
        sentence_tensor = pp.encodeSentence(sentence, w2i)
        label_tensor = torch.tensor(label, dtype=torch.float32).view(1, 1)

        encoder_hidden = encoder.initHidden()

        # Feed the sentence through the encoder one token at a time,
        # threading the hidden state forward.
        input_length = sentence_tensor.size(0)
        for ei in range(input_length):
            output, encoder_hidden = encoder(sentence_tensor[ei],
                                             encoder_hidden)

        # L1 loss between the final output and the label.
        loss = torch.abs(output - label_tensor)
        if i % 100 == 0:
            print("loss:", loss.item())

        optimizer.zero_grad()
        # The hidden state is re-initialised each iteration, so no graph is
        # reused and a plain backward() suffices (retain_graph is not needed).
        loss.backward()
        optimizer.step()
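The Encoder class itself does not appear in these excerpts. A minimal sketch consistent with how train calls it (an initHidden() method and a per-token forward returning (output, hidden)) is given below; the class name, the GRU, the embedding, the sigmoid output, and the hidden size are all assumptions, not taken from the source.

import torch
import torch.nn as nn

class Encoder(nn.Module):
    """Hypothetical per-token RNN encoder matching the calls above."""

    def __init__(self, vocab_size, hidden_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, 1)

    def forward(self, token_index, hidden):
        # token_index: scalar LongTensor holding one word index.
        embedded = self.embedding(token_index).view(1, 1, -1)
        _, hidden = self.gru(embedded, hidden)
        output = torch.sigmoid(self.out(hidden[0]))  # shape (1, 1)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size)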
Code example #2
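A batched variant of the same loop: per-example losses are accumulated over a sampled batch and averaged before each optimizer step.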
def batch_train(encoder, train_sentences, train_labels, batch_size, w2i,
                n_batches=1000, learning_rate=0.001):
    optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)

    for i in range(n_batches):
        # Sample a batch of sentences and their labels.
        batch_sentences, batch_labels = findBatch(train_sentences, train_labels, batch_size)
        loss = 0
        for j in range(len(batch_sentences)):
            sentence = batch_sentences[j]
            label = batch_labels[j]
            sentence_tensor = pp.encodeSentence(sentence, w2i)
            label_tensor = torch.tensor(label, dtype=torch.float32).view(1, 1)

            # Reset the hidden state for every sentence; reusing one hidden
            # state across examples would tie their graphs together and force
            # retain_graph=True on backward.
            encoder_hidden = encoder.initHidden()

            input_length = sentence_tensor.size(0)
            for ei in range(input_length):
                output, encoder_hidden = encoder(sentence_tensor[ei], encoder_hidden)

            # Accumulate the L1 loss over the batch.
            loss += torch.abs(output - label_tensor)

        # Average over the batch before backpropagating.
        loss = loss / len(batch_sentences)
        print(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
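findBatch is another helper that is not shown. Judging from how it is called, it returns batch_size parallel lists of sentences and labels; a minimal random-sampling sketch (an assumption, not the original implementation) could be:

import random

def findBatch(train_sentences, train_labels, batch_size):
    # Draw `batch_size` distinct indices and return the matching pairs.
    indices = random.sample(range(len(train_sentences)), batch_size)
    batch_sentences = [train_sentences[k] for k in indices]
    batch_labels = [train_labels[k] for k in indices]
    return batch_sentences, batch_labels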
Code example #3
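Evaluation loop for a whole-sentence encoder variant: each test sentence gets a single forward pass on the target device, and the rounded output is compared against the label to compute accuracy.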
# A module-level `device` is assumed throughout; a typical choice:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def evaluate(encoder, test_sentences, test_labels, w2i):

    accuracy = 0.0
    with torch.no_grad():  # no gradients are needed during evaluation
        for i in range(len(test_sentences)):
            sentence = test_sentences[i]
            label = test_labels[i]
            sentence_tensor = pp.encodeSentence(sentence, w2i)
            label_tensor = torch.tensor(label, dtype=torch.float32).view(1, 1)

            # Move everything onto the same device as the encoder.
            encoder_hidden = encoder.initHidden().to(device)
            sentence_tensor = sentence_tensor.to(device)
            label_tensor = label_tensor.to(device)

            # This encoder variant consumes the whole sentence in one call
            # and returns only the prediction.
            output = encoder(sentence_tensor, encoder_hidden)

            # Round the output to 0 or 1 and compare it to the label.
            output = torch.round(output)
            if torch.equal(output, label_tensor):
                accuracy += 1

    return accuracy / len(test_sentences)
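Unlike code examples #1, #2, and #5, this evaluate (and the batch_train that follows) calls the encoder once on the whole sentence and receives only a prediction back, so it assumes a revised forward that loops internally. A sketch of that interface, building on the hypothetical Encoder above:

class SequenceEncoder(Encoder):
    """Hypothetical variant whose forward consumes the whole sentence."""

    def forward(self, sentence_tensor, hidden):
        # sentence_tensor: LongTensor of word indices, shape (seq_len,).
        embedded = self.embedding(sentence_tensor).view(-1, 1, self.hidden_size)
        _, hidden = self.gru(embedded, hidden)
        output = torch.sigmoid(self.out(hidden[0]))  # shape (1, 1)
        return output  # prediction only, matching the calls above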
Code example #4
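Revised batched training loop: the hidden state is re-initialised for every sentence, tensors are moved to the device, and squared error replaces the L1 loss.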
def batch_train(encoder, train_sentences, train_labels, batch_size, w2i,
                n_batches=10, learning_rate=0.001):
    optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)

    for i in range(n_batches):
        batch_sentences, batch_labels = findBatch(train_sentences, train_labels, batch_size)
        loss = 0
        for j in range(len(batch_sentences)):
            sentence = batch_sentences[j]
            label = batch_labels[j]
            sentence_tensor = pp.encodeSentence(sentence, w2i)
            label_tensor = torch.tensor(label, dtype=torch.float32).view(1, 1)

            # Fresh hidden state per sentence, moved to the same device as
            # the input tensors.
            encoder_hidden = encoder.initHidden().to(device)
            sentence_tensor = sentence_tensor.to(device)
            label_tensor = label_tensor.to(device)
            output = encoder(sentence_tensor, encoder_hidden)

            # Accumulate squared error over the batch.
            loss += (output - label_tensor) ** 2

        # Average over the batch before backpropagating.
        loss = loss / len(batch_sentences)
        print(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
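Summing squared errors and dividing by the batch size is just mean squared error. If the per-sentence outputs and labels were stacked into batch tensors, an equivalent and more idiomatic form (a sketch; `outputs` and `labels` are hypothetical names) would be:

import torch.nn.functional as F

# outputs, labels: float tensors of shape (batch_size, 1)
loss = F.mse_loss(outputs, labels)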
Code example #5
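Evaluation loop for the per-token encoder: each sentence is fed through token by token and the rounded final output is compared against the label.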
def evaluate(encoder, test_sentences, test_labels, w2i):

    accuracy = 0.0
    with torch.no_grad():  # no gradients are needed during evaluation
        for i in range(len(test_sentences)):
            sentence = test_sentences[i]
            label = test_labels[i]
            sentence_tensor = pp.encodeSentence(sentence, w2i)
            label_tensor = torch.tensor(label, dtype=torch.float32).view(1, 1)

            encoder_hidden = encoder.initHidden()

            # Run the encoder over the sentence one token at a time.
            input_length = sentence_tensor.size(0)
            for ei in range(input_length):
                output, encoder_hidden = encoder(sentence_tensor[ei], encoder_hidden)

            # Round the prediction and compare it to the label.
            output = torch.round(output)
            if torch.equal(output, label_tensor):
                accuracy += 1

    return accuracy / len(test_sentences)
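The last unshown helper is pp.encodeSentence. Given that its result is indexed one token at a time and that size(0) is treated as the sentence length, a whitespace-tokenising sketch (an assumption; the real tokenisation and unknown-word handling are not visible here) might be:

import torch

def encodeSentence(sentence, w2i):
    # Map each word to its vocabulary index; unknown words fall back to
    # index 0, a placeholder choice for this sketch.
    indices = [w2i.get(word, 0) for word in sentence.split()]
    return torch.tensor(indices, dtype=torch.long)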