Example no. 1
    def __init__(self):
        super(critic, self).__init__()
        self.target_pred = LSTMClassifier(batch_size, output_size, hidden_size,
                                          vocab_size, embedding_length,
                                          word_embeddings)
        self.active_pred = LSTMClassifier(batch_size, output_size, hidden_size,
                                          vocab_size, embedding_length,
                                          word_embeddings)
Example no. 2
def get_gen_score(batch_data, TEXT, vocab_size, word_embeddings):
    if TEXT is None:
        return 0
    LABEL = data.LabelField(tensor_type=torch.FloatTensor)
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings, conv_hidden, 0.1)
    state_dict = torch.load(save_path)
    model.load_state_dict(state_dict)

    test_datafields = [("headline", TEXT), ("comment", LABEL), ("share", None)]
    """
    with open('temp.tsv', 'w') as f:
        f.write("headline\tcomment\tshare\n")
    batch = ""
    for i in range(len(batch_data)):
        sentence = ""
        for j in batch_data[i][0]:
            token =  j+ " "
            sentence += token
        try:
            temp = sentence + '\t1\t1\n'
        except:
            temp = " \t1\t1\n"
        
        batch += temp
    with open('temp.tsv', 'a') as f:
        f.write(batch)
    test_data = data.TabularDataset(path="temp.tsv", format='tsv', skip_header=True, fields=test_datafields)
    """
    examples = [None] * len(batch_data)
    for i in range(len(batch_data)):
        sentence = ""
        if batch_data[i]:  # if the data is not empty
            for j in batch_data[i]:
                token = j + " "
                sentence += token
            temp = [sentence, 1, 1]
        else:
            temp = [" ", 1, 1]
            print("[info] empty sentence for classifer")
        example = data.Example.fromlist(temp, test_datafields)
        examples[i] = example
    test_data = data.Dataset(examples, fields=test_datafields)

    LABEL.build_vocab(test_data)
    test_iter = data.BucketIterator(test_data,
                                    batch_size=len(test_data),
                                    sort_key=lambda x: len(x.headline),
                                    repeat=False,
                                    shuffle=True)
    gen_score = test_model(model, test_iter)
    gen_score = torch.softmax(gen_score, dim=1)
    return gen_score
def main(args):
    batch_size = 32
    output_size = 2
    hidden_size = 256
    embedding_length = 300

    tokenize = lambda x: x.split()
    TEXT = data.Field(sequential=True,
                      tokenize=tokenize,
                      lower=True,
                      include_lengths=True,
                      batch_first=True,
                      fix_length=50)
    LABEL = data.LabelField(tensor_type=torch.FloatTensor)
    train_data = data.TabularDataset(path=args.train_data_tsv_file,
                                     format='tsv',
                                     fields=[('text', TEXT), ('label', LABEL)],
                                     skip_header=True)
    TEXT.build_vocab(train_data, vectors=GloVe('840B', 300))
    LABEL.build_vocab(train_data)
    word_embeddings = TEXT.vocab.vectors
    vocab_size = len(TEXT.vocab)

    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings)
    model.load_state_dict(torch.load(args.saved_model_path))
    model.cuda()
    model.eval()
    for segments_pkl in os.listdir(args.transcript_segments_folder):
        print(segments_pkl)
        all_segments = pickle.load(
            open(os.path.join(args.transcript_segments_folder, segments_pkl),
                 'rb'))
        readable_output_file = open(
            os.path.join(args.output_transcript_segments_folder,
                         os.path.splitext(segments_pkl)[0] + '.tsv'), 'w')
        for video_id, segments in all_segments.items():
            for i in range(len(segments)):
                sentence = word_tokenize(segments[i]['transcript'].lower())
                test_sent = [[TEXT.vocab.stoi[x] for x in sentence]]
                test_sent = np.asarray(test_sent)
                test_sent = torch.LongTensor(test_sent)
                test_tensor = Variable(test_sent, volatile=True).cuda()
                output = model(test_tensor, 1)
                out = F.softmax(output, 1)
                if (torch.argmax(out[0]) == 1):
                    pred_label = 0
                else:
                    pred_label = 1
                segments[i]['is_background'] = pred_label
                all_segments[video_id][i] = segments[i]
                readable_output_file.write('%s\t%d\n' %
                                           (' '.join(sentence), pred_label))
        pickle.dump(
            all_segments,
            open(
                os.path.join(args.output_transcript_segments_folder,
                             segments_pkl), 'wb'))
def objective(batch_size, hidden_size, learning_rate):
    batch_size = int(batch_size)
    hidden_size = int(hidden_size)
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_patents.load_dataset(
        batch_size, cache_data=False)
    output_size = 2
    embedding_length = 300
    weights = word_embeddings
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings)
    #model = AttentionModel(batch_size, output_size, hidden_size, vocab_size, embedding_length, weights)
    #model = RNN(batch_size, output_size, hidden_size, vocab_size, embedding_length, weights)
    #model = RCNN(batch_size, output_size, hidden_size, vocab_size, embedding_length, weights)
    #model = SelfAttention(batch_size, output_size, hidden_size, vocab_size, embedding_length, weights)
    loss_fn = F.cross_entropy

    for epoch in range(10):
        #(model, train_iter, epoch, batch_size, learning_rate)
        train_loss, train_acc = train_model(model, train_iter, epoch,
                                            batch_size, learning_rate)
        val_loss, val_acc = eval_model(model, valid_iter, batch_size)

    test_loss, test_acc = eval_model(model, test_iter, batch_size)
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')

    return test_acc
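The float-to-int casts at the top of objective() suggest it is meant to be driven by a continuous-space hyperparameter tuner. A minimal sketch with the bayes_opt package follows; the package choice and the bounds are assumptions rather than something the code above confirms.

# Hypothetical tuning driver for objective(); bayes_opt and the bounds below
# are assumptions chosen to match the float->int casts inside objective().
from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(
    f=objective,
    pbounds={
        'batch_size': (16, 64),
        'hidden_size': (64, 512),
        'learning_rate': (1e-4, 1e-2),
    },
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=10)  # maximizes the returned test_acc
print(optimizer.max)
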
def run_best_model(args):
    learning_rate = args.lr
    batch_size = args.batch_size
    hidden_size = args.hidden_size
    epochs = args.epochs
    cache_data = args.cache_data
    output_size = 2
    embedding_length = 300
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_patents.load_dataset(
        batch_size, cache_data=cache_data)
    weights = word_embeddings
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings)

    acc_list = []
    val_list = []
    for epoch in range(epochs):
        #(model, train_iter, epoch, batch_size, learning_rate)
        train_loss, train_acc = train_model(model, train_iter, epoch,
                                            batch_size, learning_rate)
        val_loss, val_acc = eval_model(model, valid_iter, batch_size)
        print(
            f'EPOCH {epoch} -- Train/Val Loss: {train_loss:.3f}/{val_loss:.3f}, Train/Val Acc: {train_acc:.2f}%/{val_acc:.2f}%'
        )
        acc_list.append(train_acc)
        val_list.append(val_acc)

    plt.plot(acc_list, label="train")
    plt.plot(val_list, label="val")
    plt.legend()
    plt.savefig('acc_graph.png')

    test_loss, test_acc = eval_model(model, test_iter, batch_size)
    print("performance of model:")
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
def train_and_val(args):
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_patents.load_dataset(
    )

    learning_rate = args.lr
    batch_size = args.batch_size
    output_size = 2
    hidden_size = 256
    embedding_length = 300

    weights = word_embeddings

    if args.arch == 'LSTM':
        model = LSTMClassifier(batch_size, output_size, hidden_size,
                               vocab_size, embedding_length, word_embeddings)
    elif args.arch == 'attention':
        model = AttentionModel(batch_size, output_size, hidden_size,
                               vocab_size, embedding_length, weights)
    elif args.arch == 'RNN':
        model = RNN(batch_size, output_size, hidden_size, vocab_size,
                    embedding_length, weights)
    elif args.arch == 'CNN':
        model = CNN(batch_size, output_size, in_channels, out_channels,
                    kernel_heights, stride, padding, keep_probab, vocab_size,
                    embedding_length, weights)
    elif args.arch == 'RCNN':
        model = RCNN(batch_size, output_size, hidden_size, vocab_size,
                     embedding_length, weights)
    elif args.arch == 'selfAttn':
        model = SelfAttention(batch_size, output_size, hidden_size, vocab_size,
                              embedding_length, weights)

    loss_fn = F.cross_entropy

    for epoch in range(10):
        train_loss, train_acc = train_model(model, train_iter, loss_fn, epoch)
        val_loss, val_acc = eval_model(model, valid_iter, loss_fn)

        print(
            f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%'
        )

    test_loss, test_acc = eval_model(model, test_iter, loss_fn)
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')

    ts = time.time()
    timestamp = time.ctime(ts)

    with open("./results.out", "w+") as f:
        f.write(timestamp + '\n')
        f.write(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%\n')
        # for arg in args:
        #     #f.write(arg.name + ':')
        #     f.write(str(arg) + ', ')
        f.write("Epochs: " + str(args.epochs))
        f.write("\n")
Example no. 7
def main(train_data_path: str, model_path: str):
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_data.load_dataset(
        train_data_path)

    batch_size = 32
    output_size = 2
    hidden_size = 256
    embedding_length = 300

    # TODO: try other types of learning algorithms
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings)

    for epoch in range(10):
        train_loss, train_acc = train_model(model, train_iter, epoch)
        val_loss, val_acc = eval_model(model, valid_iter)

        print(
            f'Epoch: {epoch + 1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%'
        )

    test_loss, test_acc = eval_model(model, test_iter)
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
    ''' Let us now predict the sentiment on a single sentence just for the testing purpose. '''
    test_sen1 = "This is one of the best creation of Nolan. I can say, it's his magnum opus. Loved the soundtrack and especially those creative dialogues."

    test_sen1 = TEXT.preprocess(test_sen1)
    test_sen1 = [[TEXT.vocab.stoi[x] for x in test_sen1]]

    test_sen = np.asarray(test_sen1)
    test_sen = torch.from_numpy(test_sen)
    if torch.cuda.is_available():
        test_sen = test_sen.cuda()
    model.eval()
    output = model(test_sen, 1)
    out = F.softmax(output, 1)
    if (torch.argmax(out[0]) == 1):
        print("Sentiment: Positive")
    else:
        print("Sentiment: Negative")

    # save the model
    torch.save(model.state_dict(), model_path)
Example no. 8
def run(args):
    epoch_count = 10
    model = LSTMClassifier(args.batch_size, args.output_size, args.hidden_size,
                           args.vocab_size, args.embedding_length,
                           args.word_embeddings)
    #loss_fn = F.cross_entropy
    #loss = new_loss_function

    for epoch in range(epoch_count):
        train_loss, train_acc = train_model(model, train_iter, epoch)
        val_loss, val_acc = eval_model(model, valid_iter)
        print(
            f'Epoch: {epoch + 1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%'
        )
def main(args):
    TEXT, LABEL, vocab_size, word_embeddings, train_iter, valid_iter = load_data.load_dataset(
        args)

    #learning_rate = 2e-5
    learning_rate = 0.0001
    batch_size = BATCH_SIZE
    output_size = 2
    hidden_size = 256
    #hidden_size = 64
    embedding_length = 300

    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings)
    #model = AttentionModel(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings)
    #model = SelfAttention(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings)
    #loss_fn = F.cross_entropy
    print(LABEL.vocab.stoi)
    print(LABEL.vocab.freqs)
    label_weights = torch.FloatTensor(np.asarray([1.0, 2.0]))
    label_weights_tensor = Variable(label_weights, volatile=True).cuda()
    loss_fn = torch.nn.CrossEntropyLoss(weight=label_weights_tensor)

    for epoch in range(10):
        train_loss, train_acc = train_model(model, loss_fn, train_iter, epoch)
        val_loss, val_acc = eval_model(model, loss_fn, valid_iter)

        print(
            f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%'
        )
        evaluate(model, TEXT, LABEL, args, epoch)
        torch.save(model.state_dict(),
                   args.save_model_file + '.epoch' + str(epoch + 1))

    test_loss, test_acc = eval_model(model, loss_fn, test_iter)
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
Example no. 10
def run_best_model(args):
    learning_rate = args.lr
    batch_size = args.batch_size
    hidden_size = args.hidden_size
    epochs = args.epochs
    output_size = 2
    embedding_length = 300
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_patents.load_dataset(
        batch_size)
    weights = word_embeddings
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size,
                           embedding_length, word_embeddings)
    for epoch in range(epochs):
        #(model, train_iter, epoch, batch_size, learning_rate)
        train_loss, train_acc = train_model(model, train_iter, epoch,
                                            batch_size, learning_rate)
        val_loss, val_acc = eval_model(model, valid_iter, batch_size)

    test_loss, test_acc = eval_model(model, test_iter, batch_size)
    print("performance of model:")
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
Example no. 11
def do_inference(sentences, TEXT, vocab_size, word_embeddings):
    ## Load model for inference
    batch_size = len(sentences)
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings, conv_hidden, 0.0)
    model.cuda()
    state_dict = torch.load(save_path)
    model.load_state_dict(state_dict)
    model.eval()

    data_field = [('headline', TEXT)]
    ## prepare data
    score = None
    examples = []
    for text in sentences:
        examples.append(data.Example.fromlist([text], data_field))
    infer_data = data.Dataset(examples, data_field, filter_pred=None)
    infer_iter = data.Iterator(dataset=infer_data, batch_size=batch_size, train=False, sort=False, device=0)
    for idx, batch in enumerate(infer_iter):
        text = batch.headline[0]
        #if (text.size()[0] is not 32):
        #    continue
        prediction = model(text)
    score = torch.max(prediction, 1)[1].float().mean().item()
    return score
Example no. 12
def setup(opt):

    if opt.model == 'lstm':
        model = LSTMClassifier(opt)
    elif opt.model == 'basic_cnn' or opt.model == "cnn":
        model = BasicCNN1D(opt)
    elif opt.model == 'basic_cnn_2d':
        model = BasicCNN2D(opt)
    elif opt.model == 'kim_cnn':
        model = KIMCNN1D(opt)
    elif opt.model == 'kim_cnn_2d':
        model = KIMCNN2D(opt)
    elif opt.model == 'multi_cnn':
        model = MultiLayerCNN(opt)
    elif opt.model == 'inception_cnn':
        model = InceptionCNN(opt)
    elif opt.model == 'fasttext':
        model = FastText(opt)
    elif opt.model == 'capsule':
        model = CapsuleNet(opt)
    elif opt.model == 'rnn_cnn':
        model = RNN_CNN(opt)
    elif opt.model == 'rcnn':
        model = RCNN(opt)
    elif opt.model == 'bilstm':
        model = LSTMBI(opt)
    elif opt.model == "transformer":
        model = AttentionIsAllYouNeed(opt)
    elif opt.model == "selfattention":
        model = SelfAttention(opt)
    elif opt.model == "lstm_attention":
        model = LSTMAttention(opt)
    elif opt.model == "bert":
        model = BERTFast(opt)
    else:
        raise Exception("model not supported: {}".format(opt.model))
    return model
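For illustration, setup() can be driven with a simple namespace. Only the model attribute is read by the dispatcher above; whatever other fields the selected class reads from opt would also have to be set, so this is a sketch rather than a runnable recipe.

# Hypothetical call site for setup(); only opt.model is known from the code above,
# and any other attributes the selected model class expects must also be set on opt.
import argparse

opt = argparse.Namespace(model='lstm')    # plus model-specific fields in practice
model = setup(opt)                        # dispatches to LSTMClassifier(opt)
print(type(model).__name__)
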
Example no. 13
                target = target.cuda()
            prediction = model(text)
            loss = loss_fn(prediction, target)
            num_corrects = (torch.max(prediction, 1)[1].view(target.size()).data == target.data).sum()
            acc = 100.0 * num_corrects / len(batch)
            total_epoch_loss += loss.item()
            total_epoch_acc += acc.item()

    return total_epoch_loss / len(val_iter), total_epoch_acc / len(val_iter)


learning_rate = 2e-5
batch_size = 32
output_size = 2
hidden_size = 256
embedding_length = 300
from models.LSTM import LSTMClassifier
model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings)
loss_fn = F.cross_entropy

for epoch in range(10):
    train_loss, train_acc = train_model(model, train_iter, epoch)
    val_loss, val_acc = eval_model(model, valid_iter)

    print(
        f'Epoch: {epoch + 1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%')

test_loss, test_acc = eval_model(model, test_iter)
print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')

Example no. 14
def classifier():
    #################################################################################
    # Write the output data into the infer data for the classifier
    """
    #path = "/home/yunzhu/Headline/FASum/FASRL/save_decode_result/BM25/test"
    #path = "/home/yunzhu/Headline/FASum/FASRL/save_decode_result/PREFIX/test"
    #path = "/home/yunzhu/Headline/FASum/FASRL/save_decode_result/random/test"
    #path = "/home/yunzhu/Headline/FASum/FASRL/save_decode_result/seq2seq/withatt/test"
    #path = "/data1/home2/Headline/PointerSumm/log/decode_model_95000_1555784722/test"
    #path = "/home/yunzhu/Headline/FASum/FASRL/save_decode_result/exp_0223/test"
    #path = "/home/yunzhu/Headline/FASum/PORLHG_v3/save_decode_result/exp_0907/extractor/test"
    #path = "/data1/home2/Headline/Dataset/CNNDM/finished_files_cleaned_single_m2/refs/test"
    #path = "/home/yunzhu/Headline/FASum/PORLHG_v3/save_decode_result/exp_0912/rl/test"
    #path = "/home/yunzhu/Headline/FASum/PORLHG_v3/save_decode_result/exp_0823_v4/test"
    path = "/home/yunzhu/Headline/FASum/PORLHG_v3/save_decode_result/exp_0823/rl_3/test"

    #path_in = path
    path_in = os.path.join(path, "output")
    print('We are testing:{}'.format(path))
    filename = "temp.tsv"
    path_out= "/home/yunzhu/Headline/FASum/FASRL/model/classifier/cls_data/{}".format(filename)

    write_file(path_in, path_out)

    TEXT, vocab_size, word_embeddings, _, _, test_iter = load_data.load_dataset(corpusdir, batch_size, filename=path_out)
    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings, conv_hidden, 0.0)
    print('Loading the pretrained model: {}'.format(save_path.split('/')[-1]))
    state_dict = torch.load(save_path)
    model.load_state_dict(state_dict)

    loss_fn = F.cross_entropy

    test_loss, test_acc, test_uar = eval_model(model, test_iter, loss_fn)

    print('Inference popularity predictor for: {}'.format(path_in))
    print('Test Loss: {:.2f}, Test Acc: {:.2f}%, Test Uar: {:.2f}'.format(test_loss, test_acc, test_uar))
    print('There are {:.2f}% are classified as positive'.format(100-test_acc))
    with open(os.path.join(path, "popularity.txt"), 'w') as f:
        f.write("Inference by: {}".format(save_path))
        f.write("model: {}".format(path))
        f.write("score: {}".format(100-test_acc))

    """
    #########################################################
    """
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_data.load_dataset()
    loss_fn = F.cross_entropy
    
    with open('TEXT.Field', 'rb') as f:
        TEXT = dill.load(f)

    #path = "/home/yunzhu/Headline/FASum/FASRL/save_decode_result/exp_0224/test/output"
    path = "/home/yunzhu/Headline/Datasets/CNNDM/finished_files_cleaned/refs/test"
    num = len(os.listdir(path))

    total_score = 0
    for i in range(num):
        sentence = read_data(path, i, '.ref')
        
        score = do_inference(sentence, TEXT, vocab_size, word_embeddings)
        total_score += score
        print("{}/{} finished, score:{}".format(i, num, score))
    print("total_score: {}".format(total_score))
    print("avg score: {}".format(total_score/num))
     """

    #####################################################
    
    TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_data.load_dataset(corpusdir, batch_size)
    loss_fn = F.binary_cross_entropy_with_logits

    model = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings, conv_hidden, 0.1)

    val_acc_best=0.
    optim = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))  ## move the optim from train() to here
    scheduler = ReduceLROnPlateau(optim, 'min', verbose=True, patience=2)

    for epoch in range(10):
        train_loss, train_acc = train_model(model, train_iter, epoch, loss_fn, optim)
        val_loss, val_acc, _ = eval_model(model, valid_iter, loss_fn)
        scheduler.step(val_loss)
        if val_acc_best < val_acc:
            torch.save(model.state_dict(), save_path)
            test_loss, test_acc, _ = eval_model(model, test_iter, loss_fn)
            print('[info] Epoch{} Test Loss: {:.2f}, Test Acc: {:.2f}%'.format(epoch, test_loss, test_acc))
            val_acc_best = val_acc
        print('Epoch: {}, Train Loss: {:.2f}, Train Acc: {:.2f}%, Val Loss: {:.2f}, Val Acc: {:.2f}%'.format(epoch+1,  train_loss, train_acc, val_loss, val_acc)) 

    #test_loss, test_acc = eval_model(model, test_iter, loss_fn)

    #print('Test Loss: {}, Test Acc: {}'.format(test_loss, test_acc))
    
    ##################################################################    
    return TEXT, vocab_size, word_embeddings
Example no. 15
import os
import time
import load_data
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from models.LSTM import LSTMClassifier
from TextClassificationModel import TextClassificationModel

clf = LSTMClassifier(256, 256, 500, 10000, 500, None)
model = TextClassificationModel(model=clf)

for epoch in range(10):
    train_loss, train_acc = model.train_model(model, train_iter, epoch)
    val_loss, val_acc = model.eval_model(model, valid_iter)

    print(
        f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%'
    )

test_loss, test_acc = eval_model(model, test_iter)
print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
''' Let us now predict the sentiment on a single sentence just for the testing purpose. '''
test_sen1 = "This is one of the best creation of Nolan. I can say, it's his magnum opus. Loved the soundtrack and especially those creative dialogues."
test_sen2 = "Ohh, such a ridiculous movie. Not gonna recommend it to anyone. Complete waste of time and money."

test_sen1 = TEXT.preprocess(test_sen1)
test_sen1 = [[TEXT.vocab.stoi[x] for x in test_sen1]]
Example no. 16
class critic(nn.Module):
    def __init__(self):
        super(critic, self).__init__()
        self.target_pred = LSTMClassifier(batch_size, output_size, hidden_size,
                                          vocab_size, embedding_length,
                                          word_embeddings)
        self.active_pred = LSTMClassifier(batch_size, output_size, hidden_size,
                                          vocab_size, embedding_length,
                                          word_embeddings)

    def forward(self, x, scope):
        if scope == "target":
            out = self.target_pred(x)
        if scope == "active":
            out = self.active_pred(x)
        return out

    def assign_target_network(self):
        params = []
        for name, x in self.active_pred.named_parameters():
            params.append(x)
        i = 0
        for name, x in self.target_pred.named_parameters():
            x.data = deepcopy(params[i].data)
            i += 1

    def update_target_network(self):
        params = []
        for name, x in self.active_pred.named_parameters():
            params.append(x)
        i = 0
        for name, x in self.target_pred.named_parameters():
            x.data = deepcopy(params[i].data * (tau) + x.data * (1 - tau))
            i += 1

    def assign_active_network(self):
        params = []
        for name, x in self.target_pred.named_parameters():
            params.append(x)
        i = 0
        for name, x in self.active_pred.named_parameters():
            x.data = deepcopy(params[i].data)
            i += 1

    def assign_active_network_gradients(self):
        params = []
        for name, x in self.target_pred.named_parameters():
            params.append(x)
        i = 0
        for name, x in self.active_pred.named_parameters():
            x.grad = deepcopy(params[i].grad)
            i += 1
        for name, x in self.target_pred.named_parameters():
            x.grad = None

    def forward_lstm(self, hc, x, scope):
        if scope == "target":
            out, state = self.target_pred.getNextHiddenState(hc, x)
        if scope == "active":
            out, state = self.active_pred.getNextHiddenState(hc, x)
        return out, state

    def wordvector_find(self, x):
        return self.target_pred.wordvector_find(x)
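
As a rough usage sketch, the active/target pair above is typically synchronised once after construction and then soft-updated after each optimisation step on active_pred. The globals the class reads (batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings, tau) are assumed to be defined elsewhere, and batch_text below is a hypothetical input batch.

# Hypothetical driver for the critic class above.
critic_net = critic()
critic_net.assign_target_network()      # hard copy: target <- active, once after init

# ... one optimisation step on critic_net.active_pred (training code not shown) ...

critic_net.update_target_network()      # soft update: target <- tau*active + (1-tau)*target
value = critic_net(batch_text, scope="target")   # batch_text: hypothetical batch of token ids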