Example #1
    def __init__(self, base_model_list=bagging_config.base_model_list):
        self.base_model_list = base_model_list.split("-")
        self.num_random = len(self.base_model_list)
        self.dataDir = general_config.data_dir + "/random"
        createRandomData(self.num_random)

        self.models = []
        self.models_name = []
        for i in range(self.num_random):
            base_model = self.base_model_list[i]
            assert base_model in ["1", "2", "3", "4", "5"], "Invalid base model type!"
            if base_model == "1":
                model = TextCNN()
            elif base_model == "2":
                model = TextRNN()
            elif base_model == "3":
                model = CRNN()
            elif base_model=="4":
                model = RCNN()
            else:
                model=HAN()
            self.models.append(model)
            self.models_name.append(modelDict[base_model])
        self.logDir = ensure_dir_exist(general_config.log_dir + "/bagging/" + "-".join(self.models_name))
        self.saveDir = ensure_dir_exist(general_config.save_dir + "/bagging/" + "-".join(self.models_name))
        self.logger = my_logger(self.logDir + "/log.txt")
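
The repeated if/elif dispatch above (and in Example #2 below) can also be written as a lookup table. A minimal sketch, assuming the same model classes and modelDict mapping shown in the snippet:

        # Hypothetical drop-in for the loop body above; all names are taken
        # from the snippet, and the keys mirror its assert.
        MODEL_CLASSES = {"1": TextCNN, "2": TextRNN, "3": CRNN, "4": RCNN, "5": HAN}
        for key in self.base_model_list:
            assert key in MODEL_CLASSES, "Invalid base model type!"
            self.models.append(MODEL_CLASSES[key]())
            self.models_name.append(modelDict[key])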
Example #2
    def __init__(self,
                 base_model_list=stacking_config.base_model_list,
                 num_cv=stacking_config.num_cv):
        self.base_model_list = base_model_list.split("-")
        self.num_models = len(self.base_model_list)
        self.num_cv = num_cv
        self.dataDir = general_config.data_dir + "/cv/" + str(self.num_cv)
        if not os.path.exists(self.dataDir):
            createCrossValidationData(self.num_cv)

        self.models = []
        self.models_name = []
        for n in range(self.num_models):
            base_model = self.base_model_list[n]
            assert base_model in ["1", "2", "3", "4", "5"], "Invalid base model type!"
            if base_model == "1":
                model = TextCNN()
            elif base_model == "2":
                model = TextRNN()
            elif base_model == "3":
                model = CRNN()
            elif base_model == "4":
                model = RCNN()
            else:
                model = HAN()
            self.models.append(model)
            self.models_name.append(modelDict[base_model])
        self.logDir = ensure_dir_exist(general_config.log_dir + "/stacking/" +
                                       "-".join(self.models_name) + "/" +
                                       str(self.num_cv))
        self.saveDir = ensure_dir_exist(general_config.save_dir +
                                        "/stacking/" +
                                        "-".join(self.models_name) + "/" +
                                        str(self.num_cv))
        self.classifier = LogisticRegression()
        self.logger = my_logger(self.logDir + "/log.txt")
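
This class wires up cross-validation stacking: out-of-fold predictions from each base model become the features for the LogisticRegression meta-classifier. A self-contained, purely illustrative sketch of that flow (synthetic data, a stand-in base model):

# Illustrative stacking sketch; everything here is hypothetical, not the author's train loop.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 8))
y = (X[:, 0] > 0).astype(int)

def base_predict(train_idx, test_idx):
    # stand-in for training one base model on a fold and scoring the held-out part
    return X[test_idx, :1]

oof = np.zeros((len(X), 1))  # out-of-fold predictions, one column per base model
for train_idx, test_idx in KFold(n_splits=5).split(X):
    oof[test_idx] = base_predict(train_idx, test_idx)

meta = LogisticRegression().fit(oof, y)  # the meta-classifier trains on oof features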
Example #3
def setup(opt):
    if opt.model == 'lstm':
        model = LSTMClassifier(opt)
    elif opt.model == 'basic_cnn' or opt.model == "cnn":
        model = BasicCNN1D(opt)
    elif opt.model == 'basic_cnn_2d':
        model = BasicCNN2D(opt)
    elif opt.model == 'kim_cnn':
        model = KIMCNN1D(opt)
    elif opt.model == 'kim_cnn_2d':
        model = KIMCNN2D(opt)
    elif opt.model == 'multi_cnn':
        model = MultiLayerCNN(opt)
    elif opt.model == 'inception_cnn':
        model = InceptionCNN(opt)
    elif opt.model == 'fasttext':
        model = FastText(opt)
    elif opt.model == 'capsule':
        model = CapsuleNet(opt)
    elif opt.model == 'rnn_cnn':
        model = RNN_CNN(opt)
    elif opt.model == 'rcnn':
        model = RCNN(opt)
    elif opt.model == 'bilstm':
        model = LSTMBI(opt)
    elif opt.model == "transformer":
        model = AttentionIsAllYouNeed(opt)
    elif opt.model == "selfattention":
        model = SelfAttention(opt)
    elif opt.model == "lstm_attention":
        model = LSTMAttention(opt)
    elif opt.model == "bert":
        model = BERTFast(opt)
    else:
        raise Exception("model not supported: {}".format(opt.model))
    return model
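
A hedged usage sketch for setup(): opt is whatever options object the caller builds (an argparse.Namespace works); only the model field is read by setup() itself, and any hyperparameters the chosen class pulls from opt are assumptions.

# Hypothetical invocation; RCNN(opt) will read its own hyperparameters from opt.
from argparse import Namespace

opt = Namespace(model='rcnn')
model = setup(opt)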
Example #4
# read text dataset
print("-------Loading Data--------\n")
TEXT, vocab_size, word_embeddings, _, _, mapping = load_data.load_dataset(
    embed_len=embedding_length, batch_size=batch_size)


# Map the model's output (0-4) back to the original label range (1-5)
def map_reverse(label):
    for k, v in mapping.items():
        if label == v:
            return float(k)


# Load the saved model
model = RCNN(batch_size, output_size, hidden_size, vocab_size,
             embedding_length, word_embeddings)
print(f"\nLoading checkpoint file: {saved}\n")
checkpoint = torch.load(saved)
model.load_state_dict(checkpoint['model_state_dict'])
model.to(device)

# Run inference on the validation set (eval mode is set once, outside the loop)
model.eval()
pred_val = []
for i in range(len(val['text'])):
    with torch.no_grad():
        test_sen = TEXT.preprocess(val['text'][i])
        test_sen = np.asarray([[TEXT.vocab.stoi[x] for x in test_sen]])
        test_tensor = torch.LongTensor(test_sen).to(device)
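        # The snippet is truncated here; a plausible completion (a sketch, not
        # the author's code), assuming the forward pass takes (input, batch_size)
        # as the constructor's batch_size argument suggests:
        output = model(test_tensor, 1)
        pred_label = torch.argmax(output, dim=1).item()
        pred_val.append(map_reverse(pred_label))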
Example #5
            num_corrects = (torch.max(prediction_gender, 1)[1].view(
                target.size()).data == target.data).sum()
            acc = 100.0 * num_corrects / len(batch)
            total_epoch_loss += loss.item()
            total_epoch_acc += acc.item()

    return total_epoch_loss / len(val_iter), total_epoch_acc / len(val_iter)


learning_rate = 2e-5
batch_size = 32
output_size = 2
output_size2 = 9
hidden_size = 256
embedding_length = 100

model = RCNN(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings)
loss_fn = F.cross_entropy

for epoch in range(20):
    train_loss, train_acc = train_model(model, train_iter, epoch)
    val_loss, val_acc = eval_model(model, valid_iter)

    with open(f"model_rcnn_idx{epoch}.pickle", "wb") as f:
        pickle.dump(model, f)
    print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%')

test_loss, test_acc = eval_model(model, test_iter)
print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')

''' Now let us predict the sentiment of a single sentence, just for testing purposes. '''
test_sen1 = "This is one of the best creation of Nolan. I can say, it's his magnum opus. Loved the soundtrack and especially those creative dialogues."
test_sen2 = "Ohh, such a ridiculous movie. Not gonna recommend it to anyone. Complete waste of time and money."
Example #6
            if torch.cuda.is_available():
                text = text.cuda(device)
                target = target.cuda(device)
            prediction = model(text)
            loss = loss_fn(prediction, target)
            num_corrects = (torch.max(prediction, 1)[1].view(
                target.size()).data == target.data).sum()
            acc = 100.0 * num_corrects / len(batch)
            total_epoch_loss += loss.item()
            total_epoch_acc += acc.item()

    return total_epoch_loss / len(val_iter), total_epoch_acc / len(val_iter)


# Define model
model = RCNN(batch_size, output_size, hidden_size, vocab_size,
             embedding_length, word_embeddings)
model.to(device)

prev_epochs = 0
loss_fn = F.cross_entropy
optim = torch.optim.Adam(model.parameters(),
                         lr=learning_rate,
                         weight_decay=1e-5)

# Reload the model if a checkpoint is specified; the optimizer must be
# created before its state can be restored into it.
if args.checkpoint:
    model, optim, min_loss, prev_epochs = load_checkpoint(
        args.checkpoint, model, optim)

#model = torch.nn.DataParallel(model)

scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=5, gamma=0.1)
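
load_checkpoint() is defined elsewhere in this codebase; a minimal sketch of a matching save/load pair, with the dictionary keys being assumptions:

# Hypothetical helpers mirroring the call above; key names are illustrative.
def save_checkpoint(path, model, optim, min_loss, epochs):
    torch.save({'model_state_dict': model.state_dict(),
                'optim_state_dict': optim.state_dict(),
                'min_loss': min_loss,
                'epochs': epochs}, path)

def load_checkpoint(path, model, optim):
    ckpt = torch.load(path)
    model.load_state_dict(ckpt['model_state_dict'])
    optim.load_state_dict(ckpt['optim_state_dict'])
    return model, optim, ckpt['min_loss'], ckpt['epochs']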
Example #7
def train(config):
    """
    Train and evaluate the model with training and validation data.
    """
    print('Loading data...')
    start_time = time.time()

    corpus = Corpus(config.file_path,
                    config.test_path,
                    config.abbre,
                    config.seq_length,
                    config.vocab_size,
                    over_sample=config.over)
    config.num_classes = corpus.num_classes
    print(corpus)
    config.vocab_size = len(corpus.words)  # no longer used

    config.model_file = config.model_file + '.pk'

    train_data = TensorDataset(torch.LongTensor(corpus.x_train_text),
                               torch.LongTensor(corpus.x_train_ids),
                               torch.LongTensor(corpus.y_train))
    test_data = TensorDataset(torch.LongTensor(corpus.x_test_text),
                              torch.LongTensor(corpus.x_test_ids),
                              torch.LongTensor(corpus.y_test))

    print('Configuring model...', config.elmo)
    if config.model_name == 'cnn':
        model = CNNClassifier(config)
        print('You choose to use CNN')
    elif config.model_name == 'lstm':
        model = LSTMClassifier(config)
        print('You choose to use LSTM')
    elif config.model_name == 'rcnn':
        model = RCNN(config)
        print('You choose to use RCNN')
    elif config.model_name == 'self':
        model = SelfAttention(config)
        print('You choose to use Self-Attention')
    else:
        model = AttentionModel(config)
        print('You choose to use LSTM-attention')

    print(model)

    if use_cuda:
        model.cuda()

    # optimizer and loss function
    criterion = nn.CrossEntropyLoss(reduction='sum')
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)

    # set the mode to train
    print("Training and evaluating...")

    best_acc = 0.0
    for epoch in range(config.num_epochs):
        # load the training data in batch
        model.train()
        train_loader = DataLoader(train_data, batch_size=config.batch_size)
        for x_batch, x_batch_elmo, y_batch in train_loader:

            inputs, inputs_elmo, targets = x_batch, x_batch_elmo, y_batch
            if use_cuda:
                inputs, inputs_elmo, targets = (inputs.cuda(),
                                                inputs_elmo.cuda(),
                                                targets.cuda())
            # if inputs.size(0) != config.batch_size:  # One of the batches returned by BucketIterator has a length different than 32.
            #     print('Size wrong')
            #     continue
            optimizer.zero_grad()
            # forward computation: pass the actual batch size
            outputs = model(inputs, inputs_elmo, batch_size=inputs.size(0))
            loss = criterion(outputs, targets)

            # backward propagation and update parameters
            loss.backward(retain_graph=True)
            optimizer.step()

        # evaluate on both training and test dataset
        train_acc, train_loss = evaluate(train_data, model, criterion)
        test_acc, test_loss = evaluate(test_data, model, criterion)

        if test_acc > best_acc:
            # store the best result
            best_acc = test_acc
            improved_str = '*'
            torch.save(model.state_dict(), config.model_file)
        else:
            improved_str = ''

        time_dif = get_time_dif(start_time)
        msg = "Epoch {0:3}, Train_loss: {1:>7.2}, Train_acc {2:>6.2%}, " \
              + "Test_loss: {3:>6.2}, Test_acc {4:>6.2%}, Time: {5} {6}"
        print(
            msg.format(epoch + 1, train_loss, train_acc, test_loss, test_acc,
                       time_dif, improved_str))

    test_acc, test_f1 = test(model, test_data, config.model_file)

    return test_acc, test_f1
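
evaluate() is referenced above but not shown; a minimal sketch consistent with its call sites (returning accuracy, then loss), with the loader settings and batch size being assumptions:

# Hypothetical implementation inferred from `acc, loss = evaluate(data, model, criterion)`.
def evaluate(data, model, criterion):
    model.eval()
    total_loss, total_correct = 0.0, 0
    loader = DataLoader(data, batch_size=64)
    with torch.no_grad():
        for x_batch, x_batch_elmo, y_batch in loader:
            if use_cuda:
                x_batch, x_batch_elmo, y_batch = (x_batch.cuda(),
                                                  x_batch_elmo.cuda(),
                                                  y_batch.cuda())
            outputs = model(x_batch, x_batch_elmo, batch_size=x_batch.size(0))
            total_loss += criterion(outputs, y_batch).item()
            total_correct += (outputs.argmax(dim=1) == y_batch).sum().item()
    # criterion uses reduction='sum', so dividing by the dataset size gives mean loss
    return total_correct / len(data), total_loss / len(data)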