def test_model(loader, model):
    """
    Help function that tests the model's performance on a dataset
    @param: loader - data loader for the dataset to test against
    """
    correct = 0
    total = 0
    model.eval()
    if args.model != 'RNN':
        for data, lengths, labels in loader:
            data_batch, length_batch, label_batch = data.to(
                device), lengths.to(device), labels.to(device)
            outputs = F.softmax(model(data_batch, length_batch), dim=1)
            predicted = outputs.max(1, keepdim=True)[1]
            total += label_batch.size(0)
            correct += predicted.eq(
                label_batch.view_as(predicted)).sum().item()
        return (100 * correct / total)
    else:
        for data, lengths, unsort_idx, labels in loader:
            data_batch, length_batch, unsort_batch, label_batch = data.to(
                device), lengths.to(device), unsort_idx.to(device), labels.to(
                    device)
            outputs = F.softmax(model(data_batch, length_batch, unsort_batch),
                                dim=1)
            predicted = outputs.max(1, keepdim=True)[1]
            total += label_batch.size(0)
            correct += predicted.eq(
                label_batch.view_as(predicted)).sum().item()
        print("testing complete")
        return (100 * correct / total)
Example #2
def test(model, test_loader, criterion, device):
    model.eval()
    with torch.no_grad():
        print("======================================================")
        print("TESTING")
        print("======================================================")
        total_loss = 0
        total_correct = 0
        total_data = 0
        loss = 0
        for data, labels in test_loader:
            model = model.to(device)
            data = data.to(device)
            labels = labels.to(device)

            output = model(data)
            loss = criterion(output, labels)

            total_data += labels.size(0)
            total_loss += loss.item()
            _, p = torch.max(output.data, dim=1)
            total_correct += (p == labels).sum().item()

        print("Testing: Loss: [{:.2f}] Accuracy [{:.2f}]".format(
            total_loss / len(test_loader), total_correct * 100 / total_data))
Example #3
def train(net, train_iter, valid_iter, device, num_epochs):
    train_loss, valid_loss = [], []
    train_batch_num = 0
    best_valid_f1 = 0.
    for epoch in range(num_epochs):
        total_loss = 0.
        correct = 0
        sample_num = 0
        net.train()
        for context in train_iter:
            train_batch_num += 1
            data = context.Text
            data = data.to(device).long()
            target = context.Label
            target = target.to(device).long()
            masks = (data != TEXT.vocab.stoi["<pad>"])
            output = net(data, masks)
            optimizer.zero_grad()
            loss = net.loss(output, masks, target)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        v_loss, precision, recall, f1 = evaluate_accuracy(valid_iter, net)
        valid_loss.append(v_loss)
        loss = total_loss / train_batch_num
        train_loss.append(loss)
        if f1 > best_valid_f1:
            best_valid_f1 = f1
            torch.save(net.state_dict(), "./best1.pth")
        print('epoch %d, train loss %.4f, valid loss %.4f, valid precision %.4f, valid recall %.4f, valid f1 %.4f'
              % (epoch + 1, loss, v_loss, precision, recall, f1))
    return train_loss, valid_loss
Example #4
def train_model(net, train_iter, test_iter, epoch, lr, batch_size):
    print("begin training")

    optimizer = optim.Adam(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    best_acc = 0
    for i in range(epoch):  # loop over epochs
        net.to(device)
        net.train()  # required: set the model to training mode
        for batch_idx, batch in enumerate(train_iter):
            # Note target = batch.label - 1: the dataset's labels are 1, 2, 3, 4,
            # but PyTorch expects class indices starting from 0, so subtract 1 here.
            data, target = batch.text, batch.label - 1
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()  # clear the accumulated gradients
            output = net(data)  # forward pass to get the output
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            # log training status
            logging.info("train epoch=" + str(i) + ",batch_id=" +
                         str(batch_idx) + ",loss=" +
                         str(loss.item() / batch_size))

        acc = model_test(net, test_iter, batch_size)
        if acc > best_acc:
            torch.save({'state_dict': net.cpu().state_dict()},
                       "saved_model/ag_fasttext_model.pth.tar")
            best_acc = acc

    print('Finished Training')
Example #5
def model_test(net, test_iter):
    net.eval()  # required: set the model to evaluation mode
    correct = torch.zeros(CLASS_NUM)
    total = torch.zeros(CLASS_NUM)
    with torch.no_grad():
        for i, batch in enumerate(test_iter):
            # The dataset's labels are 1, 2, 3, 4 while the model predicts 0-based
            # classes, so the predictions are shifted by +1 below.
            data, label = batch.text, batch.label
            data, label = data.to(device), label.to(device)
            logging.info("test batch_id=" + str(i))
            data = net.embed(data)
            outputs = net(data)
            # torch.max()[0] is the maximum value; torch.max()[1] is the index of that maximum
            _, predicted = torch.max(outputs.data,
                                     1)  # each output row holds one score per class; take the argmax of the row
            predicted += 1  # shift 0-based predictions to the dataset's 1-based labels

            # count per-class totals and correct predictions for classes 1..4
            for c in range(1, 5):
                class_filter = (torch.ones(label.size(0)) * c).to(device)
                filtered_label = ((label == class_filter).int()).to(device)
                total[c - 1] += filtered_label.sum().item()
                correct[c - 1] += ((filtered_label *
                                    c) == predicted).int().sum().item()

        total_accuracy = round(correct.sum().item() / total.sum().item(), 3)
        print('Accuracy of the network on test set:', total_accuracy)
        accuracy = (correct.float() / total.float()).numpy()
        accuracy = np.round(accuracy, 3)
        accuracy = dict(zip(ag_news_label.values(), accuracy.tolist()))
        print(accuracy)

        return total_accuracy
def batchify(data, bsz):
    data = TEXT.numericalize([data.examples[0].text])
    # Divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    return data.to(device)
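# Note: the language-model train/evaluate loops below call get_batch, which is
# not defined in these snippets. A minimal sketch consistent with the
# sequence-major layout produced by batchify above (this helper's name and
# signature are assumptions, following the common PyTorch language-modeling
# pattern):
def get_batch(source, i, bptt):
    # source: [num_steps, batch_size] tensor produced by batchify.
    # The input is a bptt-length slice; the targets are the same slice
    # shifted forward by one time step.
    seq_len = min(bptt, source.size(0) - 1 - i)
    data = source[i:i + seq_len]
    targets = source[i + 1:i + 1 + seq_len]
    return data, targets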
Example #7
def train(language_model, data_source, optimizer, criterion):

    epoch_loss = 0

    language_model.train()

    for i in tqdm(range(0, data_source.size(0) - 1, args.bptt)):

        optimizer.zero_grad()

        data, targets = get_batch(data_source, i, args.bptt)

        data = data.to(device)

        #data = [bptt, batch size]
        #targets = [bptt, batch size]

        if args.model == 'transformer':
            mask = generate_square_subsequent_mask(data.shape[0])
            mask = mask.to(device)
            predictions = language_model(data, mask)
        else:
            predictions = language_model(data)

        #predictions = [batch size, bptt, vocab size]

        predictions = predictions.permute(1, 0, 2)

        #predictions = [bptt, batch size, vocab size]

        predictions = predictions.contiguous().view(-1, vocab_size)

        #predictions = [bptt * batch size, vocab size]

        targets = targets.view(-1)

        #targets = [bptt * batch size]

        targets = targets.to(device)

        loss = criterion(predictions, targets)

        loss.backward()

        torch.nn.utils.clip_grad_norm_(language_model.parameters(),
                                       args.grad_clip)

        optimizer.step()

        epoch_loss += loss.item()

    return epoch_loss / len(data_source)
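# generate_square_subsequent_mask is referenced above but not defined in these
# snippets. A minimal sketch of the standard causal (upper-triangular) mask
# used with transformer language models; the original helper may differ:
def generate_square_subsequent_mask(sz):
    # Positions j > i are set to -inf so each step can only attend to
    # earlier positions; entries on and below the diagonal stay 0.
    return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)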
Example #8
def evaluate(language_model, data_source, criterion):

    epoch_loss = 0

    language_model.eval()

    for i in tqdm(range(0, data_source.size(0) - 1, args.bptt)):

        data, targets = get_batch(data_source, i, args.bptt)

        data = data.to(device)

        #data = [bptt, batch size]
        #targets = [bptt, batch size]

        with torch.no_grad():

            if args.model == 'transformer':
                mask = generate_square_subsequent_mask(data.shape[0])
                mask = mask.to(device)
                predictions = language_model(data, mask)
            else:
                predictions = language_model(data)

            #predictions = [batch size, bptt, vocab size]

            predictions = predictions.permute(1, 0, 2)

            #predictions = [bptt, batch size, vocab size]

            predictions = predictions.contiguous().view(-1, vocab_size)

            #predictions = [bptt * batch size, vocab size]

            targets = targets.view(-1)

            #targets = [bptt * batch size]

            targets = targets.to(device)

            loss = criterion(predictions, targets)

        epoch_loss += loss.item()

    return epoch_loss / len(data_source)
def train(model, train_loader, num_epochs, opt, criterion, device):
    train_loss_list = []
    iteration_list = []
    train_accuracy_list = []
    print("Beginning to Train")
    for epoch in range(num_epochs):
        total_loss = 0
        total_correct = 0
        total_data = 0
        loss = 0

        model.train()
        for i, (data, labels) in enumerate(train_loader):

            model = model.to(device)
            data = data.to(device)
            labels = labels.to(device)

            output = model(data)
            loss = criterion(output, labels)

            opt.zero_grad()
            loss.backward()
            opt.step()

            total_data += labels.size(0)
            total_loss += loss.item()
            _, p = torch.max(output.data, dim=1)
            total_correct += (p == labels).sum().item()

        print("Training: epoch: [{}/{}] Loss: [{:.2f}] Accuracy [{:.2f}] ".
              format(epoch + 1, num_epochs, total_loss / len(train_loader),
                     total_correct * 100 / total_data))

        train_loss_list.append(total_loss / len(train_loader))
        iteration_list.append(epoch)
        train_accuracy_list.append(total_correct * 100 / total_data)

        history = {
            'train_loss': train_loss_list,
            'train_acc': train_accuracy_list,
        }
    return history
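# A minimal usage sketch for the train() function above; the model class,
# DataLoader, and hyperparameters here are assumptions for illustration only.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = MyTextClassifier()  # assumed classifier returning one logit per class
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = torch.nn.CrossEntropyLoss()
history = train(model, train_loader, num_epochs=10, opt=optimizer,
                criterion=criterion, device=device)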
Example #10
def train(net, train_iter, valid_iter, device, num_epochs):
    train_loss, valid_loss = [], []
    train_batch_num = 0
    best_valid_acc = 0.

    for epoch in range(num_epochs):
        total_loss = 0.
        correct = 0
        sample_num = 0
        net.train()
        for context in train_iter:
            train_batch_num += 1
            data = context.Text
            data = data.to(device).long()
            target = context.Label
            target = target.to(device).long()
            output = net(data)
            optimizer.zero_grad()
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            prediction = torch.argmax(output, 1)
            #print(prediction)
            correct += (prediction == target).sum().item()
            sample_num += len(prediction)
        valid_acc, v_loss = evaluate_accuracy(valid_iter, net)
        valid_loss.append(v_loss)
        loss = total_loss / train_batch_num
        train_loss.append(loss)
        acc = correct / sample_num
        if valid_acc > best_valid_acc:
            best_valid_acc = valid_acc
            torch.save(net.state_dict(), "./best.pth")
        print(
            'epoch %d, train loss %.4f, train acc %.3f, valid loss %.4f, valid acc %.3f'
            % (epoch + 1, loss, acc, v_loss, valid_acc))
    return train_loss, valid_loss
Example #11
def train_eval(model, train_loader, val_loader, num_epochs, opt, criterion,
               device):
    train_loss_list = []
    iteration_list = []
    train_accuracy_list = []
    val_loss_list = []
    val_accuracy_list = []
    print("Beginning to Train")
    for epoch in range(num_epochs):
        total_loss = 0
        total_correct = 0
        total_data = 0
        loss = 0

        model.train()
        for i, (data, labels) in enumerate(train_loader):

            model = model.to(device)
            data = data.to(device)
            labels = labels.to(device)

            output = model(data)
            loss = criterion(output, labels)

            #  compute gradients, do parameter update and compute loss
            opt.zero_grad()
            loss.backward()
            opt.step()

            total_data += labels.size(0)
            total_loss += loss.item()
            _, p = torch.max(output.data, dim=1)
            total_correct += (p == labels).sum().item()

        model.eval()
        val_total_loss = 0
        val_total_correct = 0
        val_total_data = 0
        val_loss = 0
        for data, labels in val_loader:
            model = model.to(device)
            data = data.to(device)
            labels = labels.to(device)

            output = model(data)
            val_loss = criterion(output, labels)

            val_total_data += labels.size(0)
            val_total_loss += val_loss.item()
            _, p = torch.max(output.data, dim=1)
            val_total_correct += (p == labels).sum().item()

        print(
            "Training: epoch: [{}/{}] Loss: [{:.2f}] Accuracy [{:.2f}] Eval: Loss: [{:.2f}] Accuracy[{:.2f}]"
            .format(epoch + 1, num_epochs, total_loss / len(train_loader),
                    total_correct * 100 / total_data,
                    val_total_loss / len(val_loader),
                    val_total_correct * 100 / val_total_data))

        train_loss_list.append(total_loss / len(train_loader))
        iteration_list.append(epoch)
        train_accuracy_list.append(total_correct * 100 / total_data)
        val_loss_list.append(val_total_loss / len(val_loader))
        val_accuracy_list.append(val_total_correct * 100 / val_total_data)

        history = {
            'train_loss': train_loss_list,
            'train_acc': train_accuracy_list,
            'val_loss': val_loss_list,
            'val_acc': val_accuracy_list
        }
    return history
print("Starting training")
j = 0
time_point = []
time_string = []
val_acc_list = []
train_acc_list = []
max_acc = 0
for epoch in range(num_epochs):
    # re-create the optimizer with half the initial learning rate every third epoch
    if epoch % 3 == 2:
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=learning_rate * 0.5)
    if args.model != 'RNN':
        for i, (data, lengths, labels) in enumerate(train_loader):
            model.train()
            data_batch, length_batch, label_batch = data.to(
                device), lengths.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(data_batch, length_batch).to(device)
            loss = criterion(outputs, label_batch)
            loss.backward()
            optimizer.step()
            # validate every 100 iterations
            if i > 0 and i % 100 == 0:

                time_point.append(j)
                j += 1
                val_acc = test_model(val_loader, model)