import os
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder

def train_fine_tuning(net, optimizer, batch_size=128, num_epochs=5):
    # Assumes dataset_dir, train_augs, test_augs, device, and the d2l helpers
    # are defined elsewhere in the surrounding code.
    # Build loaders over the train/test splits, applying the matching augmentations.
    train_iter = DataLoader(ImageFolder(os.path.join(dataset_dir, 'train'), transform=train_augs),
                            batch_size, shuffle=True)
    test_iter = DataLoader(ImageFolder(os.path.join(dataset_dir, 'test'), transform=test_augs),
                           batch_size)
    loss = torch.nn.CrossEntropyLoss()
    d2l.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)
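For context, a minimal sketch of how such a function is typically invoked in the d2l fine-tuning recipe: load a pretrained backbone, swap in a fresh two-class head, and give the new head a larger learning rate. The specific model, layer names, and hyperparameters below are assumptions, not part of this listing.

import torch
from torchvision import models

# Assumption: older torchvision API (pretrained=True); newer releases use weights=...
pretrained_net = models.resnet18(pretrained=True)
# Replace the ImageNet classifier head with a fresh 2-class output layer.
pretrained_net.fc = torch.nn.Linear(pretrained_net.fc.in_features, 2)

# Train the reused layers with lr and the new head with 10 * lr.
output_params = list(map(id, pretrained_net.fc.parameters()))
feature_params = filter(lambda p: id(p) not in output_params,
                        pretrained_net.parameters())
lr = 0.01
optimizer = torch.optim.SGD(
    [{'params': feature_params},
     {'params': pretrained_net.fc.parameters(), 'lr': lr * 10}],
    lr=lr, weight_decay=0.001)

train_fine_tuning(pretrained_net, optimizer)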
Example no. 2
def train_fine_tuning(net, optimizer, batch_size=20, num_epochs=5):
    print("start to load data")
    train_iter = DataLoader(ImageFolder(os.path.join(data_dir, 'hotdog/train'),
                                        transform=train_augs),
                            batch_size,
                            shuffle=True)
    test_iter = DataLoader(
        ImageFolder(os.path.join(data_dir, 'hotdog/test'),
                    transform=test_augs), batch_size)
    print("finish loading")
    loss = torch.nn.CrossEntropyLoss()
    print("start to train")
    d2l.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)
Example no. 3
def train_fine_tuning(net, optimizer, batch_size=128, num_epochs=5):

    # Create two ImageFolder instances to read all image files in the
    # training and test datasets, respectively
    train_iter = DataLoader(ImageFolder(os.path.join(data_dir, 'hotdog/train'),
                                        transform=train_augs),
                            batch_size,
                            shuffle=True)

    test_iter = DataLoader(
        ImageFolder(os.path.join(data_dir, 'hotdog/test'),
                    transform=test_augs), batch_size)

    loss = nn.CrossEntropyLoss()
    d2l.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)
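The train_augs and test_augs transforms referenced by these fine-tuning examples are not shown in the listing. A plausible definition, assuming the usual ImageNet-style preprocessing for a pretrained backbone:

from torchvision import transforms

# Assumed ImageNet statistics, matching a pretrained backbone.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
train_augs = transforms.Compose([
    transforms.RandomResizedCrop(size=224),   # random crop + rescale for training
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize])
test_augs = transforms.Compose([
    transforms.Resize(size=256),              # deterministic pipeline for evaluation
    transforms.CenterCrop(size=224),
    transforms.ToTensor(),
    normalize])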
Example no. 4
def train_with_data_aug(train_augs, test_augs, lr=0.001):
    batch_size, net = 128, d2l.resnet18(10)  # use a ResNet-18 with 10 output classes
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss = torch.nn.CrossEntropyLoss()  # cross-entropy loss for classification
    # Load the training and test sets separately
    train_iter = load_cifar10(True, train_augs, batch_size)
    test_iter = load_cifar10(False, test_augs, batch_size)
    d2l.train(train_iter,
              test_iter,
              net,
              loss,
              optimizer,
              device,
              num_epochs=10)
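load_cifar10 is not defined in this listing; one plausible implementation, assuming torchvision's built-in CIFAR-10 dataset (the root path below is hypothetical):

import torchvision
from torch.utils.data import DataLoader

def load_cifar10(is_train, augs, batch_size, root='./Datasets/CIFAR'):
    # Shuffle only the training split.
    dataset = torchvision.datasets.CIFAR10(root=root, train=is_train,
                                           transform=augs, download=True)
    return DataLoader(dataset, batch_size=batch_size, shuffle=is_train)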
Example no. 5
def train_with_data_aug(train_augs, test_augs, lr=0.001):

    # Data
    batch_size = 256
    train_iter = load_cifar10(True, train_augs, batch_size)  # iterator over the training data
    test_iter = load_cifar10(False, test_augs, batch_size)  # iterator over the test data

    net = d2l.resnet18(10)  # model that outputs scores for 10 classes

    num_epochs = 10  # hyperparameters
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)  # optimizer
    loss = torch.nn.CrossEntropyLoss()  # loss function

    d2l.train(train_iter, test_iter, net, loss, optimizer, device,
              num_epochs)  # train
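A plausible invocation, following the d2l augmentation chapter, flips images horizontally at train time only; the transform names below are assumptions:

from torchvision import transforms

# Horizontal flips at train time only, plain tensors at test time.
flip_aug = transforms.Compose([transforms.RandomHorizontalFlip(),
                               transforms.ToTensor()])
no_aug = transforms.Compose([transforms.ToTensor()])

train_with_data_aug(flip_aug, no_aug)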
Example no. 6
def train_and_pred(train_features, test_features, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    # Build a network sized to the number of input features
    net = get_net(train_features.shape[1])
    # Train with the hyperparameters tuned earlier
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    # Plot and report the training curve
    plot.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse')
    print('train rmse %f' % train_ls[-1])
    # detach removes the predictions from the current computation graph
    preds = net(test_features).detach().numpy()
    # Write the predictions into the submission file
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('./Datasets/KaggleHouse/submission.csv', index=False)
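get_net and train are helpers assumed by this example. In the d2l Kaggle house-price baseline, get_net is a single linear layer; a minimal sketch under that assumption:

from torch import nn

def get_net(feature_num):
    # One linear layer mapping the feature vector to a single predicted price.
    net = nn.Linear(feature_num, 1)
    for param in net.parameters():
        nn.init.normal_(param, mean=0, std=0.01)  # small random initialization
    return net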
Example no. 7
def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,
           batch_size):
    train_l_sum, valid_l_sum = 0, 0
    # For each of the k folds
    for i in range(k):
        # Get the training and validation splits for fold i
        data = get_k_fold_data(k, i, X_train, y_train)
        # Build a network sized to the number of features in X
        net = get_net(X_train.shape[1])
        # Apply the data to the training function above; *data unpacks the
        # four returned tensors into the corresponding arguments
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        # Index -1 takes the last element, i.e. the final-epoch error
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        # Plot the error curves for the first fold, and print every fold's result
        if i == 0:
            plot.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse',
                          range(1, num_epochs + 1), valid_ls,
                          ['train', 'valid'])
        print('fold %d, train rmse %f, valid rmse %f' %
              (i, train_ls[-1], valid_ls[-1]))
    # Return the averaged errors; the validation part is a good measure of
    # generalization error
    return train_l_sum / k, valid_l_sum / k
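get_k_fold_data is the helper assumed above; a sketch of the usual d2l implementation, which slices fold i out as the validation split and concatenates the remaining folds for training:

import torch

def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            # Fold i becomes the validation split.
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            # Accumulate the remaining folds into the training split.
            X_train = torch.cat((X_train, X_part), dim=0)
            y_train = torch.cat((y_train, y_part), dim=0)
    return X_train, y_train, X_valid, y_valid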
# Model hyperparameters
embed_size, num_hiddens, num_layers = 50, 100, 2
net = BiRNN(vocab, embed_size, num_hiddens, num_layers)

# Since the sentiment-classification training set is not very large, to counter overfitting
# we directly use word vectors pretrained on a much larger corpus as each word's feature vector.
# The dimension of the pretrained vectors must match the embedding layer's output size, embed_size.
cache_dir = os.path.join(DATA_ROOT, 'glove')
glove_vocab = Vocab.pretrained_aliases["glove.6B.50d"](cache=cache_dir)

# Copy the pretrained word vectors into the model's embedding parameters
net.embedding.weight.data.copy_(
    d2l.load_pretrained_embedding(vocab.itos, glove_vocab))
# Freeze the word vectors so they receive no gradient updates
net.embedding.weight.requires_grad = False

# Training hyperparameters
lr, num_epochs = 0.01, 3
# Only optimize the parameters that still require gradients (the frozen embedding is excluded)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    net.parameters()),
                             lr=lr)
loss = nn.CrossEntropyLoss()
d2l.train(train_iter, test_iter, net, loss, optimizer, device, num_epochs)

# Test on two example sentences
print(d2l.predict_sentiment(
    net, vocab, ['this', 'movie', 'is', 'so', 'great']))  # positive
print(d2l.predict_sentiment(net, vocab,
                            ['this', 'movie', 'is', 'so', 'bad']))  # negative
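d2l.load_pretrained_embedding is assumed above; a sketch of what it is expected to do, namely look up each vocabulary token in the pretrained GloVe table, leaving out-of-vocabulary rows at zero (based on the d2l-zh helper; the exact library version is an assumption):

import torch

def load_pretrained_embedding(words, pretrained_vocab):
    # Start from zeros so out-of-vocabulary tokens keep a zero vector.
    embed = torch.zeros(len(words), pretrained_vocab.vectors[0].shape[0])
    oov_count = 0
    for i, word in enumerate(words):
        try:
            idx = pretrained_vocab.stoi[word]
            embed[i, :] = pretrained_vocab.vectors[idx]
        except KeyError:
            oov_count += 1
    if oov_count > 0:
        print("There are %d oov words." % oov_count)
    return embed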