Code Example #1
File: hotdog_classify.py  Project: shibing624/cvnet
import os

import torch
from torch import nn, optim
from torchvision import models

# trainer and load_data_hotdog are project-internal modules in shibing624/cvnet.


def main():
    cwd = os.getcwd()
    print(cwd)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    pretrained_net = models.resnet18(pretrained=True)
    print(pretrained_net.fc)
    pretrained_net.fc = nn.Linear(512, 2)
    # Replace the output layer: ImageNet's 1000 classes become the hot dog dataset's 2 classes.
    print(pretrained_net.fc)

    # Set separate learning rates: the pretrained layers use lr (0.001),
    # while the newly initialized fc layer uses 10x that (0.01).
    output_params = list(map(id, pretrained_net.fc.parameters()))
    feature_params = filter(lambda p: id(p) not in output_params,
                            pretrained_net.parameters())

    lr = 0.001
    optimizer = optim.SGD([{
        'params': feature_params
    }, {
        'params': pretrained_net.fc.parameters(),
        'lr': lr * 10
    }],
                          lr=lr,
                          weight_decay=0.001)
    batch_size = 128
    num_epochs = 5
    train_iter, test_iter = load_data_hotdog(batch_size)
    trainer.train(pretrained_net, train_iter, test_iter, batch_size, optimizer,
                  device, num_epochs)
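Both hot dog examples call a project-internal load_data_hotdog helper. As a
minimal sketch, assuming a d2l-style hotdog dataset laid out as train/ and
test/ ImageFolder directories (the paths, transforms, and signature here are
assumptions, not the actual cvnet implementation):

import torch
from torchvision import datasets, transforms

def load_data_hotdog(batch_size, data_dir='./data/hotdog'):
    # Normalize with ImageNet statistics, since the backbone was pretrained on ImageNet.
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    train_augs = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize])
    test_augs = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize])
    train_iter = torch.utils.data.DataLoader(
        datasets.ImageFolder(data_dir + '/train', transform=train_augs),
        batch_size=batch_size, shuffle=True)
    test_iter = torch.utils.data.DataLoader(
        datasets.ImageFolder(data_dir + '/test', transform=test_augs),
        batch_size=batch_size, shuffle=False)
    return train_iter, test_iter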
Code Example #2
File: hotdog_classify.py  Project: shibing624/cvnet
def main_no_pretrained():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    scratch_net = models.resnet18(pretrained=False, num_classes=2)

    lr = 0.1
    optimizer = optim.SGD(scratch_net.parameters(), lr=lr, weight_decay=0.001)
    batch_size = 128
    num_epochs = 5
    train_iter, test_iter = load_data_hotdog(batch_size)
    trainer.train(scratch_net, train_iter, test_iter, batch_size, optimizer,
                  device, num_epochs)
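Note that torchvision 0.13+ deprecates the boolean pretrained flag in favor of
a weights argument; the equivalent constructor calls for both examples would be:

from torchvision import models
from torchvision.models import ResNet18_Weights

pretrained_net = models.resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)  # was pretrained=True
scratch_net = models.resnet18(weights=None, num_classes=2)                # was pretrained=False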
Code Example #3
def main():
    cwd = os.getcwd()
    print(cwd)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # net is defined at module level elsewhere in the source file.
    print(net)

    X = torch.rand((1, 1, 224, 224))
    print(X, X.shape)
    for name, blk in net.named_children():
        X = blk(X)
        print(name, 'output shape:', X.shape)

    batch_size = 256
    lr = 0.001
    num_epochs = 5
    # Test on the CIFAR-10 dataset.
    train_iter, test_iter = cifar.load_data_cifar10(batch_size=batch_size)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    trainer.train(net, train_iter, test_iter, batch_size, optimizer, device,
                  num_epochs)
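cifar.load_data_cifar10 is another cvnet-internal helper. A plausible minimal
version built on torchvision.datasets.CIFAR10 (a sketch; the project's actual
transforms and options may differ):

import torch
import torchvision
from torchvision import transforms

def load_data_cifar10(batch_size, root='./data'):
    transform = transforms.ToTensor()
    train_set = torchvision.datasets.CIFAR10(
        root=root, train=True, download=True, transform=transform)
    test_set = torchvision.datasets.CIFAR10(
        root=root, train=False, download=True, transform=transform)
    train_iter = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True)
    test_iter = torch.utils.data.DataLoader(
        test_set, batch_size=batch_size, shuffle=False)
    return train_iter, test_iter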
Code Example #4
# Head of lenet_bn reconstructed here; the snippet was truncated mid-definition.
# The stem below is implied by the Linear(16 * 4 * 4, 120) input size
# (standard LeNet convolutions with BatchNorm on 28x28 input):
lenet_bn = nn.Sequential(
    nn.Conv2d(1, 6, 5),   # (1, 28, 28) -> (6, 24, 24)
    nn.BatchNorm2d(6),
    nn.Sigmoid(),
    nn.MaxPool2d(2, 2),   # -> (6, 12, 12)
    nn.Conv2d(6, 16, 5),  # -> (16, 8, 8)
    nn.BatchNorm2d(16),
    nn.Sigmoid(),
    nn.MaxPool2d(2, 2),   # -> (16, 4, 4)
    FlattenLayer(),
    nn.Linear(16 * 4 * 4, 120),
    nn.BatchNorm1d(120),
    nn.Sigmoid(),
    nn.Linear(120, 84),
    nn.BatchNorm1d(84),
    nn.Sigmoid(),
    nn.Linear(84, 10))

if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = LeNet()  # the LeNet class (without BatchNorm) is defined elsewhere in the file
    print(net)
    batch_size = 256
    train_iter, test_iter = fashion_mnist.load_data_fashion_mnist(
        batch_size=batch_size)

    lr = 0.001
    num_epochs = 5
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    trainer.train(net, train_iter, test_iter, batch_size, optimizer, device,
                  num_epochs)

    print("-" * 42)
    print(lenet_bn)
    optimizer = torch.optim.Adam(lenet_bn.parameters(), lr=lr)
    train_iter, test_iter = fashion_mnist.load_data_fashion_mnist(
        batch_size=batch_size)
    trainer.train(lenet_bn, train_iter, test_iter, batch_size, optimizer,
                  device, num_epochs)
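Every example defers the actual optimization to a project-internal
trainer.train(net, train_iter, test_iter, batch_size, optimizer, device,
num_epochs) call. A minimal sketch of what such a loop typically does,
cross-entropy training with per-epoch test accuracy (an assumption about the
cvnet trainer, not its actual code):

import torch
from torch import nn

def train(net, train_iter, test_iter, batch_size, optimizer, device,
          num_epochs, model_path=None):
    # batch_size is unused here; it is kept only to match the call signature above.
    net = net.to(device)
    loss_fn = nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        net.train()
        total_loss, total_correct, n = 0.0, 0, 0
        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            loss = loss_fn(y_hat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item() * y.shape[0]
            total_correct += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(net, test_iter, device)
        print(f'epoch {epoch + 1}: loss {total_loss / n:.4f}, '
              f'train acc {total_correct / n:.3f}, test acc {test_acc:.3f}')
    if model_path is not None:
        torch.save(net.state_dict(), model_path)

def evaluate_accuracy(net, data_iter, device):
    net.eval()
    correct, n = 0, 0
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.to(device), y.to(device)
            correct += (net(X).argmax(dim=1) == y).sum().item()
            n += y.shape[0]
    return correct / n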
Code Example #5
# As in ResNet, finish with a global average pooling layer and a fully connected layer for the output.
# (net and num_channels come from the DenseNet construction earlier in the source file.)
net.add_module("BN", nn.BatchNorm2d(num_channels))
net.add_module("rele", nn.ReLU())
net.add_module(
    "global_avg_pool",
    GlobalAvgPool2d())  # GlobalAvgPool2d output shape: (batch, num_channels, 1, 1)
net.add_module("fc", nn.Sequential(FlattenLayer(), nn.Linear(num_channels,
                                                             10)))  # 10 output classes

if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(net)

    X = torch.rand((1, 1, 96, 96))
    for name, blk in net.named_children():
        X = blk(X)
        print(name, 'output shape:', X.shape)

    batch_size = 256
    # If an "out of memory" error occurs, reduce batch_size or the resize value.
    train_iter, test_iter = fashion_mnist.load_data_fashion_mnist(
        batch_size=batch_size, resize=96)

    lr = 0.001
    num_epochs = 5
    model_path = "densenet_mnist.pt"
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    trainer.train(net, train_iter, test_iter, batch_size, optimizer, device,
                  num_epochs, model_path)
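Examples #4 and #5 assume FlattenLayer and GlobalAvgPool2d utility modules.
These are conventionally defined as thin wrappers, as in the d2l book this
project follows (a sketch; the cvnet definitions may differ slightly):

from torch import nn
import torch.nn.functional as F

class FlattenLayer(nn.Module):
    # Flattens (batch, C, H, W) into (batch, C*H*W) so conv features can feed nn.Linear.
    def forward(self, x):
        return x.view(x.shape[0], -1)

class GlobalAvgPool2d(nn.Module):
    # Average-pools over the full spatial extent: (batch, C, H, W) -> (batch, C, 1, 1).
    def forward(self, x):
        return F.avg_pool2d(x, kernel_size=x.size()[2:])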