Code example #1
File: DenseNet.py  Project: clw5180/PyTorch_Practice
    def __init__(self):
        super(DenseNet, self).__init__()

        # Convolutional part
        self.conv = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        num_channels, growth_rate = 64, 32  # num_channels is the current channel count
        num_convs_in_dense_blocks = [4, 4, 4, 4]

        for i, num_convs in enumerate(num_convs_in_dense_blocks):
            DB = DenseBlock(num_convs, num_channels, growth_rate)
            self.conv.add_module("DenseBlosk_%d" % i, DB)
            # 上一个稠密块的输出通道数
            num_channels = DB.out_channels
            # insert a transition layer that halves the channel count between dense blocks
            if i != len(num_convs_in_dense_blocks) - 1:
                self.conv.add_module(
                    "transition_block_%d" % i,
                    self.transition_block(num_channels, num_channels // 2))
                num_channels = num_channels // 2
        self.conv.add_module("BN", nn.BatchNorm2d(num_channels))
        self.conv.add_module("relu", nn.ReLU())

        self.fc = nn.Sequential(
            utils.GlobalAvgPool2d(), utils.FlattenLayer(),
            nn.Linear(num_channels,
                      10))  # output of GlobalAvgPool2d: (Batch, num_channels, 1, 1)
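
This example relies on a DenseBlock and a transition_block that are not shown in the snippet (the example calls transition_block as a method, but the logic is the same). A minimal sketch of how these helpers typically look in d2l-style DenseNet code, assumed here rather than taken from the project itself:

import torch
from torch import nn

def conv_block(in_channels, out_channels):
    # BN-ReLU-Conv block used inside a dense block
    return nn.Sequential(
        nn.BatchNorm2d(in_channels), nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))

class DenseBlock(nn.Module):
    def __init__(self, num_convs, in_channels, out_channels):
        super(DenseBlock, self).__init__()
        net = []
        for i in range(num_convs):
            in_c = in_channels + i * out_channels
            net.append(conv_block(in_c, out_channels))
        self.net = nn.ModuleList(net)
        # total channels after concatenating every block output
        self.out_channels = in_channels + num_convs * out_channels

    def forward(self, X):
        for blk in self.net:
            Y = blk(X)
            X = torch.cat((X, Y), dim=1)  # concatenate along the channel dim
        return X

def transition_block(in_channels, out_channels):
    # 1x1 conv to shrink the channel count, 2x2 avg pool to halve H and W
    return nn.Sequential(
        nn.BatchNorm2d(in_channels), nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2))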
Code example #2
    def create_model(self):
        net = torch.nn.Sequential(
            utils.FlattenLayer(),
            torch.nn.Linear(self.input_num, self.hide_num),
            torch.nn.ReLU(),
            torch.nn.Linear(self.hide_num, self.output_num),
        )
        for param in net.parameters():
            torch.nn.init.normal_(param, mean=0., std=0.01)

        return net
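
Most of these examples use utils.FlattenLayer() and utils.GlobalAvgPool2d(), which are never defined in the snippets. A minimal sketch of the usual d2l-style implementations (an assumption, since the utils module itself is not shown):

import torch.nn as nn
import torch.nn.functional as F

class FlattenLayer(nn.Module):
    def forward(self, x):
        # collapse everything except the batch dimension
        return x.view(x.shape[0], -1)

class GlobalAvgPool2d(nn.Module):
    def forward(self, x):
        # average each channel over its full spatial extent -> (Batch, C, 1, 1)
        return F.avg_pool2d(x, kernel_size=x.size()[2:])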
Code example #3
    def net(self):
        net = nn.Sequential(
            utils.FlattenLayer(),
            nn.Linear(self.n_input, self.n_hidden1),
            nn.ReLU(),
            nn.Dropout(self.drop_prob1),
            nn.Linear(self.n_hidden1, self.n_hidden2),
            nn.ReLU(),
            nn.Dropout(self.drop_prob2),
        )
        for param in net.parameters():
            nn.init.normal_(param, mean=0, std=0.01)
        return net
Code example #4
def vgg(conv_arch, fc_features, fc_hidden_units=4096):
    net = nn.Sequential()
    # Convolutional part
    for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):
        net.add_module("vgg_block_" + str(i + 1),
                       vgg_block(num_convs, in_channels, out_channels))
    # Fully connected part
    net.add_module(
        "fc",
        nn.Sequential(utils.FlattenLayer(),
                      nn.Linear(fc_features, fc_hidden_units), nn.ReLU(),
                      nn.Dropout(0.5),
                      nn.Linear(fc_hidden_units, fc_hidden_units), nn.ReLU(),
                      nn.Dropout(0.5), nn.Linear(fc_hidden_units, 10)))
    return net
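
vgg() assumes a vgg_block helper and a conv_arch tuple such as ((1, 1, 64), (1, 64, 128), (2, 128, 256), (2, 256, 512), (2, 512, 512)), the values commonly used in the d2l VGG-11 example. A plausible sketch of the helper (not shown in the snippet):

from torch import nn

def vgg_block(num_convs, in_channels, out_channels):
    blk = []
    for i in range(num_convs):
        # the first conv adapts the channel count, the rest keep it fixed
        blk.append(nn.Conv2d(in_channels if i == 0 else out_channels,
                             out_channels, kernel_size=3, padding=1))
        blk.append(nn.ReLU())
    blk.append(nn.MaxPool2d(kernel_size=2, stride=2))  # halve height and width
    return nn.Sequential(*blk)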
Code example #5
    def __init__(self, conv_arch, fc_features, fc_hidden_units=4096):
        super(VGGNet, self).__init__()

        # Convolutional part
        self.conv = nn.Sequential()
        for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):
            self.conv.add_module(
                "vgg_block_" + str(i + 1),
                self.vgg_block(num_convs, in_channels, out_channels))

        # Fully connected part
        self.fc = nn.Sequential(utils.FlattenLayer(),
                                nn.Linear(fc_features, fc_hidden_units),
                                nn.ReLU(), nn.Dropout(0.5),
                                nn.Linear(fc_hidden_units, fc_hidden_units),
                                nn.ReLU(), nn.Dropout(0.5),
                                nn.Linear(fc_hidden_units, 10))
Code example #6
    def __init__(self):
        super(ResNet, self).__init__()

        # Convolutional part
        self.conv = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64), nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        self.conv.add_module("resnet_block1",
                             self.resnet_block(64, 64, 2, first_block=True))
        self.conv.add_module("resnet_block2", self.resnet_block(64, 128, 2))
        self.conv.add_module("resnet_block3", self.resnet_block(128, 256, 2))
        self.conv.add_module("resnet_block4", self.resnet_block(256, 512, 2))

        # Fully connected part
        self.fc = nn.Sequential(
            utils.GlobalAvgPool2d(), utils.FlattenLayer(),
            nn.Linear(512, 10))  # output of GlobalAvgPool2d: (Batch, 512, 1, 1)
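
The ResNet example calls self.resnet_block(), which in turn builds Residual units (example #16 below also uses utils.Residual). A minimal sketch of these pieces, assuming the standard d2l-style implementation rather than the project's exact code:

import torch.nn.functional as F
from torch import nn

class Residual(nn.Module):
    def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               padding=1, stride=stride)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        # optional 1x1 conv so the shortcut matches the main branch's shape
        self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=stride) if use_1x1conv else None
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return F.relu(Y + X)

def resnet_block(in_channels, out_channels, num_residuals, first_block=False):
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            # downsample and change channels at the start of each later stage
            blk.append(Residual(in_channels, out_channels, use_1x1conv=True, stride=2))
        else:
            blk.append(Residual(out_channels, out_channels))
    return nn.Sequential(*blk)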
Code example #7
    net.add_module("DenseBlosk_%d" % i, DB)
    # 上一个稠密块的输出通道数
    num_channels = DB.out_channels
    # 在稠密块之间加入通道数减半的过渡层
    if i != len(num_convs_in_dense_blocks) - 1:
        net.add_module("transition_block_%d" % i,
                       transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2

net.add_module("BN", nn.BatchNorm2d(num_channels))  # 248
net.add_module("relu", nn.ReLU())
net.add_module(
    "global_avg_pool",
    d2l.GlobalAvgPool2d())  # output of GlobalAvgPool2d: (Batch, num_channels, 1, 1)
net.add_module("fc",
               nn.Sequential(d2l.FlattenLayer(), nn.Linear(num_channels, 10)))

X = torch.rand((1, 1, 96, 96))
for name, layer in net.named_children():
    X = layer(X)
    print(name, ' output shape:\t', X.shape)


def load_data_fashion_mnist(batch_size,
                            resize=None,
                            root='input/FashionMNIST2065'):
    """Download the fashion mnist dataset and then load into memory."""
    trans = []
    if resize:
        trans.append(torchvision.transforms.Resize(size=resize))
    trans.append(torchvision.transforms.ToTensor())
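    # The snippet is cut off here. Assuming the function mirrors the usual d2l
    # Fashion-MNIST loader (and that torch and torchvision are imported in the
    # original file), the remainder would look roughly like this sketch:
    transform = torchvision.transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True,
                                                    download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False,
                                                   download=True, transform=transform)
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
                                             shuffle=True, num_workers=4)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size,
                                            shuffle=False, num_workers=4)
    return train_iter, test_iter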
Code example #8
import torch
import torchvision
import torch.utils.data as Data
import torchvision.transforms as transforms
import torch.nn as nn
from torch.nn import init
import numpy as np
import utils

print('Fetch and load the data')
batch_size = 256
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size)

print('Define the model')
num_inputs, num_outputs, num_hiddens = 784, 10, 256

net = nn.Sequential(utils.FlattenLayer(), nn.Linear(num_inputs, num_hiddens),
                    nn.ReLU(), nn.Linear(num_hiddens, num_outputs))

for params in net.parameters():
    init.normal_(params, mean=0, std=0.01)

print('Train the model')
loss = torch.nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)

num_epochs = 10
utils.train_softmax(net, train_iter, test_iter, loss, num_epochs, batch_size,
                    None, None, optimizer)
'''
Train the model
Code example #9
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
import utils as d2l

print(torch.__version__)

num_inputs, num_outputs, num_hiddens = 784, 10, 256

net = nn.Sequential(
    d2l.FlattenLayer(),
    nn.Linear(num_inputs, num_hiddens),
    nn.ReLU(),
    nn.Linear(num_hiddens, num_outputs),
)

for params in net.parameters():
    init.normal_(params, mean=0, std=0.01)

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(
    batch_size, root='input/FashionMNIST2065')
loss = torch.nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)

num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
              None, optimizer)
Code example #10
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append("..")
import utils

'''
Implement a multilayer perceptron with PyTorch
'''

# Define the model
dim_inputs, dim_outputs, dim_hiddens = 784, 10, 256

net = nn.Sequential(
        utils.FlattenLayer(),
        nn.Linear(dim_inputs, dim_hiddens),
        nn.ReLU(),
        nn.Linear(dim_hiddens, dim_outputs)
    )

# Load the data and train the model
batch_size = 256
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size)
loss = torch.nn.CrossEntropyLoss()

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)  # use the SGD optimizer provided by torch.optim

num_epochs = 5
utils.train_ch3(net, train_iter, test_iter, loss, num_epochs,
    batch_size, None, None, optimizer)
Code example #11
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   d2l.GlobalAvgPool2d())

net = nn.Sequential(b1, b2, b3, b4, b5, d2l.FlattenLayer(),
                    nn.Linear(1024, 10))

X = torch.rand(1, 1, 96, 96)

for blk in net.children():
    X = blk(X)
    print('output shape: ', X.shape)

batch_size = 16
# if you hit an "out of memory" error, reduce batch_size or the resize value
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
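
The Inception(in_c, c1, c2, c3, c4) blocks used in examples #11, #12, and #17 are GoogLeNet-style four-branch modules. A minimal sketch following the d2l formulation (assumed here, since the class definition is not included in any snippet):

import torch
import torch.nn.functional as F
from torch import nn

class Inception(nn.Module):
    # c1..c4 are the output channel counts of the four parallel branches
    def __init__(self, in_c, c1, c2, c3, c4):
        super(Inception, self).__init__()
        self.p1_1 = nn.Conv2d(in_c, c1, kernel_size=1)                 # branch 1: 1x1 conv
        self.p2_1 = nn.Conv2d(in_c, c2[0], kernel_size=1)              # branch 2: 1x1 then 3x3
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        self.p3_1 = nn.Conv2d(in_c, c3[0], kernel_size=1)              # branch 3: 1x1 then 5x5
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)   # branch 4: pool then 1x1
        self.p4_2 = nn.Conv2d(in_c, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        return torch.cat((p1, p2, p3, p4), dim=1)  # concatenate along the channel dim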
Code example #12
        nn.Conv2d(64, 192, kernel_size=3, padding=1),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
        Inception(256, 128, (128, 192), (32, 96), 64),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
        Inception(512, 160, (112, 224), (24, 64), 64),
        Inception(512, 128, (128, 256), (24, 64), 64),
        Inception(512, 112, (144, 288), (32, 64), 64),
        Inception(528, 256, (160, 320), (32, 128), 128),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
        Inception(832, 384, (192, 384), (48, 128), 128),
        utils.GlobalAvgPool2d())

net = nn.Sequential(b1, b2, b3, b4, b5,
        utils.FlattenLayer(), nn.Linear(1024, 10))
X = torch.rand(1, 1, 96, 96)
for blk in net.children():
    X = blk(X)
    print('output shape: ', X.shape)

# Load the data
batch_size = 128
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size, resize=96)  # resize images to 96

# Train the model
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
utils.train_ch5(net, train_iter, test_iter, batch_size, optimizer,
    device, num_epochs)
Code example #13
'''
Softmax regression using PyTorch's built-in functions
'''
import torch
from torch import nn
from torch.nn import init
import utils

# Load the data
batch_size = 256
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size)

# Define the model
dim_inputs = 28 * 28 * 1
dim_outputs = 10

from collections import OrderedDict
net = nn.Sequential(
        OrderedDict([
            ('flatten', utils.FlattenLayer()),
            ('linear', nn.Linear(dim_inputs, dim_outputs))
        ])
)

# Initialize the parameters
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

# Cross-entropy loss
loss = nn.CrossEntropyLoss()

# Optimizer
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)

# Train
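# The original snippet ends before the training call. Judging from the other
# examples in this collection, it would typically be something like this
# (a hedged guess, not the original file's code):
num_epochs = 5
utils.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
                None, None, optimizer)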
Code example #14
File: 5.8-NIN.py  Project: ygtxr1997/DiveIntoPytorch
def nin_block(in_channels, out_channels, kernel_size, stride, padding):
    blk = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU())
    return blk


net = nn.Sequential(nin_block(1, 96, kernel_size=11, stride=4, padding=0),
                    nn.MaxPool2d(kernel_size=3, stride=2),
                    nin_block(96, 256, kernel_size=5, stride=1, padding=2),
                    nn.MaxPool2d(kernel_size=3, stride=2),
                    nin_block(256, 384, kernel_size=3, stride=1, padding=1),
                    nn.MaxPool2d(kernel_size=3, stride=2), nn.Dropout(0.5),
                    nin_block(384, 10, kernel_size=3, stride=1, padding=1),
                    utils.GlobalAvgPool2d(), utils.FlattenLayer())

X = torch.rand(1, 1, 224, 224)
for name, blk in net.named_children():
    X = blk(X)
    print(name, 'output shape: ', X.shape)

# Train the model
batch_size = 128
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size, resize=224)

lr, num_epochs = 0.002, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
utils.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device,
                num_epochs)
Code example #15
import time
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net = nn.Sequential(nn.Conv2d(1, 6, 5), nn.BatchNorm2d(6), nn.Sigmoid(),
                    nn.MaxPool2d(2, 2), nn.Conv2d(6, 16, 5),
                    nn.BatchNorm2d(16), nn.Sigmoid(), nn.MaxPool2d(2, 2),
                    utils.FlattenLayer(), nn.Linear(16 * 4 * 4, 120),
                    nn.BatchNorm1d(120), nn.Sigmoid(), nn.Linear(120, 84),
                    nn.BatchNorm1d(84), nn.Sigmoid(), nn.Linear(84, 10))

batch_size = 256
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size=batch_size)

lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
utils.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device,
                num_epochs)
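
train_ch5 is the shared GPU-aware training helper that several of these CNN examples call. Its body is not shown anywhere in the collection; the following is a rough sketch that matches the call signature used above, assuming a plain cross-entropy training loop (not the utils module's actual code):

import torch

def train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
    net = net.to(device)
    loss = torch.nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.item() * y.shape[0]
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        print('epoch %d, loss %.4f, train acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n))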
Code example #16
        else:
            blk.append(utils.Residual(out_channels, out_channels))
    return nn.Sequential(*blk)


net = nn.Sequential(nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
                    nn.BatchNorm2d(64), nn.ReLU(),
                    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

net.add_module("resnet_block1", resnet_block(64, 64, 2, first_block=True))
net.add_module("resnet_block2", resnet_block(64, 128, 2))
net.add_module("resnet_block3", resnet_block(128, 256, 2))
net.add_module("resent_block4", resnet_block(256, 512, 2))

net.add_module("global_avg_pool", utils.GlobalAvgPool2d())
net.add_module("fc", nn.Sequential(utils.FlattenLayer(), nn.Linear(512, 10)))


# Train the model
def train_with_data_aug(train_augs, test_augs, lr=0.001):
    batch_size = 256
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss = torch.nn.CrossEntropyLoss()
    train_iter = load_cifar10(True, train_augs, batch_size)
    test_iter = load_cifar10(False, test_augs, batch_size)
    utils.train(train_iter,
                test_iter,
                net,
                loss,
                optimizer,
                device,
Code example #17
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   utils.GlobalAvgPool2d())

print('Inspect the network structure')

net = nn.Sequential(b1, b2, b3, b4, b5, utils.FlattenLayer(),
                    nn.Linear(1024, 10))
X = torch.rand(1, 1, 96, 96)
for blk in net.children():
    X = blk(X)
    print('output shape: ', X.shape)

print('Fetch and load the data, resized to 96 here')
batch_size = 256
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size, resize=96)

print('Train the model, only 1 epoch')
lr, num_epochs = 0.002, 1
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
utils.train_cnn(net, train_iter, test_iter, batch_size, optimizer, device,
                num_epochs)
Code example #18
"""
# Model
net = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                    nn.BatchNorm2d(64), nn.ReLU(),
                    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
num_channels, growth_rate = 64, 32  # current channel count, channels added by each conv
num_convs_in_dense_blocks = [4, 4, 4, 4]  # each dense block uses 4 conv layers

for i, num_convs in enumerate(num_convs_in_dense_blocks):
    DB = DenseBlock(num_convs, num_channels, growth_rate)  # dense block
    net.add_module("DenseBlock_%d" % i, DB)
    num_channels = DB.out_channels  # input channel count for the next dense block
    if i != len(num_convs_in_dense_blocks) - 1:  # transition layer
        net.add_module("transition_block_%d" % i,
                       transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2

net.add_module("BN", nn.BatchNorm2d(num_channels))
net.add_module("relu", nn.ReLU())
net.add_module("global_avg_pool", utils.GlobalAvgPool2d())
net.add_module(
    "fc", nn.Sequential(utils.FlattenLayer(), nn.Linear(num_channels, 10)))

# Load the data and train the model
batch_size = 256
train_iter, test_iter = utils.load_data_fashion_mnist(batch_size, resize=96)

lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
utils.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device,
                num_epochs)