Example #1
import torch

import d2lzh_pytorch as d2l  # helper package from Dive-into-DL-PyTorch


def train():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size = 16
    # If an "out of memory" error occurs, reduce batch_size or the resize value
    train_iter, test_iter = d2l.load_data_fashion_mnist(
        batch_size, root='Data/softmax/FashionMNIST2065')
    lr, num_epochs = 0.001, 5
    # `net` is assumed to be defined at module scope (see the sketch below)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device,
                  num_epochs)
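Note that train() reads `net` from module scope, so a network must exist before the call. A minimal placeholder sketch (this `net` is hypothetical, not the original author's model):

from torch import nn

# Hypothetical placeholder net; any 1x28x28 -> 10-class model works here.
net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),  # (6, 28, 28) -> (6, 14, 14)
    nn.Flatten(),
    nn.Linear(6 * 14 * 14, 10),  # 10 Fashion-MNIST classes
)

train()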
Example #2
def main():
    # 5.5.1 Model
    # x = torch.rand(1, 1, 28, 28)
    # print(x.shape)
    net = LeNet()
    # print(net(x))

    # 5.5.2 Load the data and train the model
    batch_size = 256
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
    lr, num_epochs = 0.001, 20
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device,
                  num_epochs)
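Example #2 instantiates a LeNet class that is not shown. A minimal sketch of the LeNet-5 style network commonly used for 1x28x28 Fashion-MNIST inputs (an assumption; the author's actual class may differ):

from torch import nn

class LeNet(nn.Module):
    # Sketch only: a LeNet-style net for 1x28x28 inputs (assumed, not the
    # original author's definition).
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 6, 5),   # (1, 28, 28) -> (6, 24, 24)
            nn.Sigmoid(),
            nn.MaxPool2d(2, 2),   # -> (6, 12, 12)
            nn.Conv2d(6, 16, 5),  # -> (16, 8, 8)
            nn.Sigmoid(),
            nn.MaxPool2d(2, 2),   # -> (16, 4, 4)
        )
        self.fc = nn.Sequential(
            nn.Linear(16 * 4 * 4, 120), nn.Sigmoid(),
            nn.Linear(120, 84), nn.Sigmoid(),
            nn.Linear(84, 10),
        )

    def forward(self, img):
        feature = self.conv(img)
        return self.fc(feature.view(img.shape[0], -1))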
Example #3
def main():
    # 5.6.2 Model
    net = AlexNet()
    print(net)

    # 5.6.3 Load the data
    batch_size = 128
    train_iter, test_iter = d2l.load_data_fashion_mnist(
        batch_size, resize=224)

    # 5.6.4 Training
    lr, num_epochs = 0.001, 5
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device,
                  num_epochs)
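Example #3 instantiates an AlexNet class that is not shown. A minimal sketch of the single-channel 224x224 Fashion-MNIST variant from the d2l book (an assumption; the author's actual class may differ):

from torch import nn

class AlexNet(nn.Module):
    # Sketch only: AlexNet for 1x224x224 inputs, 10 classes (assumed, not
    # the original author's definition).
    def __init__(self):
        super(AlexNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 96, 11, 4),       # (1, 224, 224) -> (96, 54, 54)
            nn.ReLU(),
            nn.MaxPool2d(3, 2),            # -> (96, 26, 26)
            nn.Conv2d(96, 256, 5, 1, 2),   # -> (256, 26, 26)
            nn.ReLU(),
            nn.MaxPool2d(3, 2),            # -> (256, 12, 12)
            nn.Conv2d(256, 384, 3, 1, 1), nn.ReLU(),
            nn.Conv2d(384, 384, 3, 1, 1), nn.ReLU(),
            nn.Conv2d(384, 256, 3, 1, 1), nn.ReLU(),
            nn.MaxPool2d(3, 2),            # -> (256, 5, 5)
        )
        self.fc = nn.Sequential(
            nn.Linear(256 * 5 * 5, 4096), nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(4096, 10),
        )

    def forward(self, img):
        feature = self.conv(img)
        return self.fc(feature.view(img.shape[0], -1))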
Example #4
import torch
import torchvision

import d2lzh_pytorch as d2l  # helper package from Dive-into-DL-PyTorch


def load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):
    # NOTE: the snippet was truncated above this point; the signature,
    # transform pipeline, and mnist_train line follow the standard
    # d2l-style loader and are assumed (including the default root path).
    trans = []
    if resize:
        trans.append(torchvision.transforms.Resize(size=resize))
    trans.append(torchvision.transforms.ToTensor())
    transform = torchvision.transforms.Compose(trans)

    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)

    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=4)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=4)

    return train_iter, test_iter

batch_size = 128
# If an "out of memory" error occurs, reduce batch_size or the resize value
train_iter, test_iter = load_data_fashion_mnist(batch_size, resize=224)

lr, num_epochs = 0.001, 5
# `net` is assumed to be defined earlier (e.g. an AlexNet as in Example #3,
# to match resize=224)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
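As a quick sanity check (a usage sketch, not part of the original snippet), the loader with resize=224 should yield batches of single-channel 224x224 images:

X, y = next(iter(train_iter))
print(X.shape, y.shape)  # torch.Size([128, 1, 224, 224]) torch.Size([128])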

Example #5
        # output_size is the forecast horizon, i.e. the number of future
        # time steps to predict
        self.fc = nn.Linear(args.hidden_size, args.output_size)

    def forward(self, x):
        # fixme
        # x = x.permute(0, 2, 1)  # transpose
        # input shape = (batch, channel, seq_len); channel is the number of
        # channels, i.e. the number of series
        x = self.conv(x)
        x = x.permute(0, 2, 1)
        x, _ = self.lstm(x)
        x = self.fc(x)

        x = x[:, -1, :]  # keep only the last time step
        return x


if __name__ == '__main__':
    lr, dropout, num_epochs = 0.001, 0.75, 100
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    loss = torch.nn.MSELoss()

    train_iter, test_iter = loan_data()
    net = CNN_LSTM(Arg(1, 18, 3, 4, 1, dropout))
    optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-8)
    d2l.train_ch5(net, train_iter, test_iter, 0, optimizer, device, num_epochs,
                  loss)
    # net.args={"in_channels":7,"out_channels":6,"hidden_size":3,"num_layers":5}
    # net.args.in_channels=7
    # print(net.args.num_layers)
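The __main__ block assumes an Arg container, a CNN_LSTM class (whose __init__ is only partially shown above), and a loan_data() loader. Judging from Arg(1, 18, 3, 4, 1, dropout) and the attributes accessed (args.hidden_size, args.output_size), the missing pieces could look like the following sketch; the field order, kernel size, and layer choices are assumptions:

import collections

from torch import nn

# Assumed field order, inferred from Arg(1, 18, 3, 4, 1, dropout) and the
# attribute accesses above.
Arg = collections.namedtuple(
    'Arg', ['in_channels', 'out_channels', 'hidden_size', 'num_layers',
            'output_size', 'dropout'])


class CNN_LSTM(nn.Module):
    # Sketch only: a 1-D conv front end feeding a stacked LSTM, consistent
    # with the forward() shown above; hyperparameters are assumptions.
    def __init__(self, args):
        super(CNN_LSTM, self).__init__()
        self.conv = nn.Conv1d(args.in_channels, args.out_channels,
                              kernel_size=3, padding=1)
        self.lstm = nn.LSTM(args.out_channels, args.hidden_size,
                            num_layers=args.num_layers, batch_first=True,
                            dropout=args.dropout)
        # output_size is the forecast horizon (number of future steps)
        self.fc = nn.Linear(args.hidden_size, args.output_size)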