import torch
import torch.nn as nn
import torch.nn.parallel


class Model(nn.Module):
    """Small two-layer fully connected network: 10 -> 5 -> 2."""

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Linear(10, 5)
        self.layer2 = nn.Linear(5, 2)

    def forward(self, x):
        """Apply both linear layers in sequence.

        Args:
            x: input tensor of shape (batch, 10).

        Returns:
            Tensor of shape (batch, 2).
        """
        x = self.layer1(x)
        x = self.layer2(x)
        return x


if __name__ == "__main__":
    # Fall back to CPU when CUDA is unavailable so the script still runs;
    # the original hard-coded "cuda:0" and crashed on CPU-only machines.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model = Model().to(device)
    # DataParallel splits each input batch across all visible GPUs; with
    # zero or one GPU it simply runs the wrapped module unchanged.
    model = nn.DataParallel(model)

    inputs = torch.randn(100, 10).to(device)
    outputs = model(inputs)

    # Save the *wrapped* module's parameters and buffers. Using
    # model.module.state_dict() avoids the "module." key prefix that
    # DataParallel adds, so the checkpoint loads directly into a plain
    # Model via load_state_dict().
    torch.save(model.module.state_dict(), 'model.pth')

# This saves the internal state of the model — its learnable parameters and
# buffers — to a file named 'model.pth'. Overall, this example uses the
# PyTorch framework and nn.DataParallel for data-parallel execution of a
# neural network's forward pass.