# Example #1
class Model:
    """Wrap a ConvNet classifier with training, evaluation and checkpointing.

    Relies on ``ConvNet``, ``nn`` and ``torch`` being available at module
    level (defined/imported elsewhere in this file).
    """

    def __init__(self,
                 num_epochs=5,
                 num_classes=10,
                 batch_size=100,
                 learning_rate=0.001):
        # Hyperparameters.
        self.num_epochs = num_epochs
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.learning_rate = learning_rate

        # Network under training; ConvNet is defined elsewhere in the project.
        self.model = ConvNet(num_classes)

        # Loss and optimizer.
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=learning_rate)

    def train(self, train_loader):
        """Run the full training loop over ``train_loader``.

        Prints a progress line every 100 steps.
        """
        # Put the network in training mode (dropout/batch-norm active).
        # Without this, a preceding eval() call would leave the model
        # stuck in inference mode for the whole training run.
        self.model.train()

        total_step = len(train_loader)

        for epoch in range(self.num_epochs):
            for i, (images, labels) in enumerate(train_loader):
                # Forward pass
                outputs = self.model(images)
                loss = self.criterion(outputs, labels)

                # Backward and optimize
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                if (i + 1) % 100 == 0:
                    print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                        epoch + 1, self.num_epochs, i + 1, total_step,
                        loss.item()))

    def eval(self, test_loader):
        """Evaluate classification accuracy on ``test_loader``.

        Returns the fraction of correctly classified samples in [0, 1]
        (0.0 for an empty loader). The original computed this value and
        silently discarded it.
        """
        self.model.eval()

        with torch.no_grad():
            correct = 0
            total = 0

            for images, labels in test_loader:
                outputs = self.model(images)
                # Predicted class = arg-max over the class dimension.
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        # Guard against division by zero on an empty loader.
        return correct / total if total else 0.0

    def save(self, path='model.ckpt'):
        """Save the model checkpoint (state dict) to ``path``.

        ``path`` defaults to 'model.ckpt' for backward compatibility.
        """
        torch.save(self.model.state_dict(), path)
# Example #2
# Number of full training sets of `num_images` files; floor division is the
# idiomatic integer form and avoids the float round-trip of int(a / b).
quot = len(training_files) // num_images
print('quot:', quot)

# Neural Network declarations

num_epochs = 2
num_classes = 2
batch_size = 64  # TO BE CHANGED
learning_rate = 0.001

# Neural Net framework

model = ConvNet()

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Multiply the learning rate by 0.9 whenever the monitored quantity
# stops decreasing (mode 'min').
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       'min',
                                                       factor=0.9)

# Select the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for j in range(0, quot):
    input_tensor_training = []
    block_labels_training = []
    print('This is the ', j + 1, ' set of , ', num_images)
    if (j > 0):
        model = ConvNet()
        model.cuda(cuda0)
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        checkpoint = torch.load('/home/sharan/model_1.pth')