Example #1
A unittest TestCase that draws a hyperparameter configuration with Ax's Sobol generator, builds a network from it, trains it briefly on CIFAR-10 and evaluates it on CPU.

import os
import shutil
import unittest

import ax

# prepare_cifar10, SimpleTrainer, Net and search_space are helpers from the
# surrounding project and are assumed to be importable in this module.

class TrainingTestCase(unittest.TestCase):
    """Test cases checking that we can build a model,
    train it, evaluate it, quantize it and prune it."""
    def setUp(self):
        dataset, classes = prepare_cifar10()
        self.models_path = "./models"
        self.trainer = SimpleTrainer(pruning=True,
                                     datasets=dataset,
                                     models_path=self.models_path)
        # draw one random (Sobol) hyperparameter configuration from the
        # search space and use it to build the network under test
        experiment = ax.Experiment(
            name="model_test",
            search_space=search_space(),
        )
        sobol = ax.Models.SOBOL(search_space=experiment.search_space)
        self.param = sobol.gen(1).arms[0].parameters
        self.net = Net(self.param, classes=classes, input_shape=(3, 32, 32))

        if not os.path.exists(self.models_path):
            os.mkdir(self.models_path)

    def tearDown(self):
        # clean up model checkpoints and the downloaded dataset
        shutil.rmtree(self.models_path)
        shutil.rmtree("./data")

    def test_training(self):
        self.trainer.load_dataloaders(batch_size=4, collate_fn=None)
        self.trainer.train(self.net,
                           self.param,
                           name="0",
                           epochs=1,
                           reload=False,
                           old_net=None)
        self.trainer.evaluate(self.net.to('cpu'), quant_mode=False)
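To run this test case on its own, the standard unittest entry point can be appended at the bottom of the module; this is a minimal sketch and assumes the class above lives in a module that is executed directly (running `python -m unittest` against the test file works just as well).

if __name__ == '__main__':
    unittest.main()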
Example #2
A standalone training script for a small CIFAR-10 network: build the model, set up the loss and optimizer, move everything to the GPU (using DataParallel when more than one GPU is available) and run a short training loop.

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision  # only needed if the commented-out image preview is enabled

# Net, trainloader, classes and imshow are defined elsewhere in this module
# (the usual CIFAR-10 tutorial setup is assumed).

if __name__ == '__main__':
    # dataiter = iter(trainloader)
    # images, labels = dataiter.next()
    # imshow(torchvision.utils.make_grid(images, padding=2))
    # print(' '.join(classes[labels[j]] for j in range(4)))

    # build the network without batch normalization
    bn = False
    net = Net(bn)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        # use data parallelism across all available GPUs
        net = nn.DataParallel(net)

    net.to(device)

    # train the network for two epochs
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            # forward
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            # backward
            loss.backward()
            # optimize
            optimizer.step()

            # report the average loss every 2000 mini-batches
            running_loss += loss.item()
            if i % 2000 == 1999:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
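After training, a natural follow-up for this kind of script is to measure accuracy on the held-out test set. The sketch below is illustrative and not part of the original example; it assumes a testloader built the same way as trainloader.

    # evaluate on the test set (testloader is assumed to exist, analogous to trainloader)
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on the test set: %d %%' % (100 * correct / total))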