Example #1
    def test_model(self, epoch):
        # Evaluate the model on the held-out test set
        self.net.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for i in range(len(self.x_test) // self.args.batch_size):
                images, labels, _ = sample_minibatch_deterministically(
                    x=self.x_test,
                    y=self.y_test,
                    batch_i=i,
                    batch_size=self.args.batch_size)
                outputs = self.net(images)
                outputs = outputs[-1]
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted.cpu() == labels).sum().item()
            perf = 100 * correct / total
        print(
            'Epoch %d: Accuracy of the network on the %d test images: %d %%'
            % (epoch, total, perf))
        logging.info(
            'Epoch %d: Accuracy of the network on the %d test images: %d %%'
            % (epoch, total, perf))
        return perf
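Every example on this page relies on a sample_minibatch_deterministically helper that is not shown. A minimal sketch of what such a helper might look like, assuming x and y are tensors sliceable along the first dimension and that the discarded third return value holds the sampled indices (both assumptions):

import torch

def sample_minibatch_deterministically(x, y, batch_i, batch_size):
    # Take the batch_i-th consecutive slice of the (pre-shuffled) data.
    start = batch_i * batch_size
    end = start + batch_size
    # The third value is assumed to be the indices of the sampled rows.
    return x[start:end], y[start:end], torch.arange(start, end)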
Example #2
    def train_model_unsupervised(self, x, y, num_epochs):
        print("Unsupervised Training")
        logging.info("Unsupervised Training")
        for epoch in range(num_epochs):
            x, y = shuffle(x=x, y=y)
            for i in range(len(x) // self.args.batch_size):
                x_l, y_l, _ = sample_minibatch_deterministically(x, y, batch_i=i, batch_size=self.args.batch_size)

                labels_onehot = torch.zeros([y_l.size(0), self.num_classes])
                labels_onehot.scatter_(1, y_l.unsqueeze(1).long(), 1)
                out = x_l
                # Forward + Backward + Optimize
                for (optimizer, forward) in zip(self.net.optimizers, self.net.forwards):
                    if self.conditioned:
                        out = self.optimizer_module(optimizer, forward, out, labels_onehot)
                    else:
                        out = self.optimizer_module(optimizer, forward, out)

            # Evaluate after every epoch and checkpoint the best model so far
            perf = self.test_model(epoch + 1)
            if perf > self.best_perf:
                self.best_perf = perf
                torch.save(self.net.state_dict(), self.model_name + '_model_best.pkl')
            self.net.train()  # test_model leaves the net in eval mode

        # Save the model and stats
        with open(self.model_name + '_stats.pkl', 'wb') as f:
            pkl.dump(self.stats, f)
        torch.save(self.net.state_dict(), self.model_name+'_model.pkl')
        if self.plot:
            plot(self.stats, name=self.model_name)
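The unsupervised loop trains the network one layer at a time: it walks the per-layer (optimizer, forward) pairs and calls self.optimizer_module on each, feeding one layer's output into the next. That method is not shown; a rough sketch of a layer-local update consistent with the call sites above, where local_loss and the label-accepting forward signature are assumptions:

def optimizer_module(self, optimizer, forward, x, labels_onehot=None):
    # One local update for a single layer: forward, layer-local loss,
    # then a step on this layer's optimizer only.
    optimizer.zero_grad()
    if labels_onehot is not None:
        out = forward(x, labels_onehot)  # conditioned on the one-hot labels
    else:
        out = forward(x)
    loss = self.local_loss(out)  # hypothetical layer-local objective
    loss.backward()
    optimizer.step()
    # Detach so later layers do not backpropagate into this one.
    return out.detach()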
Example #3
    def train_model_supervised(self, x, y, num_epochs):
        print("Supervised Training")
        logging.info("Supervised Training")
        for epoch in range(num_epochs):
            x, y = shuffle(x=x, y=y)
            for i in range(len(x) // self.args.batch_size):
                x_l, y_l, _ = sample_minibatch_deterministically(
                    x, y, batch_i=i, batch_size=self.args.batch_size)

                labels_onehot = torch.zeros([y_l.size(0), self.num_classes])
                labels_onehot.scatter_(1, y_l.long().unsqueeze(1), 1)
                # Forward + Backward + Optimize
                loss, grad_loss = self.optimize_grad_and_net(
                    x_l, y_l.long(), labels_onehot,
                    self.net.grad_optimizer, self.net.optimizer, self.net)

                if (i + 1) % 10 == 0:
                    print('Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                          % (epoch + 1, self.num_epochs, i + 1,
                             len(x) // self.batch_size, loss.item(), grad_loss.item()))
                    logging.info('Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                                 % (epoch + 1, self.num_epochs, i + 1,
                                    len(x) // self.batch_size, loss.item(), grad_loss.item()))

            if (epoch + 1) % 10 == 0:
                perf = self.test_model(epoch + 1)
                if perf > self.best_perf:
                    self.best_perf = perf
                    torch.save(self.net.state_dict(), self.model_name + '_model_best.pkl')
                self.net.train()  # test_model leaves the net in eval mode

        # Save the model and stats
        with open(self.model_name + '_stats.pkl', 'wb') as f:
            pkl.dump(self.stats, f)
        torch.save(self.net.state_dict(), self.model_name + '_model.pkl')
        if self.plot:
            plot(self.stats, name=self.model_name)
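The supervised loop above delegates the actual update to optimize_grad_and_net, which is also not shown. Given the names (a grad_optimizer next to the network's own optimizer, and a separate grad_loss), it plausibly trains a synthetic-gradient module alongside the classifier; a rough sketch under that reading, where cross_entropy is a stand-in for the real objective and synthetic_grad_loss is an assumed helper:

import torch.nn.functional as F

def optimize_grad_and_net(self, x, y, labels_onehot, grad_optimizer, optimizer, net):
    optimizer.zero_grad()
    grad_optimizer.zero_grad()

    outputs = net(x)
    logits = outputs[-1]  # matches test_model, which reads outputs[-1]
    loss = F.cross_entropy(logits, y)
    loss.backward(retain_graph=True)
    optimizer.step()

    # Assumed helper: fit the synthetic-gradient predictor to the true
    # gradients observed during the backward pass above.
    grad_loss = net.synthetic_grad_loss()
    grad_loss.backward()
    grad_optimizer.step()
    return loss, grad_loss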
Example #4
    def train_model_helper(self, x, y, is_supervised=True, weight=0.0):
        # can replace the two lines below with sample_minibatch
        x, y = shuffle(x=x, y=y)
        x_mb, y_mb, _ = sample_minibatch_deterministically(
            x, y, batch_i=1, batch_size=self.args.batch_size)

        labels_onehot = torch.zeros([y_mb.size(0), self.num_classes])
        labels_onehot.scatter_(1, y_mb.long().unsqueeze(1), 1)

        if is_supervised:
            loss, grad_loss = self.optimize_grad_and_net(
                x_mb,
                y_mb.long(),
                labels_onehot,
                self.net.grad_optimizer,
                self.net.optimizer,
                self.net,
                is_supervised=is_supervised)
        else:
            loss, grad_loss = self.optimize_grad_and_net(
                x_mb,
                y_mb.long(),
                labels_onehot,
                self.net.grad_optimizer,
                self.net.optimizer,
                self.net,
                is_supervised=is_supervised,
                weight=weight)

        return loss, grad_loss
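Because train_model_helper shuffles, draws a single minibatch, and runs exactly one optimization step, a caller can interleave supervised and unsupervised updates. A hypothetical interleaving loop, where trainer and the *_labeled / *_unlabeled tensors are placeholder names:

for step in range(num_steps):
    # One supervised step on labeled data ...
    loss, grad_loss = trainer.train_model_helper(
        x_labeled, y_labeled, is_supervised=True)
    # ... then one down-weighted unsupervised step.
    loss_u, grad_loss_u = trainer.train_model_helper(
        x_unlabeled, y_unlabeled, is_supervised=False, weight=0.1)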
Example #5
    def train_model_supervised(self, x, y, num_epochs):
        for epoch in range(num_epochs):
            x, y = shuffle(x=x, y=y)
            for i in range(len(x) // self.args.batch_size):

                x_l, y_l, _ = sample_minibatch_deterministically(
                    x, y, batch_i=i, batch_size=self.args.batch_size)

                labels_onehot = torch.zeros([y_l.size(0), self.num_classes])
                labels_onehot.scatter_(1, y_l.long().unsqueeze(1), 1)
                out = x_l

                for (optimizer, forward) in zip(self.net.optimizers,
                                                self.net.forwards):
                    if self.conditioned:
                        out = self.optimizer_module(optimizer, forward, out,
                                                    labels_onehot)
                    else:
                        out = self.optimizer_module(optimizer, forward, out)
                # synthetic model
                # Forward + Backward + Optimize
                loss, grad_loss = self.optimizer_dni_module(
                    x_l, y_l.long(), labels_onehot, self.net.grad_optimizer,
                    self.net.optimizer, self.net)

                if (i + 1) % 10 == 0:
                    print('Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                          % (epoch + 1, self.num_epochs, i + 1,
                             len(x) // self.batch_size, loss.item(), grad_loss.item()))
                    logging.info('Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                                 % (epoch + 1, self.num_epochs, i + 1,
                                    len(x) // self.batch_size, loss.item(), grad_loss.item()))

            if (epoch + 1) % 10 == 0:
                perf = self.test_model(epoch + 1)
                if perf > self.best_perf:
                    self.best_perf = perf
                    torch.save(self.net.state_dict(),
                               self.model_name + '_model_best.pkl')
                self.net.train()  # test_model leaves the net in eval mode

        # Save the model and stats
        with open(self.model_name + '_stats.pkl', 'wb') as f:
            pkl.dump(self.stats, f)
        torch.save(self.net.state_dict(), self.model_name + '_model.pkl')
        if self.plot:
            plot(self.stats, name=self.model_name)
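A side note on the one-hot encoding that every example repeats: on PyTorch 1.1 and later, the zeros-plus-scatter_ pattern can be written more directly with torch.nn.functional.one_hot, which produces the same tensor:

import torch
import torch.nn.functional as F

y_l = torch.tensor([2, 0, 1])
num_classes = 3

# scatter_-based encoding, as in the examples above
labels_onehot = torch.zeros([y_l.size(0), num_classes])
labels_onehot.scatter_(1, y_l.long().unsqueeze(1), 1)

# equivalent, more direct form
assert torch.equal(labels_onehot,
                   F.one_hot(y_l.long(), num_classes=num_classes).float())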