def test(self, data, label):
    ''' Evaluate the model on an unnormalized ndarray test dataset and return the accuracy.
    @param data [list or ndarray, (batch, input_dim)]
    @param label [list or ndarray, (batch, output_dim)]
    '''
    pred = self.predict(data)
    pred = CUDA(torch.tensor(pred).long())
    labels = CUDA(torch.tensor(label).long())
    # eq() yields a bool tensor; cast to float before taking the mean
    acc = pred.eq(labels.view_as(pred)).float().mean().item()
    return acc
def fit(self, x=None, y=None, use_data_buf=True, normalize=True):
    ''' Train the model either from external data or from the internal data buffer.
    @param x [list or ndarray, (batch, input_dim)]
    @param y [list or ndarray, (batch, output_dim)]
    '''
    if use_data_buf:
        # train on everything accumulated in the internal buffer
        x, y = self.data_buf.get_all()
    train_loader, test_loader = self.make_dataloader(x, y, normalize=normalize)

    for epoch in range(self.n_epochs):
        loss_train, acc_train = 0, 0
        self.model.train()
        for datas, labels in train_loader:
            datas = CUDA(datas)
            labels = CUDA(labels)
            self.optimizer.zero_grad()
            outputs = self.model(datas)
            labels = torch.squeeze(labels)
            loss = self.criterion(outputs, labels)
            loss.backward()
            self.optimizer.step()
            loss_train += loss.item() * datas.shape[0]  # sum of per-sample losses
            pred = outputs.argmax(dim=1, keepdim=True)  # index of the max logit
            acc_train += pred.eq(labels.view_as(pred)).sum().item()

        if self.save and (epoch + 1) % self.save_freq == 0:
            self.save_model(self.save_path)

        if (epoch + 1) % self.test_freq == 0:
            loss_train /= len(train_loader.dataset)
            acc_train /= len(train_loader.dataset)
            if len(test_loader) > 0:
                loss_test, acc_test = self.test_model(test_loader)
                print(f"epoch [{epoch}/{self.n_epochs}], "
                      f"train loss|acc: {loss_train:.4f}|{100.*acc_train:.2f}%, "
                      f"test loss|acc: {loss_test:.4f}|{100.*acc_test:.2f}%")
            else:
                print(f"epoch [{epoch}/{self.n_epochs}], "
                      f"train loss|acc: {loss_train:.4f}|{100.*acc_train:.2f}%")

    if self.save:
        self.save_model(self.save_path)
def test_model(self, testloader):
    ''' Evaluate the model on a normalized test dataset.
    @param testloader [torch.utils.data.DataLoader]
    '''
    self.model.eval()
    loss_test, acc_test = 0, 0
    with torch.no_grad():  # no gradients needed for evaluation
        for datas, labels in testloader:
            datas = CUDA(datas)
            labels = CUDA(labels)
            outputs = self.model(datas)
            labels = torch.squeeze(labels)
            loss = self.criterion(outputs, labels)
            loss_test += loss.item() * datas.shape[0]
            pred = outputs.argmax(dim=1, keepdim=True)  # index of the max log-probability
            acc_test += pred.eq(labels.view_as(pred)).sum().item()
    loss_test /= len(testloader.dataset)
    acc_test /= len(testloader.dataset)
    self.model.train()
    return loss_test, acc_test
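# Minimal usage sketch (assumptions flagged): the class wrapping these methods is
# not shown in this section, so `SomeModelWrapper(...)` below is a hypothetical
# constructor; only fit()/test()/test_model() are taken from this file.
#
#     clf = SomeModelWrapper(...)            # hypothetical constructor
#     clf.fit(x_train, y_train,              # train on external arrays
#             use_data_buf=False, normalize=True)
#     acc = clf.test(x_test, y_test)         # accuracy on unnormalized arrays
#
# With use_data_buf=True (the default), fit() instead pulls all accumulated
# (x, y) pairs from self.data_buf.get_all() and ignores the x/y arguments,
# reporting train/test loss and accuracy every `test_freq` epochs.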