def train(self):
    # ---- training pass over one epoch ----
    tr_running_loss = 0.
    tr_running_correct = 0
    num_data = 0
    self.epoch += 1
    n_iter = 0  # number of batches processed
    for x, y in self.train_loader:
        if self.gpu:
            tr_batch_x = x.cuda()
            tr_batch_y = y.cuda()
        else:
            tr_batch_x = x
            tr_batch_y = y
        self.optimizer.zero_grad()
        outputs = self.model(tr_batch_x)
        num_data += outputs.size(0)
        loss = self.criterion(outputs, tr_batch_y)
        corrects = num_of_correct(outputs, tr_batch_y)
        loss.backward()
        self.optimizer.step()
        n_iter += 1
        tr_running_loss += loss.item()
        tr_running_correct += corrects
    tr_running_loss /= n_iter  # mean loss per batch
    training_acc = tr_running_correct / num_data
    if self.epoch == 1:
        self.train_best_acc = training_acc
        self.train_best_loss = tr_running_loss
    if self.train_best_acc < training_acc:
        self.train_best_acc = training_acc
    if self.train_best_loss > tr_running_loss:
        self.train_best_loss = tr_running_loss
    # self.writer.add_scalar('training loss', tr_running_loss, self.epoch)
    # self.writer.add_scalar('training accuracy', training_acc, self.epoch)
    print('epoch:{}, tr_loss:{:0.4f}, tr_acc:{:0.4f}, '.format(
        self.epoch, tr_running_loss, training_acc), end='')

    # ---- validation pass ----
    # if self.epoch % self.val_num == 0:
    #     self.val_loss, val_accuracy = evaluator(self.model,
    #                                             self.criterion, self.val_loader)
    val_running_loss = 0.
    val_running_correct = 0
    val_num_data = 0
    self.model.eval()
    n_iter = 0
    with torch.no_grad():  # no gradients needed during validation
        for x, y in self.val_loader:
            if self.gpu:
                batch_x = x.cuda()
                batch_y = y.cuda()
            else:
                batch_x = x
                batch_y = y
            outputs = self.model(batch_x)
            val_num_data += outputs.size(0)
            val_loss = self.criterion(outputs, batch_y)
            corrects = num_of_correct(outputs, batch_y)
            n_iter += 1
            val_running_loss += val_loss.item()
            val_running_correct += corrects
    val_running_loss /= n_iter
    val_acc = val_running_correct / val_num_data
    if self.epoch == 1:
        self.val_best_acc = val_acc
        self.val_best_loss = val_running_loss
    if self.val_best_acc < val_acc:
        self.val_best_acc = val_acc
    if self.val_best_loss > val_running_loss:
        self.val_best_loss = val_running_loss
    # self.writer.add_scalar('validation loss', val_running_loss, self.epoch)
    # self.writer.add_scalar('validation accuracy', val_acc, self.epoch)
    print('val_loss:{:0.4f}, val_acc:{:0.4f}'.format(
        val_running_loss, val_acc))
    self.model.train()  # back to training mode for the next epoch
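# Both loops above call a `num_of_correct` helper that is defined elsewhere in
# the notebook. A minimal sketch, assuming it counts argmax matches against
# integer class labels (the real helper may differ):
import torch

def num_of_correct(outputs, targets):
    # predicted class = index of the largest logit along the class dimension
    _, preds = torch.max(outputs, dim=1)
    # count of positions where prediction equals label, as a plain Python int
    return (preds == targets).sum().item()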
trainer = Trainer(model, criterion, optimizer, train_loader, val_loader,
                  val_num=1, early_stopping=2, writer=writer, gpu=True)
trainer.run(epochs=epochs)

# ---- evaluate on the held-out test set ----
model.eval()
with torch.no_grad():  # no gradients needed for the test-set forward pass
    test_y = nn.functional.softmax(
        model(torch.from_numpy(test_).float().cuda()), dim=1)
test_y.size()
corrects = num_of_correct(test_y, torch.from_numpy(test_t).long().cuda())
accuracy = corrects / test_y.size(0)
print('accuracy:{}'.format(accuracy))
# loss, acc, pre_array = evaluator(model, criterion, test_loader, gpu=True)
# print('Test Accuracy of the model on {} test data:{:0.4f}'.format(
#     test_.shape[0], acc))

# ---- plot class-0 probability against the true labels over time ----
dt = 1 / 128  # sampling interval (128 Hz)
N = test_.shape[0]
t = np.linspace(1, N, N) * dt - dt
plt.plot(t, test_y.cpu().numpy()[:, 0])
plt.plot(t, test_t)
plt.grid()
plt.xlabel('time')
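# The commented-out calls above reference an `evaluator` helper that is not
# shown in this section. A minimal sketch consistent with the
# `loss, acc, pre_array = evaluator(model, criterion, test_loader, gpu=True)`
# call, assuming batched evaluation with autograd disabled -- the actual
# implementation may differ:
import numpy as np
import torch

def evaluator(model, criterion, loader, gpu=False):
    model.eval()
    total_loss, total_correct, num_data = 0., 0, 0
    batch_outputs = []
    with torch.no_grad():
        for x, y in loader:
            if gpu:
                x, y = x.cuda(), y.cuda()
            outputs = model(x)
            total_loss += criterion(outputs, y).item()
            total_correct += num_of_correct(outputs, y)
            num_data += outputs.size(0)
            batch_outputs.append(outputs.cpu().numpy())
    model.train()
    # mean loss per batch, accuracy over samples, stacked raw outputs
    return (total_loss / len(loader),
            total_correct / num_data,
            np.concatenate(batch_outputs))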