def train(self, epoch):
    """Run a single training epoch over ``self.train_loader``.

    Args:
        epoch: Epoch index (unused here; kept for caller compatibility).

    Returns:
        Tuple of (moving-average training loss, training accuracy).
    """
    self.model.train()
    loss_meter = MovingAverageMeter()
    acc_meter = AccuracyMeter()
    for inputs, targets in self.train_loader:
        inputs = Variable(inputs)
        targets = Variable(targets)
        if self.use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        # Forward pass and loss.
        logits = self.model(inputs)
        batch_loss = F.cross_entropy(logits, targets)
        # Backward pass and parameter update.
        self.optimizer.zero_grad()
        batch_loss.backward()
        self.optimizer.step()
        # Track running loss and accuracy for this epoch.
        loss_meter.update(float(batch_loss.data))
        predictions = logits.data.max(dim=1)[1]
        n_correct = int(predictions.eq(targets.data).cpu().sum())
        acc_meter.update(n_correct, inputs.size(0))
    return loss_meter.average, acc_meter.accuracy
def train(self, epoch):
    """Run a single training epoch with gradient clipping and periodic logging.

    Args:
        epoch: Epoch index, used only in the progress printout.

    Returns:
        Tuple of (average training loss, training accuracy) for the epoch.
    """
    self.model.train()
    train_loss = AverageMeter()
    train_acc = AccuracyMeter()
    for i, (x, y) in enumerate(self.train_loader):
        x = Variable(x)
        y = Variable(y)
        if self.use_cuda:
            x = x.cuda()
            y = y.cuda()
        output = self.model(x)
        loss = F.cross_entropy(output, y)
        self.optimizer.zero_grad()
        loss.backward()
        # Prevent exploding gradients. Fix: clip_grad_norm expects an
        # iterable of parameters, not the optimizer — passing
        # self.optimizer raises at runtime.
        clip_grad_norm(self.model.parameters(), max_norm=1)
        self.optimizer.step()
        train_loss.update(float(loss.data), x.size(0))
        y_pred = output.data.max(dim=1)[1]
        # get_accuracy returns (?, correct_count, ?) — only the count is needed here.
        _, correct, _ = get_accuracy(y.data, y_pred)
        train_acc.update(correct, x.size(0))
        if i % 100 == 0:
            print(
                '\nTrain Epoch/batch| [{}/{}]: Average batch loss:{:.6f},acc: {:.6f}\n'
                .format(epoch, i, train_loss.average, train_acc.accuracy))
    return train_loss.average, train_acc.accuracy
def validate(self):
    """Evaluate the model on ``self.valid_loader`` without updating weights.

    Returns:
        Tuple of (average validation loss, validation accuracy).
    """
    self.model.eval()
    valid_loss = AverageMeter()
    valid_acc = AccuracyMeter()
    for i, (x, y) in enumerate(self.valid_loader):
        # volatile=True disables autograd history (legacy pre-0.4 API).
        x = Variable(x, volatile=True)
        y = Variable(y).long()
        if self.use_cuda:
            x = x.cuda()
            y = y.cuda()
        output = self.model(x)
        loss = F.cross_entropy(output, y)
        # Fix: pass the batch size so AverageMeter is weighted per sample,
        # consistent with how train() uses the same meter class.
        valid_loss.update(float(loss.data), x.size(0))
        y_pred = output.data.max(dim=1)[1]
        correct = int(y_pred.eq(y.data).cpu().sum())
        valid_acc.update(correct, x.size(0))
    # Fix: the original printed an undefined name `epoch` (NameError) and
    # mislabeled the accuracy value as "Average batch loss".
    print('\nValidation: Average batch loss: {:.6f}, acc: {:.6f}\n'.format(
        valid_loss.average, valid_acc.accuracy))
    return valid_loss.average, valid_acc.accuracy