import torch
import torch.nn.functional as F
from torchvision import utils


def train(epoch):
    """Train the autoencoder for one epoch; the reconstruction target is the input itself."""
    model.train()
    for batch_idx, (data, _) in enumerate(train_loader):
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.mse_loss(output, data)  # reconstruction loss against the input
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    # Periodically dump the last batch and its reconstruction, and checkpoint the model.
    if epoch % args.save_image_epoch == 0:
        utils.save_image(data, 'origin_pictures.png', normalize=True, scale_each=True)
        utils.save_image(output.detach(), 'reconstruct_pictures.png',
                         normalize=True, scale_each=True)
    if epoch % args.save_model_epoch == 0:
        torch.save(model.state_dict(), 'model.pth')
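# A minimal evaluation counterpart for the autoencoder above, added as a
# hedged sketch: `test_loader` is an assumed held-out DataLoader and is not
# defined in this snippet; everything else reuses the same globals as train().
def test(epoch):
    model.eval()
    test_loss = 0.
    with torch.no_grad():  # no gradients needed for evaluation
        for data, _ in test_loader:
            if args.cuda:
                data = data.cuda()
            output = model(data)
            # Sum per-sample losses here, average over the dataset below.
            test_loss += F.mse_loss(output, data, reduction='sum').item()
    test_loss /= len(test_loader.dataset)
    print('Test Epoch: {}\tAverage reconstruction loss: {:.6f}'.format(epoch, test_loss))
    return test_loss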
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)  # expects log-probabilities (e.g. a log_softmax output)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
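# Hedged sketch of the conventional test() that pairs with the NLL-loss
# classifier loop above, in the style of the standard PyTorch MNIST example;
# `test_loader` is an assumption, not part of this snippet.
def test():
    model.eval()
    test_loss = 0.
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += output.argmax(dim=1).eq(target).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))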
def train(epoch):
    model.train()
    correct = 0
    correct_batch = 0
    for batch_idx, batch in enumerate(train_loader):
        data = batch['image']
        target = batch['label'].view(-1)
        if args.cuda:
            data = data.cuda()
            target = target.cuda()
        data, target = data.float(), target.long()
        optimizer.zero_grad()
        output = model(data)
        pred = output.argmax(dim=1)
        batch_correct = pred.eq(target).sum().item()
        correct_batch += batch_correct
        correct += batch_correct
        # loss = F.nll_loss(output, target)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # The accuracy denominator assumes full batches over the last
            # log_interval iterations.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {}/{}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(),
                correct_batch, len(pred) * args.log_interval))
            correct_batch = 0
    print('\nTrain set: Accuracy: {}/{} ({:.0f}%)'.format(
        correct, len(train_loader.dataset),
        100. * correct / len(train_loader.dataset)))
    if epoch % args.save_model_epoch == 0:
        suffix = '_aug' if args.aug else ''
        torch.save(model.state_dict(),
                   '{}_bs{}e{}{}.pth'.format(args.model, args.batch_size, epoch, suffix))
    return correct, 100. * correct / len(train_loader.dataset)
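# The loop above consumes batches as dicts with 'image' and 'label' keys,
# which implies a custom Dataset shaped roughly like this hedged sketch.
# DictDataset and its constructor arguments are illustrative assumptions,
# not the repo's actual class.
from torch.utils.data import Dataset, DataLoader

class DictDataset(Dataset):
    def __init__(self, images, labels):
        self.images = images  # e.g. an (N, C, H, W) array of pixels
        self.labels = labels  # e.g. an (N,) array of integer class ids

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        return {'image': torch.as_tensor(self.images[idx]),
                'label': torch.as_tensor(self.labels[idx])}

# train_loader = DataLoader(DictDataset(images, labels),
#                           batch_size=args.batch_size, shuffle=True)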
def train_sw(epoch):
    train_loss = 0.
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader_sw):
        scheduler_sw.batch_step()  # per-batch learning-rate update of the custom scheduler
        data, target = data.cuda(), target.cuda().float()
        optimizer.zero_grad()
        output = model(data)
        loss = F.mse_loss(output, target, reduction='sum')  # summed here, averaged per sample below
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    if epoch % args.save_model_epoch == 0:
        torch.save(model.state_dict(),
                   'results/weights_{}_l{}_u{}.pth'.format(
                       args.activation, args.layers, args.units))
    avg_loss = train_loss / len(train_loader_sw.dataset)
    print('Epoch {}\n\nTrain set: Average Loss: {:.4f}'.format(epoch, avg_loss))
    return avg_loss
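# Hedged driver sketch showing how these training functions are typically
# wired up. The flag names mirror the attributes used above (args.cuda,
# args.log_interval, args.save_model_epoch, ...); the defaults and the
# top-level loop are illustrative assumptions, not the repo's actual entry
# point.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--log-interval', type=int, default=10)
parser.add_argument('--save-image-epoch', type=int, default=10)
parser.add_argument('--save-model-epoch', type=int, default=10)
parser.add_argument('--no-cuda', action='store_true')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# model, optimizer, and the DataLoaders are built elsewhere in the repo; then:
# for epoch in range(1, args.epochs + 1):
#     train(epoch)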