def test_epoch(dataloader, network, loss, loggers):
    """Run one evaluation pass over `dataloader` without gradient tracking.

    For every batch: move data to GPU if available, forward through
    `network`, compute `loss`, and report per-iteration stats through the
    console printer, the logger, and the visual logger. After the loop the
    last batch's targets/outputs are plotted and the mean loss is printed.

    Args:
        dataloader: iterable of (input, target) batches.
        network: model to evaluate (called as `network(batch)` so that
            nn.Module hooks fire, instead of `.forward()` directly).
        loss: criterion mapping (output, target) -> scalar loss tensor.
        loggers: (logger, vizlogger) pair; logger records scalar stats,
            vizlogger renders step curves and image panels.
    """
    logger, vizlogger = loggers
    printer = Printer(N=len(dataloader))
    logger.set_mode("test")

    mean_loss = 0.
    num_batches = 0  # counted explicitly so the average is correct (and safe when empty)
    with torch.no_grad():
        for iteration, data in enumerate(dataloader):
            inputs, target = data  # avoid shadowing the builtin `input`

            if torch.cuda.is_available():
                inputs = inputs.cuda()
                target = target.cuda()

            output = network(inputs)
            batch_loss = loss(output, target)

            stats = {"loss": batch_loss.data.cpu().numpy()}
            mean_loss += batch_loss.data.cpu().numpy()
            num_batches += 1

            printer.print(stats, iteration)
            logger.log(stats, iteration)
            vizlogger.plot_steps(logger.get_data())

        if num_batches > 0:
            # Visualize the last batch only (target/output are unbound otherwise).
            vizlogger.plot_images(target.cpu().detach().numpy(),
                                  output.cpu().detach().numpy())

    if num_batches > 0:
        # Divide by the batch count, not the last index (original had an off-by-one).
        print('Loss: %.4f' % (mean_loss / num_batches))
def train_epoch(dataloader, network, optimizer, loss, loggers):
    """Run one training pass over `dataloader`.

    For every batch: zero gradients, move data to GPU if available, forward
    through `network` (via `network(batch)` so nn.Module hooks fire),
    compute `loss`, backpropagate, and step the optimizer. Per-iteration
    stats go to the console printer, the logger, and the visual logger.

    Args:
        dataloader: iterable of (input, target) batches.
        network: model being trained.
        optimizer: torch optimizer updating `network`'s parameters.
        loss: criterion mapping (output, target) -> scalar loss tensor.
        loggers: (logger, vizlogger) pair; logger records scalar stats,
            vizlogger renders the step curves.
    """
    logger, vizlogger = loggers
    printer = Printer(N=len(dataloader))
    logger.set_mode("train")

    for iteration, data in enumerate(dataloader):
        optimizer.zero_grad()

        inputs, target = data  # avoid shadowing the builtin `input`
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            target = target.cuda()

        output = network(inputs)
        batch_loss = loss(output, target)
        stats = {"loss": batch_loss.data.cpu().numpy()}

        batch_loss.backward()
        optimizer.step()

        printer.print(stats, iteration)
        logger.log(stats, iteration)
        vizlogger.plot_steps(logger.get_data())