def evaluate(net, testloader, criterion='', model_name='im2height'):
    """Evaluate ``net`` on ``testloader`` and print average Loss/SSIM/L1/MSE.

    Args:
        net: trained model; assumed to already live on the chosen device
            (the ``net.cuda()`` call is commented out — TODO confirm callers
            move the model themselves).
        testloader: iterable yielding ``(image, mask)`` batches.
        criterion: callable loss ``criterion(output, mask)``. The default
            ``''`` is NOT callable — callers must pass a real loss function.
        model_name: unused here; kept for interface compatibility.
    """
    l1 = nn.L1Loss()
    mse = nn.MSELoss()
    use_gpu = torch.cuda.is_available()
    device = 'cuda:0' if use_gpu else 'cpu'
    if use_gpu:
        print('Using CUDA')
        # net.cuda()

    since = time.time()
    running_loss = 0.0
    running_l1 = 0.0
    running_mse = 0.0
    running_ssim = 0.0
    # Fix: the original divided the sample-weighted sums by the figure
    # counter ``i`` (processed batches + 1) — an off-by-one and the wrong
    # denominator. Track the true number of samples that contributed.
    total_samples = 0

    net.eval()
    i = 1  # 1-based index used only to name saved figures
    for image, mask in tqdm(testloader):
        image = image.to(device)
        mask = mask.to(device)
        with torch.set_grad_enabled(False):
            output = net(image)
            # Keep the full tensors for figure export before the non-zero
            # filtering below discards pixels.
            _output, _image, _mask = output, image, mask
            ssim_value = ssim(output, mask)
            mask, output = get_nonzero_value(mask, output)
            if mask.size(0) == 0:
                # Batch has no valid (non-zero) pixels; skip its metrics.
                del image, mask, output
                torch.cuda.empty_cache()
                continue
            loss = criterion(output, mask)
            l1_value = l1(output, mask)
            mse_value = mse(output, mask)
            save_fig(_image, _mask, _output, i)
            i += 1

        batch_samples = image.size(0)
        total_samples += batch_samples
        running_loss += loss.item() * batch_samples
        running_ssim += ssim_value.item() * batch_samples
        running_l1 += l1_value.item() * batch_samples
        running_mse += mse_value.item() * batch_samples

        del image, mask, output
        torch.cuda.empty_cache()

    # Guard against an empty loader / every batch skipped.
    denom = total_samples if total_samples else 1
    epoch_loss = running_loss / denom
    epoch_ssim = running_ssim / denom
    epoch_l1 = running_l1 / denom
    epoch_mse = running_mse / denom
    print('{} -> Loss: {:.4f} SSIM: {:.4f} L1: {:.4f} MSE: {:.4f}'.format(
        'Evaluate', epoch_loss, epoch_ssim, epoch_l1, epoch_mse))
    print('\ttime', time.time() - since)
def train(net, dataloader, criterion=None, optimizer=None, num_epochs=100,
          model_name='im2height', learning_rate=1e-4, comment='comment'):
    """Train ``net`` with an SSIM-based loss, logging to TensorBoard.

    Args:
        net: model to train; assumed already on the chosen device (the
            ``net.cuda()`` call is commented out — TODO confirm).
        dataloader: dict with ``'train'`` and ``'val'`` loaders yielding
            ``(image, mask)`` batches.
        criterion: unused (the SSIM loss below is used instead); kept for
            interface compatibility.
        optimizer: optimizer over ``net.parameters()``; required (no default
            is constructed here).
        num_epochs: maximum number of epochs.
        model_name: basename for the ``.pt`` checkpoint of the best model.
        learning_rate: unused here (the caller configures ``optimizer``);
            kept for interface compatibility.
        comment: suffix passed to the SummaryWriters.

    Saves ``{model_name}.pt`` whenever the validation SSIM improves and
    returns early when EarlyStopping fires on validation SSIM.
    """
    ssim_criterion = SSIM()
    use_gpu = torch.cuda.is_available()
    device = 'cuda:0' if use_gpu else 'cpu'
    if use_gpu:
        print('Using CUDA')
        # net.cuda()

    # Hard-coded dataset sizes — presumably match the project's fixed
    # train/val split; verify against the actual datasets.
    train_size = 2376
    valid_size = 370
    since = time.time()
    now = date.today().strftime('%d-%m-%Y_') + datetime.now().strftime('%H:%M:%S')
    train_writer = SummaryWriter(log_dir='logs-tensorboard/%s/train' % now,
                                 comment='-' + comment)
    val_writer = SummaryWriter(log_dir='logs-tensorboard/%s/val' % now,
                               comment='-' + comment)
    ssim_writer = SummaryWriter(log_dir='logs-tensorboard/%s/ssim' % now,
                                comment='-' + comment)
    es = EarlyStopping(mode='max', patience=5)
    best_ssim = 0.0
    scheduler = MultiStepLR(optimizer, milestones=[10], gamma=0.1)

    for epoch in range(num_epochs):
        start = time.time()
        print("Epoch {}/{}".format(epoch, num_epochs))
        print('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()
            else:
                net.eval()

            running_loss = 0.0
            running_ssim = 0.0
            i = 0  # per-epoch batch index for the per-batch ssim_writer curves
            for image, mask in tqdm(dataloader[phase]):
                image = image.to(device)
                mask = mask.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    output = net(image)
                    # loss = criterion(output, mask)
                    mask.requires_grad = False
                    # SSIM is a similarity in [0, 1]; 1 - SSIM is the loss.
                    loss = 1 - ssim_criterion(output, mask)
                    ssim_value = ssim(output, mask)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                ssim_writer.add_scalar('loss %s' % str(epoch), loss.item(), i)
                ssim_writer.add_scalar('ssim %s' % str(epoch), ssim_value.item(), i)
                i += 1
                running_loss += loss.item() * image.size(0)
                running_ssim += ssim_value.item() * image.size(0)
                del image, mask, output
                torch.cuda.empty_cache()

            if phase == 'train':
                scheduler.step()
            data_size = train_size if phase == 'train' else valid_size
            epoch_loss = running_loss / data_size
            epoch_ssim = running_ssim / data_size
            print('{} -> Loss: {:.7f} SSIM: {:.7f}'.format(
                phase, epoch_loss, epoch_ssim))
            print('\ttime', time.time() - start)

            if phase == 'train':
                train_writer.add_scalar('L1Loss', epoch_loss, epoch)
                train_writer.add_scalar('SSIM', epoch_ssim, epoch)
            if phase == 'val':
                val_writer.add_scalar('L1Loss', epoch_loss, epoch)
                val_writer.add_scalar('SSIM', epoch_ssim, epoch)
                # Fix: early stopping and best-model checkpointing previously
                # ran for BOTH phases, so they also reacted to train-phase
                # SSIM. They must track validation SSIM only (as the printed
                # "Best val ssim" message implies).
                if es.step(epoch_ssim):
                    time_elapsed = time.time() - since
                    print('Early Stopping')
                    print('Training complete in {:.0f}m {:.0f}s'.format(
                        time_elapsed // 60, time_elapsed % 60))
                    print('Best val ssim: {:7f}'.format(best_ssim))
                    return
                if epoch_ssim > best_ssim:
                    best_ssim = epoch_ssim
                    print('Update best loss: {:7f}'.format(best_ssim))
                    torch.save(net.state_dict(), '{}.pt'.format(model_name))
# NOTE(review): the first part of this chunk duplicates the epoch-end
# logging / early-stopping / checkpoint tail that also appears earlier in the
# file; the header of the `train` variant it belongs to is not visible here,
# so it is left byte-identical — likely a stale copy; confirm before removing.
# NOTE(review): the trailing `if __name__ == '__main__':` block is the script
# entry point: a quick SSIM sanity check on random tensors. `i2` is created
# but never used, and `ssim(i1, i1)` compares a tensor with itself
# (expected value ~1.0) — presumably intentional as a self-similarity check.
print('\ttime', time.time() - start) if phase == 'train': train_writer.add_scalar('L1Loss', epoch_loss, epoch) train_writer.add_scalar('SSIM', epoch_ssim, epoch) if phase == 'val': val_writer.add_scalar('L1Loss', epoch_loss, epoch) val_writer.add_scalar('SSIM', epoch_ssim, epoch) # ssim = epoch_ssim if es.step(epoch_ssim): time_elapsed = time.time() - since print('Early Stopping') print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val ssim: {:7f}'.format(best_ssim)) return if epoch_ssim > best_ssim: best_ssim = epoch_ssim print('Update best loss: {:7f}'.format(best_ssim)) torch.save(net.state_dict(), '{}.pt'.format(model_name)) if __name__ == '__main__': i1 = torch.rand((1, 1, 256, 256)) i2 = torch.rand((1, 1, 256, 256)) loss = ssim(i1, i1) print(loss.item())
def train(net, dataloader, num_epochs=100, model_name='im2height', learning_rate=1e-4):
    """Train ``net`` with an L1 loss, logging loss and SSIM to TensorBoard.

    Args:
        net: model to train; moved to the GPU here when CUDA is available.
        dataloader: dict with ``'train'`` and ``'val'`` loaders yielding
            ``(image, mask)`` batches.
        num_epochs: maximum number of epochs.
        model_name: basename for the ``.pt`` checkpoint of the best model.
        learning_rate: Adam learning rate.

    Saves ``{model_name}.pt`` whenever the validation SSIM improves and
    returns early when EarlyStopping fires on validation SSIM.
    """
    # Constructed for its side effects (project logger); not used below.
    logger = Logger('im2hi')
    use_gpu = torch.cuda.is_available()
    device = 'cuda:0' if use_gpu else 'cpu'
    if use_gpu:
        print('Using CUDA')
        net.cuda()

    since = time.time()
    train_writer = SummaryWriter(log_dir='logs-tensorboard/train')
    val_writer = SummaryWriter(log_dir='logs-tensorboard/val')
    es = EarlyStopping(mode='max', patience=10)
    criterion = nn.L1Loss()
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    best_ssim = 0.0

    for epoch in range(num_epochs):
        start = time.time()
        print("Epoch {}/{}".format(epoch, num_epochs))
        print('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()
            else:
                net.eval()

            running_loss = 0.0
            running_ssim = 0.0
            # Fix: the original divided a sample-weighted loss sum and an
            # UNweighted SSIM sum by the number of batches
            # (len(dataloader[phase])) — inconsistent averaging. Weight both
            # by batch size and divide by the true sample count.
            seen = 0
            for image, mask in tqdm(dataloader[phase]):
                image = image.to(device)
                mask = mask.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    output = net(image)
                    loss = criterion(output, mask)
                    ssim_value = ssim(output, mask)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                batch_samples = image.size(0)
                seen += batch_samples
                running_loss += loss.item() * batch_samples
                running_ssim += ssim_value.item() * batch_samples
                del image, mask, output
                torch.cuda.empty_cache()

            denom = seen if seen else 1  # guard: empty loader
            epoch_loss = running_loss / denom
            epoch_ssim = running_ssim / denom
            print('{} -> Loss: {:.4f} SSIM: {:.4f}'.format(
                phase, epoch_loss, epoch_ssim))
            print('\ttime', time.time() - start)

            if phase == 'train':
                train_writer.add_scalar('L1Loss', epoch_loss, epoch)
                train_writer.add_scalar('SSIM', epoch_ssim, epoch)
            if phase == 'val':
                val_writer.add_scalar('L1Loss', epoch_loss, epoch)
                val_writer.add_scalar('SSIM', epoch_ssim, epoch)
                # Fix: early stopping and checkpointing previously ran for
                # BOTH phases, so they also reacted to train-phase SSIM.
                # They must track validation SSIM only.
                if es.step(epoch_ssim):
                    time_elapsed = time.time() - since
                    print('Early Stopping')
                    print('Training complete in {:.0f}m {:.0f}s'.format(
                        time_elapsed // 60, time_elapsed % 60))
                    print('Best val ssim: {:4f}'.format(best_ssim))
                    return
                if epoch_ssim > best_ssim:
                    best_ssim = epoch_ssim
                    print('Update best loss: {:4f}'.format(best_ssim))
                    torch.save(net.state_dict(), '{}.pt'.format(model_name))