def test_supervised_epoch(self, loader, loss_fn, epoch, writer=None):
    """Run one evaluation pass over `loader` and return the mean loss.

    Accumulates the segmentation loss plus Jaccard / accuracy / precision /
    recall / F-score over all batches, prints the mean loss and, when a
    `writer` is given, logs every averaged scalar under the `test/` tags.

    :param loader: data loader yielding (input, target) batches
    :param loss_fn: loss function applied to (prediction, target)
    :param epoch: epoch index used as the logging step
    :param writer: optional tensorboard summary writer
    :return: mean loss over the epoch
    """
    # move the network to the GPU and switch to evaluation mode
    self.cuda()
    self.eval()

    # running totals for the loss and each metric
    totals = {'loss': 0.0, 'jaccard': 0.0, 'accuracy': 0.0,
              'precision': 0.0, 'recall': 0.0, 'f-score': 0.0}
    num_batches = 0

    for batch in loader:
        x, y = batch[0].cuda(), batch[1].cuda()

        # forward pass and loss
        y_pred = self(x)
        batch_loss = loss_fn(y_pred, y)
        totals['loss'] += batch_loss.data.cpu().numpy()
        num_batches += 1

        # foreground-class probabilities for the remaining metrics
        probs = F.softmax(y_pred, dim=1).data.cpu().numpy()[:, 1]
        labels = y.cpu().numpy()
        totals['jaccard'] += jaccard(probs, labels)
        acc, prec, rec, fsc = accuracy_metrics(probs, labels)
        totals['accuracy'] += acc
        totals['precision'] += prec
        totals['recall'] += rec
        totals['f-score'] += fsc

    # epoch averages
    avgs = {key: value / num_batches for key, value in totals.items()}
    print('[%s] Epoch %5d - Average test loss: %.6f'
          % (datetime.datetime.now(), epoch, avgs['loss']))

    # log the averaged scalars
    if writer is not None:
        writer.add_scalar('test/loss-seg', avgs['loss'], epoch)
        writer.add_scalar('test/jaccard', avgs['jaccard'], epoch)
        writer.add_scalar('test/accuracy', avgs['accuracy'], epoch)
        writer.add_scalar('test/precision', avgs['precision'], epoch)
        writer.add_scalar('test/recall', avgs['recall'], epoch)
        writer.add_scalar('test/f-score', avgs['f-score'], epoch)

    return avgs['loss']
""" print('[%s] Starting segmentation' % (datetime.datetime.now())) segmentation = segment_pixels(test_data, net, args.input_size, batch_size=args.batch_size, crf_iterations=args.crf_iterations, mu=mu, std=std) """ Validate the segmentation """ print('[%s] Validating segmentation' % (datetime.datetime.now())) test_data_labels = read_tif(args.data_labels, dtype='uint8') test_data_labels = normalize(test_data_labels, 0, 255) j = jaccard(segmentation, test_data_labels) d = dice(segmentation, test_data_labels) a, p, r, f = accuracy_metrics(segmentation, test_data_labels) print('[%s] RESULTS:' % (datetime.datetime.now())) print('[%s] Jaccard: %f' % (datetime.datetime.now(), j)) print('[%s] Dice: %f' % (datetime.datetime.now(), d)) print('[%s] Accuracy: %f' % (datetime.datetime.now(), a)) print('[%s] Precision: %f' % (datetime.datetime.now(), p)) print('[%s] Recall: %f' % (datetime.datetime.now(), r)) print('[%s] F-score: %f' % (datetime.datetime.now(), f)) """ Write out the results """ if args.write_dir is not None: print('[%s] Writing the output' % (datetime.datetime.now())) imwrite3D(segmentation, args.write_dir, rescale=True)
gamma=args.gamma, epochs=args.epochs, test_freq=args.test_freq,
              print_stats=args.print_stats, log_dir=args.log_dir)

""" Validate the trained network """
print('[%s] Validating the trained network' % (datetime.datetime.now()))
test_data = test.data
test_labels = test.labels
# segment with the network state at the end of training (last checkpoint)
segmentation_last_checkpoint = segment_pixels(test_data, net, args.input_size,
                                              batch_size=args.test_batch_size)
# overlap and accuracy metrics w.r.t. the test labels
j = jaccard(segmentation_last_checkpoint, test_labels)
d = dice(segmentation_last_checkpoint, test_labels)
a, p, r, f = accuracy_metrics(segmentation_last_checkpoint, test_labels)
print('[%s] RESULTS:' % (datetime.datetime.now()))
print('[%s] Jaccard: %f' % (datetime.datetime.now(), j))
print('[%s] Dice: %f' % (datetime.datetime.now(), d))
print('[%s] Accuracy: %f' % (datetime.datetime.now(), a))
print('[%s] Precision: %f' % (datetime.datetime.now(), p))
print('[%s] Recall: %f' % (datetime.datetime.now(), r))
print('[%s] F-score: %f' % (datetime.datetime.now(), f))
# reload the best checkpoint saved during training and segment again
net = torch.load(os.path.join(args.log_dir, 'best_checkpoint.pytorch'))
segmentation_best_checkpoint = segment_pixels(test_data, net, args.input_size,
                                              batch_size=args.test_batch_size)
j = jaccard(segmentation_best_checkpoint, test_labels)
def test_epoch(self, loader, loss_fn, epoch, writer=None, write_images=False):
    """Run one evaluation pass over `loader` and return the mean loss.

    Accumulates the segmentation loss plus Jaccard / accuracy / precision /
    recall / F-score over all batches, prints the mean loss, logs the
    averaged scalars to `writer` (if given) and optionally logs mid-slice
    images of the inputs, targets and predictions.

    :param loader: data loader yielding (input, target) batches
    :param loss_fn: loss function applied to (prediction, target)
    :param epoch: epoch index used as the logging step
    :param writer: optional tensorboard summary writer
    :param write_images: also log input/target/prediction images
    :return: mean loss over the epoch
    """
    # make sure network is on the gpu and in evaluation mode
    self.cuda()
    self.eval()
    # keep track of the average loss and metrics during the epoch
    loss_cum = 0.0
    j_cum = 0.0
    a_cum = 0.0
    p_cum = 0.0
    r_cum = 0.0
    f_cum = 0.0
    cnt = 0
    # test loss
    for i, data in enumerate(loader):
        # get the inputs
        x, y = data[0].cuda(), data[1].cuda()
        # forward prop
        y_pred = self(x)
        # compute loss
        loss = loss_fn(y_pred, y)
        loss_cum += loss.data.cpu().numpy()
        cnt += 1
        # compute other interesting metrics on the foreground probabilities
        y_ = F.softmax(y_pred, dim=1).data.cpu().numpy()[:, 1, ...]
        j_cum += jaccard(y_, y.cpu().numpy())
        a, p, r, f = accuracy_metrics(y_, y.cpu().numpy())
        a_cum += a
        p_cum += p
        r_cum += r
        f_cum += f
    # don't forget to compute the average and print it
    loss_avg = loss_cum / cnt
    j_avg = j_cum / cnt
    a_avg = a_cum / cnt
    p_avg = p_cum / cnt
    r_avg = r_cum / cnt
    f_avg = f_cum / cnt
    print('[%s] Epoch %5d - Average test loss: %.6f'
          % (datetime.datetime.now(), epoch, loss_avg))
    # log everything
    if writer is not None:
        # always log scalars
        # NOTE: this is a test epoch, so the segmentation loss goes under
        # 'test/loss-seg' (was mistakenly 'train/loss-seg', which clobbered
        # the training curve); matches test_supervised_epoch's tag.
        writer.add_scalar('test/loss-seg', loss_avg, epoch)
        writer.add_scalar('test/loss', loss_avg, epoch)
        writer.add_scalar('test/jaccard', j_avg, epoch)
        writer.add_scalar('test/accuracy', a_avg, epoch)
        writer.add_scalar('test/precision', p_avg, epoch)
        writer.add_scalar('test/recall', r_avg, epoch)
        writer.add_scalar('test/f-score', f_avg, epoch)
        if write_images:
            # write images: take the middle slice along dim 2 — assumes 5D
            # (batch, channel, depth, height, width) volumes; TODO confirm
            x = x[:, :, x.size(2) // 2, ...]
            y = y[:, :, y.size(2) // 2, ...]
            y_pred = y_pred[:, :, y_pred.size(2) // 2, ...]
            x = vutils.make_grid(x, normalize=True, scale_each=True)
            # only normalize when the tensor is not constant (min < max)
            y = vutils.make_grid(y, normalize=y.max() - y.min() > 0,
                                 scale_each=True)
            y_pred = vutils.make_grid(
                F.softmax(y_pred, dim=1)[:, 1:2, :, :].data,
                normalize=y_pred.max() - y_pred.min() > 0, scale_each=True)
            writer.add_image('test/x', x, epoch)
            writer.add_image('test/y', y, epoch)
            writer.add_image('test/y_pred', y_pred, epoch)
    return loss_avg