import logging
import os
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm

# Project-local helpers are assumed to be importable from elsewhere in this
# repository: accuracy, AverageMeter, as_numpy, async_copy_to,
# intersectionAndUnion, updateConfusionMatrix, plot_confusion_matrix,
# compute_mcc, find_constant_area.


def train_epoch(epoch, acc_top1, net, train_data, use_cuda, L, optimizer,
                # lr_scheduler,
                batch_size, log_interval):
    tic = time.time()

    net.train()
    acc_top1.reset()
    train_loss = 0.0

    btic = time.time()
    for i, (data, target) in enumerate(train_data):
        # Move the batch to the GPU if available.
        if use_cuda:
            data = data.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

        # Forward pass, loss, backward pass, and parameter update.
        output = net(data)
        loss = L(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()

        # Track running top-1 accuracy over the epoch.
        prec1 = accuracy(output, target, topk=(1,))
        acc_top1.update(prec1[0], data.size(0))

        if log_interval and not (i + 1) % log_interval:
            top1 = acc_top1.avg.item()
            err_top1_train = 1.0 - top1
            speed = batch_size * log_interval / (time.time() - btic)
            logging.info(
                'Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t'
                'top1-err={:.4f}\tlr={:.4f}'.format(
                    epoch + 1, i + 1, speed, err_top1_train,
                    optimizer.param_groups[0]['lr']))
            btic = time.time()

    # Epoch-level statistics.
    top1 = acc_top1.avg.item()
    err_top1_train = 1.0 - top1
    train_loss /= (i + 1)
    throughput = int(batch_size * (i + 1) / (time.time() - tic))

    logging.info('[Epoch {}] training: err-top1={:.4f}\tloss={:.4f}'.format(
        epoch + 1, err_top1_train, train_loss))
    logging.info(
        '[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec'.format(
            epoch + 1, throughput, time.time() - tic))

    return err_top1_train, train_loss
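
# ---------------------------------------------------------------------------
# The `accuracy` helper called by train_epoch above is not defined in this
# file. The sketch below is an assumption based on the common PyTorch
# precision@k recipe; it matches the train_epoch usage, where the returned
# values are fractions in [0, 1] (err-top1 is computed as 1.0 - top1). Note
# that evaluate() further down calls a *different* two-argument
# accuracy(pred, seg_label) on numpy arrays, which is a separate helper.
def accuracy(output, target, topk=(1,)):
    """Fraction of samples whose true class is in the top-k scores (sketch)."""
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the maxk highest-scoring classes per sample, shape (maxk, B).
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1.0 / batch_size))
    return res
# ---------------------------------------------------------------------------
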
def evaluate(segmentation_module, loader, cfg, gpu, activations, num_class,
             patch_size, patch_size_padded, class_names, channels, index_test,
             visualize, results_dir, arch_encoder):
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    acc_meter_patch = AverageMeter()
    intersection_meter_patch = AverageMeter()
    union_meter_patch = AverageMeter()
    time_meter = AverageMeter()

    # initiate confusion matrices (full padded patch and inner patch only)
    conf_matrix = np.zeros((num_class, num_class))
    conf_matrix_patch = np.zeros((num_class, num_class))

    # turn on for UMAP: buffers for per-patch activations over a constant-class
    # area (an 8x8 feature-map window at stride 4, i.e. 64 values per patch)
    area_activations_mean = np.zeros((len(index_test), 32 // 4 * 32 // 4))
    area_activations_max = np.zeros((len(index_test), 32 // 4 * 32 // 4))
    # np.int was removed in NumPy 1.24; plain int is equivalent
    area_cl = np.zeros((len(index_test),), dtype=int)
    area_loc = np.zeros((len(index_test), 3), dtype=int)
    j = 0

    segmentation_module.eval()

    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        torch.cuda.synchronize()
        tic = time.perf_counter()
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)

            # accumulate scores over all resized versions of the image
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)

                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())

        torch.cuda.synchronize()
        time_meter.update(time.perf_counter() - tic)

        # calculate accuracy and IoU, once on the full padded patch and once
        # on the inner (unpadded) patch only
        acc, pix = accuracy(pred, seg_label)
        acc_patch, pix_patch = accuracy(
            pred[patch_size:2 * patch_size, patch_size:2 * patch_size],
            seg_label[patch_size:2 * patch_size, patch_size:2 * patch_size])
        intersection, union = intersectionAndUnion(pred, seg_label, num_class)
        intersection_patch, union_patch = intersectionAndUnion(
            pred[patch_size:2 * patch_size, patch_size:2 * patch_size],
            seg_label[patch_size:2 * patch_size, patch_size:2 * patch_size],
            num_class)
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)
        acc_meter_patch.update(acc_patch, pix_patch)
        intersection_meter_patch.update(intersection_patch)
        union_meter_patch.update(union_patch)

        conf_matrix = updateConfusionMatrix(conf_matrix, pred, seg_label)
        # update conf matrix for the inner patch
        conf_matrix_patch = updateConfusionMatrix(
            conf_matrix_patch,
            pred[patch_size:2 * patch_size, patch_size:2 * patch_size],
            seg_label[patch_size:2 * patch_size, patch_size:2 * patch_size])

        # visualization: save the raw prediction array per image
        if visualize:
            info = batch_data['info']
            img_name = info.split('/')[-1]
            np.save(os.path.join(results_dir, img_name), pred)

        pbar.update(1)

        # turn on for UMAP
        # TODO: patch_size_padded must be patch_size if only the inner patch
        # is checked.
        row, col, cl = find_constant_area(seg_label, 32, patch_size_padded)
        if row != 999999:
            # mean- and max-pool the activations over channels for the 8x8
            # feature-map window covering the constant area (stride 4)
            activ_mean = np.mean(
                as_numpy(activations.features.squeeze(0).cpu()),
                axis=0, keepdims=True)[:, row // 4:row // 4 + 8,
                                       col // 4:col // 4 + 8].reshape(1, 8 * 8)
            activ_max = np.max(
                as_numpy(activations.features.squeeze(0).cpu()),
                axis=0, keepdims=True)[:, row // 4:row // 4 + 8,
                                       col // 4:col // 4 + 8].reshape(1, 8 * 8)
            area_activations_mean[j] = activ_mean
            area_activations_max[j] = activ_max
            area_cl[j] = cl
        else:
            # no constant-class area was found in this patch
            area_activations_mean[j] = np.full((1, 64), np.nan,
                                               dtype=np.float32)
            area_activations_max[j] = np.full((1, 64), np.nan,
                                              dtype=np.float32)
            area_cl[j] = 999999
        area_loc[j, 0] = row
        area_loc[j, 1] = col
        area_loc[j, 2] = int(batch_data['info'].split('.')[0])
        j += 1

    pbar.close()

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))
    iou_patch = intersection_meter_patch.sum / (union_meter_patch.sum + 1e-10)
    for i, _iou_patch in enumerate(iou_patch):
        print('class [{}], patch IoU: {:.4f}'.format(i, _iou_patch))

    print('[Eval Summary]:')
    print(
        'Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'.format(
            iou.mean(), acc_meter.average() * 100, time_meter.average()))
    print(
        'Patch: Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'.
        format(iou_patch.mean(), acc_meter_patch.average() * 100,
               time_meter.average()))

    print('Confusion matrix:')
    plot_confusion_matrix(conf_matrix, class_names, normalize=True,
                          title='confusion matrix patch+padding',
                          cmap=plt.cm.Blues)
    plot_confusion_matrix(conf_matrix_patch, class_names, normalize=True,
                          title='confusion matrix patch',
                          cmap=plt.cm.Blues)
    np.save(os.path.join(results_dir, 'confmatrix.npy'), conf_matrix)
    np.save(os.path.join(results_dir, 'confmatrix_patch.npy'),
            conf_matrix_patch)

    # turn on for UMAP
    np.save(os.path.join(results_dir, 'activations_mean.npy'),
            area_activations_mean)
    np.save(os.path.join(results_dir, 'activations_max.npy'),
            area_activations_max)
    np.save(os.path.join(results_dir, 'activations_labels.npy'), area_cl)
    np.save(os.path.join(results_dir, 'activations_loc.npy'), area_loc)

    mcc = compute_mcc(conf_matrix)
    mcc_patch = compute_mcc(conf_matrix_patch)

    # save summary of results in csv
    summary = pd.DataFrame(
        [[arch_encoder, patch_size, channels, acc_meter.average(),
          acc_meter_patch.average(), iou.mean(), iou_patch.mean(), mcc,
          mcc_patch]],
        columns=['model', 'patch_size', 'channels', 'test_accuracy',
                 'test_accuracy_patch', 'meanIoU', 'meanIoU_patch', 'mcc',
                 'mcc_patch'])
    summary.to_csv(os.path.join(results_dir, 'summary_results.csv'))
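
# ---------------------------------------------------------------------------
# The two confusion-matrix helpers used by evaluate are defined elsewhere in
# this repository. The sketches below are assumptions about their behavior,
# not the repository's actual implementations.
#
# updateConfusionMatrix: a minimal sketch assuming all labels are valid class
# indices in [0, num_class) (no ignore label), with rows = true class and
# columns = predicted class:
def updateConfusionMatrix(conf_matrix, pred, label):
    """Accumulate (true, predicted) pixel counts into conf_matrix (sketch)."""
    num_class = conf_matrix.shape[0]
    pred = np.asarray(pred).ravel().astype(np.int64)
    label = np.asarray(label).ravel().astype(np.int64)
    # one bincount over the flattened (true, pred) index pairs
    idx = label * num_class + pred
    counts = np.bincount(idx, minlength=num_class ** 2)
    return conf_matrix + counts.reshape(num_class, num_class)


# compute_mcc: a sketch assuming the standard multi-class Matthews
# correlation coefficient computed directly from the confusion matrix,
#   MCC = (c*s - sum_k t_k*p_k)
#         / sqrt((s^2 - sum_k p_k^2) * (s^2 - sum_k t_k^2)),
# where c = trace, s = total count, and t_k / p_k are the true / predicted
# counts of class k:
def compute_mcc(conf_matrix):
    """Multi-class MCC from a confusion matrix (sketch)."""
    C = np.asarray(conf_matrix, dtype=np.float64)
    t = C.sum(axis=1)   # per-class ground-truth counts
    p = C.sum(axis=0)   # per-class predicted counts
    c = np.trace(C)     # correctly classified samples
    s = C.sum()         # total samples
    denom = np.sqrt((s ** 2 - (p ** 2).sum()) * (s ** 2 - (t ** 2).sum()))
    return (c * s - (t * p).sum()) / (denom + 1e-10)
# ---------------------------------------------------------------------------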