def get_score(im_path, mask_path, threshold=0.5):
    """Score a single segmentation prediction with a trained quality-regression model.

    Parameters
    ----------
    im_path : str or Path
        Path to the prediction image; this can be a grayscale or binary prediction.
    mask_path : str or Path
        Path to the field-of-view mask; this should be a binary image.
    threshold : float, optional
        Binarization threshold applied to the prediction (default 0.5).

    Returns
    -------
    float
        Sigmoid-activated scalar score produced by the model.
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    # load image
    im = img_as_float(np.array(Image.open(im_path).convert('L')))  # this can be a grayscale or binary prediction
    im = im > threshold  # this is binary for sure
    mask = img_as_float(Image.open(mask_path).convert('L'))  # this should be a binary image
    mask = mask > 0.5  # this is binary for sure

    # load model; NOTE(review): checkpoint path is hard-coded — consider parameterizing.
    model = get_arch('resnet18')
    load_checkpoint = 'experiments/best_mse/'
    model, _ = load_model(model, load_checkpoint, device=device, with_opt=False)
    model.eval()

    im_prep = prepare_single_image(im, mask)
    # Inference only: no_grad avoids building the autograd graph (saves memory/time).
    with torch.no_grad():
        score = torch.sigmoid(model(im_prep.to(device))).item()
    return score
# NOTE(review): this chunk had been collapsed onto a single physical line (syntax error);
# reformatted to conventional Python. Logic preserved token-for-token.
bs = args.batch_size
csv_test_od = args.csv_test_od
csv_test_mac = args.csv_test_mac
n_classes = args.n_classes
tta = args.tta
csv_out = args.csv_out

####################################################################################################################
# build results for od-centered with OD model
print('* Instantiating model {}, pretrained={}'.format(model_name, pretrained))
model, mean, std = get_arch(model_name, pretrained=pretrained, n_classes=n_classes)
# Weights are restored on CPU first, then the model is moved to the target device.
model, stats = load_model(model, load_path_od, device='cpu')
model = model.to(device)
print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

print('* Creating Dataloaders, batch size = {:d}'.format(bs))
test_loader = get_test_loader(csv_path_test=csv_test_od, batch_size=bs, mean=mean, std=std)
# Optionally evaluate with dihedral test-time augmentation (n=3).
if tta:
    probs_od, preds_od, labels = test_cls_tta_dihedral(model, test_loader, n=3)
else:
    probs_od, preds_od, labels = test_cls(model, test_loader)
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it is cut mid-block at the
# start (json.dump of the CLI args inside a config-dump `with`/`if`) and ends with a dangling
# `else:` for the optimizer choice, so it cannot be safely reformatted without the surrounding
# lines. Visible logic: dump args to JSON, instantiate model via get_arch, optionally resume
# model + optimizer state from a checkpoint (load_model with with_opt=True), build train/val
# loaders, then select SGD (momentum=0.9) or Adam by `optimizer_choice`.
json.dump(vars(args), f, indent=2) else: experiment_path = None print('* Instantiating model {}, pretrained={}'.format( model_name, pretrained)) model, mean, std = get_arch(model_name, pretrained=pretrained, n_classes=n_classes) print("Total params: {0:,}".format( sum(p.numel() for p in model.parameters() if p.requires_grad))) if load_checkpoint != 'no': print('* Loading weights from previous checkpoint={}'.format( load_checkpoint)) model, stats, optimizer_state_dict = load_model(model, load_checkpoint, device='cpu', with_opt=True) model = model.to(device) print('* Creating Dataloaders, batch size = {:d}'.format(bs)) train_loader, val_loader = get_train_val_loaders(csv_path_train=csv_train, csv_path_val=csv_val, batch_size=bs, mean=mean, std=std) if optimizer_choice == 'sgd': optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) elif optimizer_choice == 'adam': optimizer = torch.optim.Adam(model.parameters(), lr=lr) else:
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it ends mid-call
# (`torch.nn.BCEWithLogitsLoss(` is left open), so it cannot be reformatted without the
# continuation. Visible logic: optionally extend the training set with pseudo-labels built
# from test-set predictions (build_pseudo_dataset), resume model + optimizer from a checkpoint
# folder, reset every param group's lr/initial_lr to max_lr, and build a CosineAnnealingLR
# scheduler whose T_max accounts for gradient-accumulation steps. max_lr and cycle_lens are
# stashed on the optimizer/scheduler via setattr for later access.
path_test_preds = args.path_test_preds checkpoint_folder = args.checkpoint_folder if csv_test is not None: print( 'Training with pseudo-labels, completing training set with predictions on test set' ) from utils.get_loaders import build_pseudo_dataset tr_im_list, tr_gt_list, tr_mask_list = build_pseudo_dataset( csv_train, csv_test, path_test_preds) train_loader.dataset.im_list = tr_im_list train_loader.dataset.gt_list = tr_gt_list train_loader.dataset.mask_list = tr_mask_list print('* Loading weights from previous checkpoint={}'.format( checkpoint_folder)) model, stats, optimizer_state_dict = load_model(model, checkpoint_folder, device=device, with_opt=True) optimizer.load_state_dict(optimizer_state_dict) for i, param_group in enumerate(optimizer.param_groups): param_group['lr'] = max_lr param_group['initial_lr'] = max_lr scheduler = CosineAnnealingLR(optimizer, T_max=cycle_lens[0] * len(train_loader) // (grad_acc_steps + 1), eta_min=min_lr) setattr(optimizer, 'max_lr', max_lr) # store it inside the optimizer for accessing to it later setattr(scheduler, 'cycle_lens', cycle_lens) criterion = torch.nn.BCEWithLogitsLoss(
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it ends mid-call
# (`create_pred(model, im_tens, mask,` is left open), so it cannot be reformatted without the
# continuation. Visible logic: pick public/private data path, load the test dataset, instantiate
# the segmentation model (wnet gets model.mode = 'eval'), load trained weights (sys.exit on a
# RuntimeError from a bad config), then iterate the test set producing full predictions while
# timing each one with time.perf_counter().
if public: data_path = osp.join('data', dataset) else: data_path = osp.join('private_data', dataset) csv_path = 'test_all.csv' print('* Reading test data from ' + osp.join(data_path, csv_path)) test_dataset = get_test_dataset(data_path, csv_path=csv_path, tg_size=tg_size) print('* Instantiating model = ' + str(model_name)) model = get_arch(model_name, in_c=in_c).to(device) if model_name == 'wnet': model.mode = 'eval' print('* Loading trained weights from ' + experiment_path) try: model, stats = load_model(model, experiment_path, device) except RuntimeError: sys.exit( '---- bad config specification (check layers, n_classes, etc.) ---- ' ) model.eval() save_results_path = osp.join(args.result_path, dataset, experiment_path) print('* Saving predictions to ' + save_results_path) times = [] for i in tqdm(range(len(test_dataset))): im_tens, mask, coords_crop, original_sz, im_name = test_dataset[i] start_time = time.perf_counter() full_pred = create_pred(model, im_tens, mask,
# NOTE(review): this chunk had been collapsed onto a single physical line (syntax error);
# reformatted to conventional Python. Logic preserved token-for-token.
pretrained = args.pretrained
bs = args.batch_size
csv_test_od = args.csv_test_od
csv_test_mac = args.csv_test_mac
n_classes = args.n_classes
tta = args.tta
csv_out = args.csv_out

####################################################################################################################
# build results for od-centered with OD models
####################################################################################################################
# FOLD 1
print('* Instantiating model {}, pretrained={}, trained on OD_fold 1'.format(model_name, pretrained))
model, mean, std = get_arch(model_name, pretrained=pretrained, n_classes=n_classes)
# Restore fold-1 weights on CPU, then move the model to the target device.
model, stats = load_model(model, load_path_od_f1, device='cpu')
model = model.to(device)
print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
print('* Creating Dataloaders, batch size = {:d}'.format(bs))
test_loader = get_test_loader(csv_path_test=csv_test_od, batch_size=bs, mean=mean, std=std)
probs_od_f1, preds_od_f1, labels = test_cls_tta_dihedral(model, test_loader, n=TTA_N)

# FOLD 2
print('* Instantiating model {}, pretrained={}, trained on OD_fold 2'.format(model_name, pretrained))
model, mean, std = get_arch(model_name, pretrained=pretrained, n_classes=n_classes)
model, stats = load_model(model, load_path_od_f2, device='cpu')
model = model.to(device)
print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
print('* Creating Dataloaders, batch size = {:d}'.format(bs))
test_loader = get_test_loader(csv_path_test=csv_test_od, batch_size=bs, mean=mean, std=std)
probs_od_f2, preds_od_f2, labels = test_cls_tta_dihedral(model, test_loader, n=TTA_N)

# FOLD 3
# NOTE(review): this chunk had been collapsed onto a single physical line (syntax error);
# reformatted to conventional Python. Logic preserved token-for-token.
bs = args.batch_size
csv_test_q = args.csv_test_q
n_classes = args.n_classes
tta = args.tta
csv_out = args.csv_out

####################################################################################################################
# build results for MT model
n_classes = 18  # multi-task head size; deliberately overrides the CLI value above
print('* Instantiating MT model {}, pretrained={}'.format(model_name_MT, pretrained))
model, mean, std = get_arch(model_name_MT, pretrained=pretrained, n_classes=n_classes)
model, stats = load_model(model, load_path_MT, device='cpu')
model = model.to(device)
print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

print('* Creating Dataloaders, batch size = {:d}'.format(bs))
# NOTE(review): the loader reads csv_test_q_MT (defined elsewhere), not the csv_test_q
# parsed above — confirm this is intentional.
test_loader = get_test_loader(csv_path_test=csv_test_q_MT, batch_size=bs, mean=mean, std=std, qualities=True)
probs_tta_q, preds_tta_q, probs_tta_a, preds_tta_a, probs_tta_c, preds_tta_c, probs_tta_f, preds_tta_f \
    = test_cls_tta_dihedral_MT(model, test_loader, n=3)

####################################################################################################################
# build results for QUALITY model
# NOTE(review): this chunk had been collapsed onto a single physical line (syntax error);
# reformatted to conventional Python. Logic preserved token-for-token.
bs = args.batch_size
csv_test_od = args.csv_test_od
csv_test_mac = args.csv_test_mac
n_classes = args.n_classes
tta = args.tta
csv_out = args.csv_out

####################################################################################################################
# build results for od-centered with OD_k model
print('* Instantiating model {}, pretrained={}'.format(model_name, pretrained))
model, mean, std = get_arch(model_name, pretrained=pretrained, n_classes=n_classes)
# Restore OD_k weights on CPU, then move the model to the target device.
model, stats = load_model(model, load_path_od_k, device='cpu')
model = model.to(device)
print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))

print('* Creating Dataloaders, batch size = {:d}'.format(bs))
test_loader = get_test_loader(csv_path_test=csv_test_od, batch_size=bs, mean=mean, std=std)
# Optionally evaluate with dihedral test-time augmentation (n=4 here, unlike n=3 elsewhere).
if tta:
    probs_od_k, preds_od_k, labels = test_cls_tta_dihedral(model, test_loader, n=4)
else:
    probs_od_k, preds_od_k, labels = test_cls(model, test_loader)
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it ends mid-call
# (`pd.DataFrame(` is left open), so it cannot be reformatted without the continuation.
# Visible logic: parse CLI args, force n_classes = 18 for the multi-task model, load MT
# weights on CPU then move to device, build a quality-aware test loader from csv_test_q,
# run dihedral TTA (n=3) producing quality/artefact/clarity/field probs+preds, and start
# assembling a results DataFrame pairing image paths with quality predictions.
bs = args.batch_size csv_test_q = args.csv_test_q n_classes = args.n_classes tta = args.tta csv_out = args.csv_out #################################################################################################################### # build results for MT model n_classes = 18 print('* Instantiating MT model {}, pretrained={}'.format( model_name_MT, pretrained)) model, mean, std = get_arch(model_name_MT, pretrained=pretrained, n_classes=n_classes) model, stats = load_model(model, load_path_MT, device='cpu') model = model.to(device) print("Total params: {0:,}".format( sum(p.numel() for p in model.parameters() if p.requires_grad))) print('* Creating Dataloaders, batch size = {:d}'.format(bs)) test_loader = get_test_loader(csv_path_test=csv_test_q, batch_size=bs, mean=mean, std=std, qualities=True) probs_tta_q, preds_tta_q, probs_tta_a, preds_tta_a, probs_tta_c, preds_tta_c, probs_tta_f, preds_tta_f \ = test_cls_tta_dihedral_MT(model, test_loader, n=3) df_quality = pd.DataFrame(zip(list(test_loader.dataset.im_list), preds_tta_q),
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it starts with a dangling
# `else:` whose `if` header is outside this view, so it cannot be reformatted without the
# preceding lines. Visible logic: load a boolean FOV mask, crop the image to the FOV, resize
# and tensorize it, instantiate a 4-class model (big_wnet gets model.mode = 'eval'), load
# trained weights, run create_pred with TTA, and save both soft and binarized predictions
# (warnings suppressed around imsave), reporting the elapsed time via time.perf_counter().
else: mask = Image.open(mask_path).convert('L') mask = np.array(mask).astype(bool) img, coords_crop = crop_to_fov(img, mask) original_sz = img.size[1], img.size[0] # in numpy convention rsz = p_tr.Resize(tg_size) tnsr = p_tr.ToTensor() tr = p_tr.Compose([rsz, tnsr]) im_tens = tr(img) # only transform image print('* Instantiating model = ' + str(model_name)) model = get_arch(model_name, n_classes=4).to(device) if model_name == 'big_wnet': model.mode='eval' print('* Loading trained weights from ' + model_path) model, stats = load_model(model, model_path, device) model.eval() print('* Saving prediction to ' + im_path_out) start_time = time.perf_counter() full_pred, full_pred_bin = create_pred(model, im_tens, mask, coords_crop, original_sz, tta=tta) with warnings.catch_warnings(): warnings.simplefilter("ignore") imsave(im_path_out, img_as_ubyte(full_pred)) imsave(im_path_out_bin, img_as_ubyte(full_pred_bin)) print('Done, time spent = {:.3f} secs'.format(time.perf_counter() - start_time))
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it is cut mid-call at both
# ends (opens with the tail `'L')` of an Image.open(...).convert call and closes with an
# unterminated `.format(`), so it cannot be reformatted without the surrounding lines.
# Visible logic: binarize two grayscale predictions against THRESHOLD, load a third image and a
# mask, load the resnet18 quality model from 'experiments/best_mse/', then score each prepared
# prediction with sigmoid(model(...)) while timing inference with time.time().
'L') # this is a grayscale prediction im1 = np.array(im1) > THRESHOLD im2 = Image.open('better.gif').convert( 'L') # this is a grayscale prediction im2 = np.array(im2) > THRESHOLD im3 = Image.open('perfect.gif').convert('L') # this is a binary image mask = Image.open('01_mask.gif') # this is a binary image # load model model = get_arch('resnet18') load_checkpoint = 'experiments/best_mse/' model, _ = load_model(model, load_checkpoint, device=device, with_opt=False) # generate results t0 = time.time() im_prep = prepare_single_image(im1, mask) score1 = torch.sigmoid(model(im_prep.to(device))).item() print('Worst segmentation score = {:.3f} (inference time = {:.3f})'.format( score1, time.time() - t0)) t0 = time.time() im_prep = prepare_single_image(im2, mask) score2 = torch.sigmoid(model(im_prep.to(device))).item() print( 'Better segmentation score = {:.3f} (inference time = {:.3f})'.format(
# NOTE(review): whitespace-mangled fragment collapsed onto one line; it ends on a cut
# line-continuation (`...preds_fMT_f2\` with no following statement), so it cannot be
# reformatted without the continuation. Visible logic: define checkpoint paths for the f/c
# folds, force n_classes = 18 for the multi-task model, and for MT folds 1 and 2 instantiate
# the arch via get_model_name(load_path), load weights on CPU, move to device, build a
# quality-aware test loader from csv_test_q_MT, and run dihedral TTA (n=TTA_N) yielding
# quality/artefact/clarity/field probs+preds per fold.
load_path_f_f3 = 'experiments/best_f_f3' load_path_f_f4 = 'experiments/best_f_f4' load_path_c_f1 = 'experiments/best_c_f1' load_path_c_f2 = 'experiments/best_c_f2' load_path_c_f3 = 'experiments/best_c_f3' load_path_c_f4 = 'experiments/best_c_f4' # #################################################################################################################### # # build results for MT model n_classes = 18 # F1 print('* Instantiating MT model {}, pretrained={}, fold 1/MT'.format(get_model_name(load_path_MT_f1), pretrained)) model, mean, std = get_arch(get_model_name(load_path_MT_f1), pretrained=pretrained, n_classes=n_classes) model, stats = load_model(model, load_path_MT_f1, device='cpu') model = model.to(device) print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad))) print('* Creating Dataloaders, batch size = {:d}'.format(bs)) test_loader = get_test_loader(csv_path_test=csv_test_q_MT, batch_size=bs, mean=mean, std=std, qualities=True) probs_qMT_f1, preds_qMT_f1, probs_aMT_f1, preds_aMT_f1, probs_cMT_f1, preds_cMT_f1, probs_fMT_f1, preds_fMT_f1\ = test_cls_tta_dihedral_MT(model, test_loader, n=TTA_N) # F2 print('* Instantiating MT model {}, pretrained={}, fold 2/MT'.format(get_model_name(load_path_MT_f2), pretrained)) model, mean, std = get_arch(get_model_name(load_path_MT_f2), pretrained=pretrained, n_classes=n_classes) model, stats = load_model(model, load_path_MT_f2, device='cpu') model = model.to(device) print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad))) print('* Creating Dataloaders, batch size = {:d}'.format(bs)) test_loader = get_test_loader(csv_path_test=csv_test_q_MT, batch_size=bs, mean=mean, std=std, qualities=True) probs_qMT_f2, preds_qMT_f2, probs_aMT_f2, preds_aMT_f2, probs_cMT_f2, preds_cMT_f2, probs_fMT_f2, preds_fMT_f2\