Example #1
    train_loader, val_loader = get_train_val_loaders(
        csv_path_train=csv_train,
        csv_path_val=csv_val,
        batch_size=bs,
        tg_size=tg_size,
        label_values=label_values,
        num_workers=args.num_workers)

    # grad_acc_steps: to train with an effective batch size of bs=N while only loading
    # mini-batches of size fake_bs=K, set grad_acc_steps = N/K - 1.
    # Example: bs=4, fake_bs=4 -> grad_acc_steps = 0 (default)
    # Example: bs=4, fake_bs=2 -> grad_acc_steps = 1
    # Example: bs=4, fake_bs=1 -> grad_acc_steps = 3
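    #
    # A minimal sketch (hypothetical loop, not part of this script) of how grad_acc_steps
    # is typically consumed during training, assuming the loader yields mini-batches of
    # size fake_bs and `criterion` is the training loss:
    #
    #   optimizer.zero_grad()
    #   for step, (inputs, targets) in enumerate(train_loader):
    #       loss = criterion(model(inputs.to(device)), targets.to(device))
    #       (loss / (grad_acc_steps + 1)).backward()   # scale so gradients average over the effective batch
    #       if step % (grad_acc_steps + 1) == grad_acc_steps:
    #           optimizer.step()                       # update once every grad_acc_steps + 1 mini-batches
    #           optimizer.zero_grad()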

    print('* Instantiating a {} model'.format(model_name))
    model = get_arch(model_name, n_classes=n_classes)
    model = model.to(device)

    print("Total params: {0:,}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))
    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr)

    ### TRAINING WITH PSEUDO-LABELS
    csv_test = args.csv_test
    path_test_preds = args.path_test_preds
    checkpoint_folder = args.checkpoint_folder
    if csv_test is not None:
        print(
            'Training with pseudo-labels, completing training set with predictions on test set'
        )
        from utils.get_loaders import build_pseudo_dataset
Example #2
    load_path_od = args.load_path_od
    load_path_mac = args.load_path_mac
    pretrained = args.pretrained
    bs = args.batch_size
    csv_test_od = args.csv_test_od
    csv_test_mac = args.csv_test_mac
    n_classes = args.n_classes
    tta = args.tta
    csv_out = args.csv_out

    ####################################################################################################################
    # build results for od-centered with OD model
    print('* Instantiating model {}, pretrained={}'.format(
        model_name, pretrained))
    model, mean, std = get_arch(model_name,
                                pretrained=pretrained,
                                n_classes=n_classes)

    model, stats = load_model(model, load_path_od, device='cpu')
    model = model.to(device)
    print("Total params: {0:,}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))
    print('* Creating Dataloaders, batch size = {:d}'.format(bs))
    test_loader = get_test_loader(csv_path_test=csv_test_od,
                                  batch_size=bs,
                                  mean=mean,
                                  std=std)

    if tta:
        probs_od, preds_od, labels = test_cls_tta_dihedral(model,
                                                           test_loader,
Example #3
    load_path_clarity = args.load_path_clarity

    pretrained = args.pretrained
    bs = args.batch_size
    csv_test_q = args.csv_test_q
    n_classes = args.n_classes
    tta = args.tta
    csv_out = args.csv_out

    ####################################################################################################################
    # build results for MT model
    n_classes = 18
    print('* Instantiating MT model {}, pretrained={}'.format(
        model_name_MT, pretrained))
    model, mean, std = get_arch(model_name_MT,
                                pretrained=pretrained,
                                n_classes=n_classes)

    model, stats = load_model(model, load_path_MT, device='cpu')
    model = model.to(device)
    print("Total params: {0:,}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))
    print('* Creating Dataloaders, batch size = {:d}'.format(bs))
    test_loader = get_test_loader(csv_path_test=csv_test_q_MT,
                                  batch_size=bs,
                                  mean=mean,
                                  std=std,
                                  qualities=True)

    probs_tta_q, preds_tta_q, probs_tta_a, preds_tta_a, probs_tta_c, preds_tta_c, probs_tta_f, preds_tta_f \
        = test_cls_tta_dihedral_MT(model, test_loader, n=3)
Example #4
    if isinstance(im_size, tuple) and len(im_size) == 1:
        tg_size = (im_size[0], im_size[0])
    elif isinstance(im_size, tuple) and len(im_size) == 2:
        tg_size = (im_size[0], im_size[1])
    else:
        sys.exit('im_size should be a number or a tuple of two numbers')

    if public: data_path = osp.join('data', dataset)
    else: data_path = osp.join('private_data', dataset)

    csv_path = 'test_all.csv'
    print('* Reading test data from ' + osp.join(data_path, csv_path))
    test_dataset = get_test_dataset(data_path,
                                    csv_path=csv_path,
                                    tg_size=tg_size)
    print('* Instantiating model  = ' + str(model_name))
    model = get_arch(model_name, in_c=in_c).to(device)
    if model_name == 'wnet': model.mode = 'eval'

    print('* Loading trained weights from ' + experiment_path)
    try:
        model, stats = load_model(model, experiment_path, device)
    except RuntimeError:
        sys.exit(
            '---- bad config specification (check layers, n_classes, etc.) ---- '
        )
    model.eval()

    save_results_path = osp.join(args.result_path, dataset, experiment_path)
    print('* Saving predictions to ' + save_results_path)
    times = []
    for i in tqdm(range(len(test_dataset))):
Example #5
        n_classes = 1
        label_values = [0, 255]


    print(f"* Creating Dataloaders, batch size = {bs}, workers = {args.num_workers}")
    train_loader, val_loader = get_train_val_loaders(csv_path_train=csv_train, csv_path_val=csv_val, batch_size=bs, tg_size=tg_size, label_values=label_values, num_workers=args.num_workers)

    # grad_acc_steps: to train with an effective batch size of bs=N while only loading
    # mini-batches of size fake_bs=K, set grad_acc_steps = N/K - 1.
    # Example: bs=4, fake_bs=4 -> grad_acc_steps = 0 (default)
    # Example: bs=4, fake_bs=2 -> grad_acc_steps = 1
    # Example: bs=4, fake_bs=1 -> grad_acc_steps = 3


    print('* Instantiating a {} model'.format(model_name))
    model = get_arch(model_name, n_classes=n_classes, compose='cat')
    model = model.to(device)

    print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr)

    ### TRAINING WITH PSEUDO-LABELS
    csv_test = args.csv_test
    path_test_preds = args.path_test_preds
    checkpoint_folder = args.checkpoint_folder
    if csv_test is not None:
        print('Training with pseudo-labels, completing training set with predictions on test set')
        from utils.get_loaders import build_pseudo_dataset
        tr_im_list, tr_gt_list, tr_mask_list = build_pseudo_dataset(csv_train, csv_test, path_test_preds)
        train_loader.dataset.im_list = tr_im_list
        train_loader.dataset.gt_list = tr_gt_list
Example #6
    if isinstance(im_size, tuple) and len(im_size) == 1:
        tg_size = (im_size[0], im_size[0])
    elif isinstance(im_size, tuple) and len(im_size) == 2:
        tg_size = (im_size[0], im_size[1])
    else:
        sys.exit('im_size should be a number or a tuple of two numbers')

    data_path = osp.join('data', dataset)

    csv_path = 'test_all.csv'
    print('* Reading test data from ' + osp.join(data_path, csv_path))
    test_dataset = get_test_dataset(data_path,
                                    csv_path=csv_path,
                                    tg_size=tg_size)
    print('* Instantiating model  = ' + str(model_name))
    model = get_arch(model_name, n_classes=4).to(device)
    if 'wnet' in model_name: model.mode = 'eval'

    print('* Loading trained weights from ' + experiment_path)
    try:
        model, stats = load_model(model, experiment_path, device)
    except RuntimeError:
        sys.exit(
            '---- bad config specification (check layers, n_classes, etc.) ---- '
        )
    model.eval()

    save_results_path = osp.join(args.results_path, dataset, experiment_path)
    print('* Saving predictions to ' + save_results_path)
    for i in tqdm(range(len(test_dataset))):
        im_tens, mask, coords_crop, original_sz, im_name = test_dataset[i]
Example #7
    train_loader, val_loader = get_train_val_loaders(
        csv_path_train=csv_train,
        csv_path_val=csv_val,
        batch_size=bs,
        tg_size=tg_size,
        label_values=label_values,
        num_workers=args.num_workers)

    # grad_acc_steps: to train with an effective batch size of bs=N while only loading
    # mini-batches of size fake_bs=K, set grad_acc_steps = N/K - 1.
    # Example: bs=4, fake_bs=4 -> grad_acc_steps = 0 (default)
    # Example: bs=4, fake_bs=2 -> grad_acc_steps = 1
    # Example: bs=4, fake_bs=1 -> grad_acc_steps = 3

    print('* Instantiating a {} model'.format(model_name))
    model = get_arch(model_name, in_c=args.in_c, n_classes=n_classes)
    model = model.to(device)

    print("Total params: {0:,}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))
    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr)

    ### TRAINING WITH PSEUDO-LABELS
    csv_test = args.csv_test
    path_test_preds = args.path_test_preds
    checkpoint_folder = args.checkpoint_folder
    if csv_test is not None:
        print(
            'Training with pseudo-labels, completing training set with predictions on test set'
        )
        from utils.get_loaders import build_pseudo_dataset
Example #8
        mask = get_fov(img)
        print('* FOV mask generated')
    else:
        mask = Image.open(mask_path).convert('L')
    mask = np.array(mask).astype(bool)

    img, coords_crop = crop_to_fov(img, mask)
    original_sz = img.size[1], img.size[0]  # in numpy convention

    rsz = p_tr.Resize(tg_size)
    tnsr = p_tr.ToTensor()
    tr = p_tr.Compose([rsz, tnsr])
    im_tens = tr(img)  # only transform image

    print('* Instantiating model  = ' + str(model_name))
    model = get_arch(model_name).to(device)
    if model_name == 'wnet': model.mode = 'eval'

    print('* Loading trained weights from ' + model_path)
    model, stats = load_model(model, model_path, device)
    model.eval()

    print('* Saving prediction to ' + im_path_out)
    start_time = time.perf_counter()
    full_pred, full_pred_bin = create_pred(model,
                                           im_tens,
                                           mask,
                                           coords_crop,
                                           original_sz,
                                           bin_thresh=bin_thresh,
                                           tta=tta)
Example #9
    load_path_f_f2 = 'experiments/best_f_f2'
    load_path_f_f3 = 'experiments/best_f_f3'
    load_path_f_f4 = 'experiments/best_f_f4'

    load_path_c_f1 = 'experiments/best_c_f1'
    load_path_c_f2 = 'experiments/best_c_f2'
    load_path_c_f3 = 'experiments/best_c_f3'
    load_path_c_f4 = 'experiments/best_c_f4'


    # ####################################################################################################################
    # # build results for MT model
    n_classes = 18
    # F1
    print('* Instantiating MT model {}, pretrained={}, fold 1/MT'.format(get_model_name(load_path_MT_f1), pretrained))
    model, mean, std = get_arch(get_model_name(load_path_MT_f1), pretrained=pretrained, n_classes=n_classes)
    model, stats = load_model(model, load_path_MT_f1, device='cpu')
    model = model.to(device)
    print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
    print('* Creating Dataloaders, batch size = {:d}'.format(bs))
    test_loader = get_test_loader(csv_path_test=csv_test_q_MT,  batch_size=bs, mean=mean, std=std, qualities=True)
    probs_qMT_f1, preds_qMT_f1, probs_aMT_f1, preds_aMT_f1, probs_cMT_f1, preds_cMT_f1, probs_fMT_f1, preds_fMT_f1\
        = test_cls_tta_dihedral_MT(model, test_loader, n=TTA_N)
    # F2
    print('* Instantiating MT model {}, pretrained={}, fold 2/MT'.format(get_model_name(load_path_MT_f2), pretrained))
    model, mean, std = get_arch(get_model_name(load_path_MT_f2), pretrained=pretrained, n_classes=n_classes)
    model, stats = load_model(model, load_path_MT_f2, device='cpu')
    model = model.to(device)
    print("Total params: {0:,}".format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
    print('* Creating Dataloaders, batch size = {:d}'.format(bs))
    test_loader = get_test_loader(csv_path_test=csv_test_q_MT,  batch_size=bs, mean=mean, std=std, qualities=True)