Example #1
    # Transformation parameters
    transform = tsfrm.Compose([
        tsfrm.RandomHorizontalFlip(p=0.5),
        tsfrm.RandomVerticalFlip(p=0.5),
        tsfrm.RandomAffine(90,
                           translate=(0.15, 0.15),
                           scale=(0.75, 1.5),
                           resample=3,  # 3 = PIL.Image.BICUBIC
                           fillcolor=0)
    ])

    # Dataset definitions
    dataset_train = OvaryDataset(im_dir='../datasets/ovarian/im/train/',
                                 gt_dir='../datasets/ovarian/gt/train/',
                                 clahe=False,
                                 transform=transform,
                                 ovary_inst=has_ovary)
    dataset_val = OvaryDataset(im_dir='../datasets/ovarian/im/val/',
                               gt_dir='../datasets/ovarian/gt/val/',
                               clahe=False,
                               transform=False,
                               ovary_inst=has_ovary)

    # Optimization
    #optimizer = optim.Adam(model.parameters(), lr=0.001)
    optimizer = optim.SGD(model.parameters(),
                          lr=0.005,
                          momentum=0.9,
                          weight_decay=0.0005)
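The snippet ends after the optimizer setup. As a rough, generic sketch of how these pieces are usually wired together in PyTorch (the DataLoader settings, the loss function, and the assumption that the dataset yields (image, mask) tensor pairs are all mine, not part of the example):

import torch
from torch.utils.data import DataLoader

# Hypothetical training loop; batch size, epoch count, and loss are assumptions.
loader_train = DataLoader(dataset_train, batch_size=4, shuffle=True)
criterion = torch.nn.CrossEntropyLoss()

model.train()
for epoch in range(10):
    for img, gt in loader_train:
        optimizer.zero_grad()
        pred = model(img)
        loss = criterion(pred, gt)  # assumes gt holds class-index masks
        loss.backward()
        optimizer.step()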
Example #2
]
gt_list = [
    "../datasets/ovarian/gt/test", "../datasets/ovarian/gt/train",
    "../datasets/ovarian/gt/val"
]

widths = []
heights = []

# Read all datasets
for (path_im, path_gt) in zip(im_list, gt_list):

    # Pre-set the dataset for this split
    dataset = OvaryDataset(im_dir=path_im,
                           gt_dir=path_gt,
                           clahe=False,
                           transform=False,
                           ovary_inst=ovary)
    # Loader
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             collate_fn=dataset.collate_fn_yolo)

    # Iterate over batches
    for _, (fname, img, targets) in enumerate(data_loader):
        # Load data

        img_size = img.shape[-1]

        # read each image in batch
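The inner loop is cut off, but it presumably appends per-image dimensions to widths and heights. Once those lists are filled, they can be summarized with plain NumPy; the statistics below are illustrative only, not part of the original:

import numpy as np

# Illustrative summary of the collected image dimensions.
w = np.array(widths)
h = np.array(heights)
print('width  min/mean/max:', w.min(), w.mean(), w.max())
print('height min/mean/max:', h.min(), h.mean(), h.max())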
Example #3
    opt = parser.parse_args()
    print(opt)

    # Classes names
    class_names = ['background', 'follicle', 'ovary']

    # Get data configuration
    n_classes = opt.num_classes
    has_ovary = n_classes > 2

    weights_path = opt.weights_path

    # Dataset
    dataset = OvaryDataset(im_dir='../datasets/ovarian/im/test/',
                           gt_dir='../datasets/ovarian/gt/test/',
                           clahe=False,
                           transform=False,
                           ovary_inst=has_ovary)
    data_loader = DataLoader(dataset,
                             batch_size=opt.batch_size,
                             shuffle=False,
                             collate_fn=dataset.collate_fn_rcnn)

    # Get device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Initialize model
    model = FasterRCNN(num_channels=opt.num_channels,
                       num_classes=n_classes,
                       pretrained=True).to(device)
    if weights_path is not None:
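The example is cut off at the weights check. A plausible way to finish it, assuming the checkpoint at weights_path is a plain state_dict (the checkpoint format is an assumption):

    if weights_path is not None:
        # map_location lets the checkpoint load on CPU-only machines as well.
        model.load_state_dict(torch.load(weights_path, map_location=device))
    model.eval()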
Example #4
    elif net_type == 'unet_light':
        model = UnetLight(n_channels=in_channels, n_classes=n_classes, bilinear=bilinear)
    elif net_type == 'sp_unet':
        model = SpatialPyramidUnet(n_channels=in_channels, n_classes=n_classes, bilinear=bilinear)
    elif net_type == 'sp_unet2':
        model = SpatialPyramidUnet2(n_channels=in_channels, n_classes=n_classes, bilinear=bilinear)
    elif net_type == 'd_unet':
        model = DilatedUnet2(n_channels=in_channels, n_classes=n_classes, bilinear=bilinear)
    else:
        model = Unet2(n_channels=in_channels, n_classes=n_classes, bilinear=bilinear)

    # Use CUDA if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Dataset definitions
    dataset_test = OvaryDataset(im_dir='../datasets/ovarian/im/test/',
                                gt_dir='../datasets/ovarian/gt/test/')

    # Test network model
    print('Testing')
    print('')
    weights_path = folder_weights + train_name + '_weights.pth.tar'
    # Output folder
    out_folder = folder_preds + train_name + '/'
    if not os.path.exists(out_folder):
        os.makedirs(out_folder)
    # Set up inference
    inference = Inference(model, device, weights_path,
                          batch_size=batch_size, folder=out_folder)
    # Run inference
    inference.predict(dataset_test)
Example #5
]
data_table = [table_header]

# Bounding-box colors
cmap = plt.get_cmap("tab20b")
colormap = [cmap(i) for i in np.linspace(0, 1, 20)]
# colors = random.sample(colormap, len(dataset_names))
colors = [colormap[1], colormap[6], colormap[14]]

# Read datasets
for dname, fname in zip(dataset_names, dataset_folder):
    # Set paths
    path_im = dataset_path + '/im/' + fname + '/'
    path_gt = path_im.replace('/im/', '/gt/')
    # pre-set dataset
    dataset = OvaryDataset(im_dir=path_im, gt_dir=path_gt, ovary_inst=True)
    # Define PyTorch data loader
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             collate_fn=dataset.collate_fn_rcnn)
    # Iterate dataset
    for batch_idx, (names, imgs, targets) in enumerate(data_loader):
        # Iterate inside batch
        for i in range(len(names)):
            filename = names[i]
            full_path = os.path.join(path_im, filename)
            # Create plot
            img = np.array(
                Image.open(full_path).convert('RGB')
            )  # Convert to RGB so the saved image keeps its original grayscale appearance
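The example stops right after loading the image; presumably the plot then overlays the ground-truth boxes. A sketch of that step with matplotlib, assuming targets[i]['boxes'] holds (x1, y1, x2, y2) boxes in the torchvision Faster R-CNN convention (an assumption, since collate_fn_rcnn is not shown):

import matplotlib.patches as patches

fig, ax = plt.subplots(1)
ax.imshow(img)
# One rectangle per ground-truth box; (x1, y1, x2, y2) format is assumed.
for box in targets[i]['boxes']:
    x1, y1, x2, y2 = box
    rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1,
                             linewidth=2, edgecolor=colors[0], facecolor='none')
    ax.add_patch(rect)
ax.axis('off')
plt.show()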
Example #6
        tsfrm.RandomVerticalFlip(p=0.5),
        tsfrm.RandomAffine(90,
                           translate=(0.15, 0.15),
                           scale=(0.75, 1.5),
                           resample=3,
                           fillcolor=0)
    ])

    # Dataset definitions
    if dataset_name == 'ovarian':
        im_dir = '../datasets/ovarian/im/'
        gt_dir = '../datasets/ovarian/gt/'
        dataset_train = OvaryDataset(im_dir=im_dir + 'train/',
                                     gt_dir=gt_dir + 'train/',
                                     imap=interaction,
                                     clahe=clahe,
                                     ovary_inst=train_with_targets,
                                     out_tuple=train_with_targets,
                                     transform=transform)
        dataset_val = OvaryDataset(im_dir=im_dir + 'val/',
                                   gt_dir=gt_dir + 'val/',
                                   imap=interaction,
                                   clahe=clahe)
        dataset_test = OvaryDataset(im_dir=im_dir + 'test/',
                                    gt_dir=gt_dir + 'test/',
                                    imap=interaction,
                                    clahe=clahe)
    else:
        im_dir = '../datasets/voc2012/JPEGImages/'
        gt_dir = '../datasets/voc2012/SegmentationClass/'
        list_dir = '../datasets/voc2012/'