Example #1
def preprocess(inp):
    # Read the DICOM slice (pydicom's legacy `dicom.read_file` API).
    slice1 = dicom.read_file(inp)
    img = slice1.pixel_array
    # The model expects a segmentation as input, so create a dummy one here
    # just to match the expected format.
    seg = img.copy()

    # Shift intensities so the minimum is zero, then pad/crop to 256x256.
    img -= img.min()
    augmentations = Compose([PaddingCenterCrop(256)])
    img, seg = augmentations(img.astype(np.uint32), seg.astype(np.uint8))
    
    # Z-score normalization; the epsilon guards against division by zero.
    mu = img.mean()
    sigma = img.std()
    img = (img - mu) / (sigma + 1e-10)

    # Replicate a single-channel slice into three channels.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=0)
        img = np.concatenate((img, img, img), axis=0)

    img = torch.from_numpy(img).float()
    mask = mask_to_edges(seg)
    seg = torch.from_numpy(seg).long()

    data_dict = {
        "image": img,
        "mask": (seg, mask),
    }

    print("finish preprocess")

    return data_dict
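
# A minimal usage sketch (assumes a pydicom-readable file; the path is
# hypothetical):
#
#   data = preprocess("slice_001.dcm")
#   print(data["image"].shape)   # e.g. torch.Size([3, 256, 256])
#   seg, edge_mask = data["mask"]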
Example #2
    def __init__(self, 
                 dataset_dir, 
                 training=True,
                 min_level=3,
                 max_level=7,
                 batch_size=32, 
                 input_size=(512, 512), 
                 augmentation=[],
                 **kwargs):
        self.dataset_dir = dataset_dir
        self.training = training
        self.batch_size = batch_size
        self.input_size = input_size

        self.num_images_per_record = 10000

        self.tf_record_sources = None
        
        self.min_level = min_level
        self.max_level = max_level
        if "anchor" in kwargs and kwargs["anchor"]:
            self.anchor_args = kwargs.pop("anchor")
            self.anchor_generator = AnchorGenerator()

        self.assigner_args = kwargs.pop("assigner")
        assigner_name = self.assigner_args["assigner"]
        self._use_fcos_assigner = assigner_name == "fcos_assigner"
        self._use_mask_assigner = "mask" in assigner_name
        self.assigner = build_assigner(**self.assigner_args)

        # Note: the default `augmentation=[]` is falsy, so test truthiness
        # rather than `is not None`; otherwise the default would build an
        # empty Compose.
        self.augment = Compose(input_size, augmentation) if augmentation else None
        self.test_process = RetinaCrop(input_size, training=False)
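
# A minimal construction sketch (the class name `DetectionDataset` and the
# assigner config are assumptions; `build_assigner` must accept the keys in
# `assigner_args`):
#
#   ds = DetectionDataset("/data/tfrecords",
#                         training=True,
#                         batch_size=8,
#                         assigner={"assigner": "fcos_assigner"})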
Example #3
def main(args):
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    
    unet = builder.build_unet(
        num_class=args.num_class,
        arch=args.arch_unet,
        weights=args.weights_unet)

    crit = Loss()

    segmentation_module = SegmentationModule(unet, crit)
    
    test_augs = Compose([PaddingCenterCrop(224)])
    
    dataset_val = AC17(
            root=args.data_root,
            split='val',
            k_split=args.k_split,
            augmentations=test_augs,
            img_norm=args.img_norm)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(
        ac17_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, args)

    print('Evaluation Done!')
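
# A hedged invocation sketch (the script name is hypothetical; flag names are
# inferred from the attribute accesses above, values are illustrative):
#
#   python eval.py --gpu 0 --num_class 4 --arch_unet AlbuNet \
#       --weights_unet ckpt/unet_best.pth --data_root /data/AC17 \
#       --k_split 1 --img_norm True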
Example #4
def main(args):
    # Network Builders
    builder = ModelBuilder()

    unet = builder.build_unet(
        num_class=args.num_class,
        arch=args.unet_arch,
        weights=args.weights_unet)

    print("Froze the following layers: ")
    for name, p in unet.named_parameters():
        if p.requires_grad == False:
            print(name)
    print()

    crit = DualLoss(mode="train")

    segmentation_module = SegmentationModule(crit, unet)

    test_augs = Compose([PaddingCenterCrop(256)])
    
    print("ready to load data")

    dataset_val = LungData( 
            root=args.data_root,
            split='test',
            k_split=args.k_split,
            augmentations=test_augs)

    
    loader_val = data.DataLoader(
        dataset_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    print(len(loader_val))

    # load nets into gpu
    if len(args.gpus) > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers
    # Only the U-Net path is built in this script; the encoder/decoder
    # variables from the non-U-Net variant are never defined here, so the
    # original conditional would raise a NameError when args.unet is False.
    nets = (unet, crit)
    optimizers = create_optimizers(nets, args)

    '''
    # Start the web app: the user uploads a .dcm file and gets back the
    # predicted segmentation image.
    inp = gradio.inputs.DcmUpload(preprocessing_fn=preprocess)
    #inp = gradio.inputs.ImageUpload(preprocessing_fn=preprocess)
    io = gradio.Interface(inputs=inp, outputs="image", model_type="lung_seg", model=segmentation_module, args=args)
    io.launch(validate=False)
    '''

    iou, loss = eval(loader_val, segmentation_module, args, crit)
    print('Evaluation Done!')
Example #5
def main(args):
    torch.cuda.set_device(args.gpu)

    # Network Builders
    builder = ModelBuilder()
    
    net_encoder = None
    net_decoder = None
    unet = None

    if not args.unet:
        net_encoder = builder.build_encoder(
            arch=args.arch_encoder,
            fc_dim=args.fc_dim,
            weights=args.weights_encoder)
        net_decoder = builder.build_decoder(
            arch=args.arch_decoder,
            fc_dim=args.fc_dim,
            num_class=args.num_class,
            weights=args.weights_decoder,
            use_softmax=True)
    else:
        unet = builder.build_unet(
            num_class=args.num_class,
            arch=args.arch_unet,
            weights=args.weights_unet)

    #crit = nn.NLLLoss()
    crit = ACLoss()

    if not args.unet:
        segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
    else:
        segmentation_module = SegmentationModule(net_encoder, net_decoder, crit,
                                                 is_unet=args.unet, unet=unet)
    '''
    # Dataset and Loader
    dataset_val = dl.loadVal()

    loader_val = torchdata.DataLoader(
        dataset_val,
        batch_size=5,
        shuffle=False,
        num_workers=1,
        drop_last=True)
    '''
    test_augs = Compose([PaddingCenterCrop(256)])
    
    dataset_val = AC17(
            root=args.data_root,
            split='val',
            k_split=args.k_split,
            augmentations=test_augs,
            img_norm=args.img_norm)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(
        ac17_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()

    # Main loop
    evaluate(segmentation_module, loader_val, args)

    print('Evaluation Done!')
Example #6
def main(args):
    # Network Builders
    builder = ModelBuilder()

    unet = builder.build_unet(
        num_class=args.num_class,
        arch=args.unet_arch,
        weights=args.weights_unet)

    print("Froze the following layers: ")
    for name, p in unet.named_parameters():
        if p.requires_grad == False:
            print(name)
    print()

    crit = DualLoss(mode="train")

    segmentation_module = SegmentationModule(crit, unet)

    train_augs = Compose([PaddingCenterCrop(256), RandomHorizontallyFlip(), RandomVerticallyFlip(), RandomRotate(180)])
    test_augs = Compose([PaddingCenterCrop(256)])

    # Dataset and Loader
    dataset_train = AC17(  # Loads 3D volumes
            root=args.data_root,
            split='train',
            k_split=args.k_split,
            augmentations=train_augs,
            img_norm=args.img_norm)
    ac17_train = load2D(dataset_train, split='train', deform=True)  # Dataloader for 2D slices; requires the 3D loader.

    loader_train = data.DataLoader(
        ac17_train,
        batch_size=args.batch_size_per_gpu,
        shuffle=True,
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)

    dataset_val = AC17(
            root=args.data_root,
            split='val',
            k_split=args.k_split,
            augmentations=test_augs,
            img_norm=args.img_norm)

    ac17_val = load2D(dataset_val, split='val', deform=False)

    loader_val = data.DataLoader(
        ac17_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    # load nets into gpu
    if len(args.gpus) > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()

    # Set up optimizers
    # As above, only the U-Net is built in this script, so avoid referencing
    # the undefined encoder/decoder variables.
    nets = (unet, crit)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': [], 'jaccard': []}}
    best_val = {'epoch_1': 0, 'mIoU_1': 0,
                'epoch_2': 0, 'mIoU_2': 0,
                'epoch_3': 0, 'mIoU_3': 0,
                'epoch' : 0, 'mIoU': 0}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, loader_train, optimizers, history, epoch, args)
        iou, loss = eval(loader_val, segmentation_module, args, crit)
        # Checkpointing policy: every 50th epoch and the final epoch always
        # checkpoint; otherwise checkpoint only after epoch 15 and only when
        # the loss is low enough or a new best per-class or mean mIoU is set.
        ckpted = False
        if loss < 0.215:
            ckpted = True
        if iou[0] > best_val['mIoU_1']:
            best_val['epoch_1'] = epoch
            best_val['mIoU_1'] = iou[0]
            ckpted = True
        if iou[1] > best_val['mIoU_2']:
            best_val['epoch_2'] = epoch
            best_val['mIoU_2'] = iou[1]
            ckpted = True
        if iou[2] > best_val['mIoU_3']:
            best_val['epoch_3'] = epoch
            best_val['mIoU_3'] = iou[2]
            ckpted = True
        if (iou[0] + iou[1] + iou[2]) / 3 > best_val['mIoU']:
            best_val['epoch'] = epoch
            best_val['mIoU'] = (iou[0] + iou[1] + iou[2]) / 3
            ckpted = True

        if epoch % 50 == 0 or epoch == args.num_epoch:
            checkpoint(nets, history, args, epoch)
            continue
        if epoch < 15:
            ckpted = False
        if ckpted:
            checkpoint(nets, history, args, epoch)

    print('Training Done!')
def main(args):
    # Network Builders
    builder = ModelBuilder()
    net_encoder = None
    net_decoder = None
    unet = None
    
    if not args.unet:
        net_encoder = builder.build_encoder(
            arch=args.arch_encoder,
            fc_dim=args.fc_dim,
            weights=args.weights_encoder)
        net_decoder = builder.build_decoder(
            arch=args.arch_decoder,
            fc_dim=args.fc_dim,
            num_class=args.num_class,
            weights=args.weights_decoder)
    else:
        unet = builder.build_unet(
            num_class=args.num_class,
            arch=args.unet_arch,
            weights=args.weights_unet)

        print("Froze the following layers: ")
        for name, p in unet.named_parameters():
            if p.requires_grad == False:
                print(name)
        print()
    
    crit = ACLoss(mode="train")
    #crit = nn.CrossEntropyLoss().cuda()
    #crit = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(50))
    #crit = nn.BCELoss()

    if args.arch_decoder.endswith('deepsup') and not args.unet:
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder, crit, args.deep_sup_scale)
    else:
        segmentation_module = SegmentationModule(
            net_encoder, net_decoder, crit, is_unet=args.unet, unet=unet)

    train_augs = Compose([PaddingCenterCrop(256), RandomHorizontallyFlip(), RandomVerticallyFlip(), RandomRotate(180)])
    test_augs = Compose([PaddingCenterCrop(256)])
    # Dataset and Loader
    dataset_train = AC17(
            root=args.data_root,
            split='train',
            k_split=args.k_split,
            augmentations=train_augs,
            img_norm=args.img_norm)
    ac17_train = load2D(dataset_train, split='train', deform=True)
    
    loader_train = data.DataLoader(
        ac17_train,
        batch_size=args.batch_size_per_gpu,  # we have modified data_parallel
        shuffle=True, 
        num_workers=int(args.workers),
        drop_last=True,
        pin_memory=True)
    dataset_val = AC17(
            root=args.data_root,
            split='val',
            k_split=args.k_split,
            augmentations=test_augs,
            img_norm=args.img_norm)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(
        ac17_val,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)
    # create loader iterator
    #iterator_train = iter(loader_train)

    # load nets into gpu
    if len(args.gpus) > 1:
        segmentation_module = UserScatteredDataParallel(
            segmentation_module,
            device_ids=args.gpus)
        # For sync bn
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()
    
    # Set up optimizers
    nets = (net_encoder, net_decoder, crit) if not args.unet else (unet, crit)
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {'train': {'epoch': [], 'loss': [], 'acc': [], 'jaccard': []}}
    best_val = {'epoch_1': 0, 'mIoU_1': 0,
                'epoch_2': 0, 'mIoU_2': 0,
                'epoch_3': 0, 'mIoU_3': 0,
                'epoch' : 0, 'mIoU': 0}

    for epoch in range(args.start_epoch, args.num_epoch + 1):
        train(segmentation_module, loader_train, optimizers, history, epoch, args)
        iou, loss = eval(loader_val, segmentation_module, args, crit)
        # Same checkpointing policy as in the previous training script.
        ckpted = False
        if loss < 0.215:
            ckpted = True
        if iou[0] > best_val['mIoU_1']:
            best_val['epoch_1'] = epoch
            best_val['mIoU_1'] = iou[0]
            ckpted = True
        if iou[1] > best_val['mIoU_2']:
            best_val['epoch_2'] = epoch
            best_val['mIoU_2'] = iou[1]
            ckpted = True
        if iou[2] > best_val['mIoU_3']:
            best_val['epoch_3'] = epoch
            best_val['mIoU_3'] = iou[2]
            ckpted = True
        if (iou[0] + iou[1] + iou[2]) / 3 > best_val['mIoU']:
            best_val['epoch'] = epoch
            best_val['mIoU'] = (iou[0] + iou[1] + iou[2]) / 3
            ckpted = True

        if epoch % 50 == 0 or epoch == args.num_epoch:
            checkpoint(nets, history, args, epoch)
            continue
        if epoch < 15:
            ckpted = False
        if ckpted:
            checkpoint(nets, history, args, epoch)
    
    #print("[Val] Class 1: Epoch " + str(best_val['epoch_1']) + " had the best mIoU of " + str(best_val['mIoU_1']) + ".")
    #print("[Val] Class 2: Epoch " + str(best_val['epoch_2']) + " had the best mIoU of " + str(best_val['mIoU_2']) + ".")
    #print("[Val] Class 3: Epoch " + str(best_val['epoch_3']) + " had the best mIoU of " + str(best_val['mIoU_3']) + ".")
    print('Training Done!')
    def _transform(self, img, mask):
        # If the input is 3-D (e.g. a volume or stacked slices), add a leading
        # channel axis and replicate it three times.
        if img.ndim == 3:
            img = np.expand_dims(img, axis=0)
            img = np.concatenate((img, img, img), axis=0)
        img = torch.from_numpy(img).float()
        mask = torch.from_numpy(mask).long()
        return img, mask
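
# A minimal sketch of what `_transform` produces (the 8x8x8 volume is a
# stand-in for real data; `dataset` is a hypothetical instance):
#
#   vol = np.random.rand(8, 8, 8).astype(np.float32)
#   msk = np.zeros((8, 8, 8), dtype=np.int64)
#   img_t, mask_t = dataset._transform(vol, msk)
#   print(img_t.shape)   # torch.Size([3, 8, 8, 8])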


if __name__ == "__main__":
    from pprint import pprint

    train_augs = Compose([
        PaddingCenterCrop(256),
        RandomHorizontallyFlip(),
        RandomVerticallyFlip(),
        RandomRotate(180)
    ])

    # root = "/home/hao/Downloads/COCO2CULane"
    #
    #
    # dataset_train = SideWalkData(
    #     root=root,
    #     split='train',
    #     k_split=1,
    #     augmentations=train_augs
    # )
    #
    # img = dataset_train[0]
    # # print(img)
def run_model(space):
    # Network Builders
    builder = ModelBuilder()
    net_encoder = None
    net_decoder = None

    unet = builder.build_unet(num_class=4, arch='AlbuNet', weights='')

    crit = ACLoss()

    segmentation_module = SegmentationModule(net_encoder,
                                             net_decoder,
                                             crit,
                                             is_unet=True,
                                             unet=unet)

    train_augs = Compose([
        PaddingCenterCrop(224),
        RandomHorizontallyFlip(),
        RandomVerticallyFlip(),
        RandomRotate(180)
    ])
    test_augs = Compose([PaddingCenterCrop(224)])
    # Dataset and Loader
    dataset_train = AC17(root=os.getenv('DATA_ROOT',
                                        '/home/rexma/Desktop/MRI_Images/AC17'),
                         split='train',
                         k_split=1,
                         augmentations=train_augs,
                         img_norm=True)
    ac17_train = load2D(dataset_train, split='train', deform=False)

    loader_train = data.DataLoader(
        ac17_train,
        batch_size=4,  # we have modified data_parallel
        shuffle=True,
        num_workers=5,
        drop_last=True,
        pin_memory=True)

    dataset_val = AC17(root=os.getenv('DATA_ROOT',
                                      '/home/rexma/Desktop/MRI_Images/AC17'),
                       split='val',
                       k_split=1,
                       augmentations=test_augs,
                       img_norm=True)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(ac17_val,
                                 batch_size=1,
                                 shuffle=False,
                                 collate_fn=user_scattered_collate,
                                 num_workers=5,
                                 drop_last=True)

    segmentation_module.cuda()

    # Set up optimizers
    nets = (unet, crit)
    optimizers = create_optimizers(nets, space)

    val_losses = []
    train_losses = []
    status = STATUS_OK
    print("Searching " + "lr: " + str(space['lr'])
          )  #+ " b1: " + str(space['b1']) + "b2: " + str(space['b2']))
    # Main loop
    for epoch in range(1, 31):
        t_iou = train(segmentation_module, loader_train, optimizers, epoch,
                      space)
        v_iou = eval(loader_val, segmentation_module, crit)
        train_losses.append(t_iou)
        val_losses.append(v_iou)
        # Abandon the trial early if the validation metric is still >= 1.0
        # after three epochs.
        if epoch == 3 and v_iou >= 1.0:
            status = STATUS_FAIL
            break

    # Values returned to the search driver (hyperopt-style loss/status dict).
    opt_name = 'lr' + str(space['lr'])  # + "_b1" + str(space['b1']) + "_b2" + str(space['b2'])
    model_dict = {'loss': min(val_losses), 'status': status, 'name': opt_name}
    return model_dict
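
# A hedged driver sketch using hyperopt (STATUS_OK/STATUS_FAIL above suggest
# hyperopt; the search-space bounds are illustrative):
#
#   import numpy as np
#   from hyperopt import fmin, tpe, hp, Trials
#
#   space = {'lr': hp.loguniform('lr', np.log(1e-5), np.log(1e-2))}
#   best = fmin(fn=run_model, space=space, algo=tpe.suggest,
#               max_evals=20, trials=Trials())
#   print(best)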