Example #1
    def __init__(
        self,
        images_dir: str,
        mats_dir: str,
        landmarks_dir: str = None,
        transform: bool = False,
    ):
        self.images, self.volumes, self.landmarks = [], [], []
        self.transform = transform

        if transform:
            self.tf_flip = data_transform.Flip()
            self.tf_rotate = data_transform.Rotation()
            self.tf_translate = data_transform.Translation()
            self.tf_scale = data_transform.Scale()

        # Index parallel lists of image / volume (/ landmark) paths that share a file stem.
        for i in os.listdir(images_dir):
            name = os.path.splitext(i)[0]  # file stem; robust to extra dots in the filename

            self.images += [os.path.join(images_dir, name + ".jpg")]
            self.volumes += [os.path.join(mats_dir, name + ".mat")]

            if landmarks_dir:
                self.landmarks += [os.path.join(landmarks_dir, name + ".mat")]

        assert len(self.images) == len(self.volumes)
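
Example #1 assumes that every image in images_dir has a same-stem .mat partner in mats_dir (and optionally in landmarks_dir). Below is a small self-contained sketch of that pairing logic, useful for sanity-checking a directory layout before constructing the dataset; the helper name index_pairs is illustrative, not from the original source.

import os

def index_pairs(images_dir, mats_dir):
    """Sketch: pair NAME.jpg images with same-stem NAME.mat volumes."""
    pairs = []
    for fname in sorted(os.listdir(images_dir)):
        name, ext = os.path.splitext(fname)
        if ext.lower() not in (".jpg", ".jpeg"):
            continue
        mat_path = os.path.join(mats_dir, name + ".mat")
        if os.path.isfile(mat_path):
            pairs.append((os.path.join(images_dir, fname), mat_path))
    return pairs
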
Example #2
    def __init__(self,
                 root_folder,
                 folder=None,
                 num_classes=100,
                 split="train",
                 img_transforms=None):
        assert split in ["train", "val", "test"]
        # root folder, split
        self.root_folder = root_folder
        self.split = split
        self.img_transforms = img_transforms
        self.n_classes = num_classes

        # define mask transforms
        mask_transforms = []
        mask_transforms.append(transforms.Resize((256, 256)))  # Resize replaces the deprecated transforms.Scale
        mask_transforms.append(transforms.ToTensor())
        self.mask_transforms = transforms.Compose(mask_transforms)

        # load all labels
        if folder is None:
            folder = os.path.join(root_folder,
                                  "ISIC2018_Task1_Training_GroundTruth")
        if not os.path.exists(folder):
            raise ValueError(
                'Label folder {:s} does not exist!'.format(folder))

        if split == "train":
            start, end = 1, 501  #1,1200; 1200,2400 #count = 1596
        elif split == "val":
            start, end = 2495, 2595  #count = 500
        elif split == "test":
            start, end = 121, 151  #2095, 2595 #count = 500

        masks_filename = []
        for itr in range(start, end):
            filename = "ISIC_Mask_" + str(itr) + ".png"
            mask = os.path.join(folder, filename)
            if os.path.isfile(mask):  # os.path.join never returns None; check that the file exists instead
                masks_filename.append(mask)

        # load input images
        folder = os.path.join(root_folder, "ISIC2018_Task1-2_Training_Input")
        if not os.path.exists(folder):
            raise ValueError(
                'Input folder {:s} does not exist!'.format(folder))

        images_filename = []
        for itr in range(start, end):
            filename = "ISIC_Input_" + str(itr) + ".jpg"
            img = os.path.join(folder, filename)
            if os.path.isfile(img):  # likewise, check that the image file exists
                images_filename.append(img)

        self.images_filename = images_filename
        self.masks_filename = masks_filename
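
Only the constructor is shown above; a segmentation dataset built this way usually also provides __len__ and __getitem__. Below is a minimal companion sketch under that assumption; the class name SegPairDataset and the PIL-based loading are illustrative, not taken from the original source.

from PIL import Image
from torch.utils.data import Dataset


class SegPairDataset(Dataset):
    """Sketch: serve (image, mask) pairs from two aligned filename lists."""

    def __init__(self, images_filename, masks_filename, img_transforms, mask_transforms):
        assert len(images_filename) == len(masks_filename)
        self.images_filename = images_filename
        self.masks_filename = masks_filename
        self.img_transforms = img_transforms
        self.mask_transforms = mask_transforms

    def __len__(self):
        return len(self.images_filename)

    def __getitem__(self, idx):
        img = Image.open(self.images_filename[idx]).convert("RGB")
        mask = Image.open(self.masks_filename[idx]).convert("L")
        if self.img_transforms is not None:
            img = self.img_transforms(img)
        mask = self.mask_transforms(mask)
        return img, mask

Pairing by index only works while both filename lists stay aligned; since the two loops above can skip missing files independently, it is worth asserting equal lengths before training.
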
Example #3
    def __init__(self,
                 images_dir: str,
                 mats_dir: str,
                 landmarks_dir: str = None,
                 transform: bool = False):
        self.images, self.volumes, self.landmarks = [], [], []
        self.transform = transform

        if transform:
            self.tf_flip = data_transform.Flip()
            self.tf_rotate = data_transform.Rotation()
            self.tf_translate = data_transform.Translation()
            self.tf_scale = data_transform.Scale()

        for i in os.listdir(images_dir):

            ext = os.path.splitext(i)[1]

            if ext == '.jpg' or ext == '.jpeg':
                self.images += [os.path.join(images_dir, i)]

        for j in os.listdir(mats_dir):

            ext = os.path.splitext(j)[1]

            if ext == '.mat':
                self.volumes += [os.path.join(mats_dir, j)]

        if landmarks_dir:
            for j in os.listdir(landmarks_dir):

                ext = os.path.splitext(j)[1]

                if ext == '.mat':
                    self.landmarks += [os.path.join(landmarks_dir, j)]  # j, not the leftover loop variable i

        assert len(self.images) == len(self.volumes)
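
Unlike Example #1, this variant fills self.images and self.volumes from two separate os.listdir passes, so the final assert only compares counts, not that the file stems actually correspond. A short sketch of a stricter alignment check follows; the helper name stems_match is illustrative, not from the original source.

import os

def stems_match(image_paths, volume_paths):
    """Sketch: confirm both path lists cover the same set of file stems."""
    def stem(p):
        return os.path.splitext(os.path.basename(p))[0]
    return sorted(stem(p) for p in image_paths) == sorted(stem(p) for p in volume_paths)
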
Example #4
def main(args):
    # initialize the best validation metric for the chosen training mode
    if args.mode == "preTrain":
        # For MSE loss we track the lowest validation loss
        best_acc1 = 100000
    else:
        # For dice loss we track the highest validation dice coefficient
        best_acc1 = 0.0

    if args.gpu >= 0:
        print("Use GPU: {}".format(args.gpu))
    else:
        print('You are using the CPU for computing,',
              'but this script assumes a GPU;',
              'you will NOT be able to switch between CPU and GPU training!')

    criterion1 = jaccard_loss()
    # Train in self supervised mode
    if args.mode == "preTrain":
        model = preTrain_model()
        criterion = nn.MSELoss()
    else:  # Train in fully supervised mode
        #initial_param = {}
        #load_checkpoint(initial_param)
        #return
        model = uNet_model()  #imgSeg_model(initial_param=initial_param)
        criterion = nn.BCELoss()
    model_arch = "UNet"

    # put everything on the GPU
    if args.gpu >= 0:
        model = model.cuda(args.gpu)
        criterion = criterion.cuda(args.gpu)
        criterion1 = criterion1.cuda(args.gpu)

    # setup the optimizer
    if args.mode == "preTrain":
        optimizer = torch.optim.Adam(model.parameters(), args.lr)
        # momentum=args.momentum,
        # weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     weight_decay=args.weight_decay)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            if args.gpu < 0:
                model = model.cpu()
            else:
                model = model.cuda(args.gpu)

            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {}, acc1 {})".format(
                args.resume, checkpoint['epoch'], best_acc1))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # set up transforms for data augmentation
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # train transforms
    print('Loading training, validation and test datasets ...')
    train_transforms = []
    train_transforms.append(transforms.Resize((256, 256)))  # Resize replaces the deprecated transforms.Scale
    train_transforms.append(transforms.ToTensor())
    train_transforms.append(normalize)
    train_transforms = transforms.Compose(train_transforms)
    # val transforms
    val_transforms = []
    val_transforms.append(transforms.Resize((256, 256)))
    val_transforms.append(transforms.ToTensor())
    val_transforms.append(normalize)
    val_transforms = transforms.Compose(val_transforms)
    # test transforms
    #test_transforms=[]
    #test_transforms.append(transforms.Scale((512, 512)))
    #test_transforms.append(transforms.ToTensor())
    #test_transforms.append(normalize)
    #test_transforms = transforms.Compose(test_transforms)

    train_dataset = MelanomaDataLoader(args.data_folder,
                                       split="train",
                                       img_transforms=train_transforms)
    val_dataset = MelanomaDataLoader(args.data_folder,
                                     split="val",
                                     img_transforms=val_transforms)
    #test_dataset = MelanomaDataLoader(args.data_folder,
    #                                  split="test", transforms=test_transforms)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None,
                                               drop_last=False)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             sampler=None,
                                             drop_last=False)
    #test_loader = torch.utils.data.DataLoader(
    #  test_dataset, batch_size=1, shuffle=False,
    #  num_workers=args.workers, pin_memory=True, sampler=None, drop_last=False)

    # enable cudnn benchmark
    cudnn.enabled = True
    cudnn.benchmark = True
    print(optimizer)
    print(criterion)
    if args.mode == "supTrain":
        print(criterion1)

    # start the training
    print("Training the model ...")
    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        train(train_loader, model, criterion, criterion1, optimizer, epoch,
              "train", args)

        # evaluate on validation set
        #acc1 = validate(val_loader, model, epoch, args)
        if args.mode == "preTrain":
            loss = validate(val_loader, model, criterion, epoch, args)
            # remember best loss and save checkpoint
            is_best = loss < best_acc1
            best_acc1 = min(loss, best_acc1)
        else:
            acc1 = validate(val_loader, model, criterion, epoch, args)
            # remember best acc@1 and save checkpoint
            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model_arch': model_arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
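
save_checkpoint is called at the end of every epoch but is not defined in this example. Below is a minimal sketch of the conventional PyTorch checkpointing pattern it likely follows; the file names are assumptions, not from the original source.

import shutil
import torch

def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
    """Sketch: always write the latest state, then copy it aside when it is the best so far."""
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, "model_best.pth.tar")
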