Example #1
            print('Model loaded from {}'.format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        start_epoch = 0
        start_step = 0

    train_image_paths, train_mask_paths = get_images(image_dir, args.preprocess, phase='train', healthy_included=args.healthyincluded)
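    # the DiaretAL training split additionally returns paths to predicted masks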
    train_image_paths2, train_mask_paths2, train_predicted_mask_paths2 = get_images_diaretAL(image_dir2, predicted_dir, args.preprocess, phase='train')
    eval_image_paths, eval_mask_paths = get_images(image_dir, args.preprocess, phase='eval', healthy_included=args.healthyincluded)

    if net_name == 'unet':
        # the unet branch is deprecated and not used
        train_dataset = IDRIDDataset(train_image_paths, train_mask_paths, 4,
                                     transform=Compose([
                                         RandomRotation(rotation_angle),
                                         RandomCrop(image_size),
                                     ]))
        eval_dataset = IDRIDDataset(eval_image_paths, eval_mask_paths, 4,
                                    transform=Compose([
                                        RandomCrop(image_size),
                                    ]))
    elif net_name == 'hednet':
        train_dataset1 = IDRIDDataset(train_image_paths, train_mask_paths, 4,
                                      transform=Compose([
                                          RandomRotation(rotation_angle),
                                          RandomCrop(image_size),
                                          Normalize(mean=[0.485, 0.456, 0.406],
                                                    std=[0.229, 0.224, 0.225]),
                                      ]))
        train_dataset2 = DiaretALDataset(train_image_paths2, train_mask_paths2, train_predicted_mask_paths2, 4,
                                         transform=Compose([
Example #2
        checkpoint = torch.load(args.model)
        model.load_state_dict(checkpoint['state_dict'])
        print('Model loaded from {}'.format(args.model))
    else:
        print("=> no checkpoint found at '{}'".format(args.model))
        sys.exit(0)

    eval_image_paths, eval_mask_paths = get_images(
        image_dir,
        args.preprocess,
        phase='eval',
        healthy_included=args.healthyincluded)

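    # hednet inputs are normalized with the standard ImageNet mean/std; unet is evaluated without normalization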
    if net_name == 'unet':
        eval_dataset = IDRIDDataset(eval_image_paths,
                                    eval_mask_paths,
                                    4,
                                    transform=Compose([]))
    elif net_name == 'hednet':
        eval_dataset = IDRIDDataset(eval_image_paths,
                                    eval_mask_paths,
                                    4,
                                    transform=Compose([
                                        Normalize(mean=[0.485, 0.456, 0.406],
                                                  std=[0.229, 0.224, 0.225]),
                                    ]))
    eval_loader = DataLoader(eval_dataset, args.batchsize, shuffle=False)

    dice_coeffs_soft, dice_coeffs_hard, vis_images = eval_model(
        model, eval_loader)
    print(dice_coeffs_soft, dice_coeffs_hard)
    #logger.image_summary('eval_images', vis_images, step=0)
Example #3
            checkpoint = torch.load(resume)
            start_epoch = checkpoint['epoch']+1
            start_step = checkpoint['step']
            model.load_state_dict(checkpoint['state_dict'])
            g_optimizer.load_state_dict(checkpoint['optimizer'])
            print('Model loaded from {}'.format(resume))
        else:
            print("=> no checkpoint found at '{}'".format(resume))
    else:
        start_epoch = 0
        start_step = 0

    train_image_paths, train_mask_paths = get_images(image_dir, args.preprocess, phase='train')
    eval_image_paths, eval_mask_paths = get_images(image_dir, args.preprocess, phase='eval')

    train_dataset = IDRIDDataset(train_image_paths, train_mask_paths, config.LESION_IDS[args.lesion],
                                 transform=Compose([
                                     RandomRotation(rotation_angle),
                                     RandomCrop(image_size),
                                 ]))
    eval_dataset = IDRIDDataset(eval_image_paths, eval_mask_paths, config.LESION_IDS[args.lesion])

    train_loader = DataLoader(train_dataset, batchsize, shuffle=True)
    eval_loader = DataLoader(eval_dataset, batchsize, shuffle=False)

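    # StepLR decays the generator learning rate by a factor of 0.9 every 200 steps;
    # the cross-entropy loss is class-weighted via config.CROSSENTROPY_WEIGHTS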
    g_scheduler = lr_scheduler.StepLR(g_optimizer, step_size=200, gamma=0.9)
    criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(config.CROSSENTROPY_WEIGHTS).to(device))
    
    train_model(model, args.lesion, args.preprocess, train_loader, eval_loader, criterion, g_optimizer, g_scheduler,
                batchsize, num_epochs=config.EPOCHES, start_epoch=start_epoch, start_step=start_step)
Example #4
        start_epoch = 0
        start_step = 0

    train_image_paths, train_mask_paths = get_images(image_dir,
                                                     config.PREPROCESS,
                                                     phase='train')
    eval_image_paths, eval_mask_paths = get_images(image_dir,
                                                   config.PREPROCESS,
                                                   phase='eval')

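    # training patches are rotation-augmented and randomly cropped; evaluation patches are only randomly cropped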
    if net_name == 'unet':
        train_dataset = IDRIDDataset(
            train_image_paths,
            train_mask_paths,
            config.CLASS_ID,
            transform=Compose([
                RandomRotation(rotation_angle),
                #ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
                RandomCrop(image_size),
            ]))
        eval_dataset = IDRIDDataset(eval_image_paths,
                                    eval_mask_paths,
                                    config.CLASS_ID,
                                    transform=Compose([
                                        RandomCrop(image_size),
                                    ]))
    elif net_name == 'hednet':
        train_dataset = IDRIDDataset(
            train_image_paths,
            train_mask_paths,
            config.CLASS_ID,
Example #5
    model = HNNNet(pretrained=True, class_number=2)

    resume = args.model

    if os.path.isfile(resume):
        print("=> loading checkpoint '{}'".format(resume))
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch'] + 1
        start_step = checkpoint['step']
        try:
            model.load_state_dict(checkpoint['state_dict'])
        except (KeyError, RuntimeError):
            # some checkpoints store the weights under 'g_state_dict' instead
            model.load_state_dict(checkpoint['g_state_dict'])
        print('Model loaded from {}'.format(resume))
    else:
        print("=> no checkpoint found at '{}'".format(resume))

    model.to(device)

    test_image_paths, test_mask_paths = get_images(image_dir,
                                                   config.PREPROCESS,
                                                   phase='test')

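    # test images are normalized with the standard ImageNet mean/std, matching the training-time transform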
    test_dataset = IDRIDDataset(test_image_paths, test_mask_paths, config.LESION_IDS[args.lesion],
                                transform=Compose([
                                    Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225]),
                                ]))

    test_loader = DataLoader(test_dataset, 1, shuffle=False)
    auc_result = eval_model(model, test_loader)
    print(auc_result)
Example #6
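    # fix RNG seeds and force deterministic cuDNN behaviour so evaluation runs are reproducible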
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    model = UNet(n_channels=3, n_classes=2)

    resume = args.model

    if os.path.isfile(resume):
        print("=> loading checkpoint '{}'".format(resume))
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['state_dict'])
        print('Model loaded from {}'.format(resume))
    else:
        print("=> no checkpoint found at '{}'".format(resume))

    model.to(device)

    test_image_paths, test_mask_paths = get_images(image_dir,
                                                   args.preprocess,
                                                   phase='test')
    test_dataset = IDRIDDataset(test_image_paths, test_mask_paths,
                                config.LESION_IDS[args.lesion])

    test_loader = DataLoader(test_dataset, 1, shuffle=False)
    auc_result = eval_model(model, test_loader)
    print(auc_result)