Example #1
# --- Inference configuration (script-level) ---------------------------------
# NOTE(review): `gpus` must be defined before this point (e.g. parsed from the
# command line) — its definition is not visible in this chunk.
os.environ['CUDA_VISIBLE_DEVICES'] = gpus

# Fix: pick CUDA only when it is actually available instead of the original
# hard-coded `"cuda" if True else "cpu"`, which always selected CUDA and
# failed on CPU-only machines.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_path = "../data/data_npy/"
test_flag = 0  # cross-validation fold index
train_files, test_files = get_cross_validation_paths(test_flag)
model_flag = 'SM'
net_name = 'ResUNet101'
loss_name = 'CombinedLoss'
if_fpf = True
saved_checkpoint = '36.ckpt'
# Per-class probability thresholds (presumably tuned on validation data —
# verify against the training run); use None to fall back to a fixed cutoff.
dynamic_threshold = [0.2398, 0.2151, 0.1941, 0.1636]  # None
precise_net_path = os.path.join("SavePath", model_flag, net_name, str(test_flag), saved_checkpoint)
if_dependent = 1
if if_dependent == 1:
    # Class-dependency prior computed over the training files; kept fixed
    # (no gradient) at inference time.
    alpha = get_global_alpha(train_files, data_path)
    alpha = torch.from_numpy(alpha).float().to(DEVICE)
    alpha.requires_grad = False
else:
    alpha = None

################################################################

from torch.nn import DataParallel

# Build the segmentation network and its loss via the project's model loader,
# then restore the trained weights from the checkpoint selected above.
model = import_module('models.model_loader')
precise_net, loss = model.get_full_model(
    net_name, loss_name, n_classes=5, alpha=alpha)
# Additional class-dependency loss term driven by the alpha prior
# (alpha may be None when if_dependent == 0).
c_loss = DependentLoss(alpha)
checkpoint = torch.load(precise_net_path)
precise_net.load_state_dict(checkpoint['state_dict'])
Example #2
def main(args):
    """Train the segmentation network with early stopping.

    Builds train/eval datasets for fold ``args.test_flag``, trains for up to
    ``args.epochs`` epochs, evaluates after ``args.untest_epoch``, checkpoints
    whenever the evaluation dice improves, and stops once ``args.patient``
    evaluated epochs pass without a dice improvement. Per-epoch statistics are
    appended to per-run CSV files identified by a fresh UUID.
    """
    max_precision = 0.
    torch.manual_seed(123)  # fixed seed for reproducibility
    cudnn.benchmark = True
    setgpu(args.gpu)
    data_path = args.data_path
    train_files, test_files = get_cross_validation_paths(args.test_flag)

    # Evaluation pipeline: normalization only, no augmentation.
    composed_transforms_tr = transforms.Compose([
        tr.Normalize(mean=(0.12, 0.12, 0.12), std=(0.018, 0.018, 0.018)),
        tr.ToTensor2(args.n_class)
    ])
    eval_dataset = THOR_Data(transform=composed_transforms_tr,
                             path=args.data_path,
                             file_list=test_files,
                             otsu=args.otsu)

    if args.if_dependent == 1:
        # Class-dependency prior from the training set; frozen (no gradient).
        alpha = get_global_alpha(train_files, data_path)
        alpha = torch.from_numpy(alpha).float().to(DEVICE)
        alpha.requires_grad = False
    else:
        alpha = None
    model = import_module('models.model_loader')
    net, loss = model.get_full_model(args.model_name,
                                     args.loss_name,
                                     n_classes=args.n_class,
                                     alpha=alpha,
                                     if_closs=args.if_closs,
                                     class_weights=torch.FloatTensor(
                                         [1.0, 4.0, 2.0, 5.0, 3.0]).to(DEVICE))
    start_epoch = args.start_epoch
    save_dir = args.save_dir
    logging.info(args)
    if args.resume:
        # Resume: restore weights and continue from the epoch after the one
        # stored in the checkpoint.
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch'] + 1
        net.load_state_dict(checkpoint['state_dict'])

    net = net.to(DEVICE)
    loss = loss.to(DEVICE)
    if len(args.gpu.split(',')) > 1 or args.gpu == 'all':
        net = DataParallel(net)

    optimizer = torch.optim.SGD(net.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    init_lr = np.copy(args.lr)  # initial LR snapshot (unused here; kept as-is)

    def get_lr(epoch):
        # Exponential decay (x0.95 per epoch) with a floor of 1e-4.
        # NOTE: intentionally mutates args.lr so the decay persists
        # across calls.
        if args.lr < 0.0001:
            return args.lr
        if epoch > 0:
            args.lr = args.lr * 0.95
            logging.info('current learning rate is %f' % args.lr)
        return args.lr

    # Training pipeline: zoom/flip augmentation + normalization.
    composed_transforms_tr = transforms.Compose([
        tr.RandomZoom((512, 512)),
        tr.RandomHorizontalFlip(),
        tr.Normalize(mean=(0.12, 0.12, 0.12), std=(0.018, 0.018, 0.018)),
        tr.ToTensor2(args.n_class)
    ])
    train_dataset = THOR_Data(transform=composed_transforms_tr,
                              path=data_path,
                              file_list=train_files,
                              otsu=args.otsu)
    trainloader = DataLoader(train_dataset,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=4)
    break_flag = 0  # epochs since last dice improvement (early-stop counter)
    high_dice = 0.
    selected_thresholds = np.zeros((args.n_class - 1, ))
    run_id = str(uuid.uuid4())
    cur_train_stats_path = train_stats_path.format(run_id)
    cur_eval_stats_path = eval_stats_path.format(run_id)
    # Fix: the csv module requires files opened with newline='' — without it
    # blank rows appear between records on Windows.
    with open(cur_train_stats_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(stats_fields)

    with open(cur_eval_stats_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(stats_fields)

    for epoch in range(start_epoch, args.epochs + 1):
        train_loss, adaptive_thresholds = train(trainloader, net, loss, epoch,
                                                optimizer, get_lr, save_dir,
                                                cur_train_stats_path)
        if epoch < args.untest_epoch:
            # Skip evaluation during the warm-up epochs.
            continue
        break_flag += 1
        eval_dice, eval_precision = evaluation(args, net, loss, epoch,
                                               eval_dataset,
                                               selected_thresholds,
                                               cur_eval_stats_path)
        # Track the thresholds from the epoch with the best eval precision.
        if max_precision <= eval_precision:
            selected_thresholds = adaptive_thresholds
            max_precision = eval_precision
            logging.info(
                '************************ dynamic threshold saved successful ************************** !'
            )
        if eval_dice >= high_dice:
            # New best dice: reset the early-stop counter and checkpoint.
            high_dice = eval_dice
            break_flag = 0
            if len(args.gpu.split(',')) > 1 or args.gpu == 'all':
                # Unwrap DataParallel so the checkpoint loads on single-GPU setups.
                state_dict = net.module.state_dict()
            else:
                state_dict = net.state_dict()
            torch.save(
                {
                    'epoch': epoch,
                    'save_dir': save_dir,
                    'state_dict': state_dict,
                    'optimizer': optimizer.state_dict(),
                    'args': args
                }, os.path.join(save_dir, '%d.ckpt' % epoch))
            logging.info(
                '************************ model saved successful ************************** !'
            )
        if break_flag > args.patient:
            # Early stop: no dice improvement for args.patient evaluated epochs.
            break