Example #1
    logging.basicConfig(filename=snapshot_path + "/log.txt",
                        level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s',
                        datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    net = VNetRec(n_channels=1,
                  n_classes=num_classes,
                  normalization='batchnorm',
                  has_dropout=True)
    net = net.cuda()

    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       num=16,
                       transform=transforms.Compose([
                           RandomRotFlip(),
                           RandomCrop(patch_size),
                           ToTensor(),
                       ]))

    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4,
                             pin_memory=True,
                             worker_init_fn=worker_init_fn)

    net.train()
    optimizer = optim.SGD(net.parameters(),
                          lr=base_lr,
                          momentum=0.9,
                          weight_decay=0.0001)
Example #2
def main():
    ###################
    # init parameters #
    ###################
    args = get_args()
    # training path
    train_data_path = args.root_path
    # writer
    idx = args.save.rfind('/')
    log_dir = args.writer_dir + args.save[idx:]
    writer = SummaryWriter(log_dir)

    batch_size = args.batch_size * args.ngpu 
    max_iterations = args.max_iterations
    base_lr = args.base_lr

    patch_size = (112, 112, 80)
    num_classes = 2

    # random
    if args.deterministic:
        cudnn.benchmark = False
        cudnn.deterministic = True
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    ## set up the log file
    if os.path.exists(args.save):
        shutil.rmtree(args.save)
    os.makedirs(args.save, exist_ok=True)
    snapshot_path = args.save
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
    net = net.cuda()

    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       num=16,
                       transform=transforms.Compose([
                           RandomRotFlip(),
                           RandomCrop(patch_size),
                           ToTensor(),
                       ]))
    db_test = LAHeart(base_dir=train_data_path,
                      split='test',
                      transform=transforms.Compose([
                          CenterCrop(patch_size),
                          ToTensor()
                      ]))

    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4,
                             pin_memory=True,
                             worker_init_fn=worker_init_fn)

    net.train()
    optimizer = optim.SGD(net.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)

    logging.info("{} itertations per epoch".format(len(trainloader)))

    iter_num = 0
    max_epoch = max_iterations//len(trainloader)+1
    lr_ = base_lr
    net.train()
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for i_batch, sampled_batch in enumerate(trainloader):
            time2 = time.time()
            # print('fetch data cost {}'.format(time2-time1))
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs = net(volume_batch)

            loss_seg = F.cross_entropy(outputs, label_batch)
            outputs_soft = F.softmax(outputs, dim=1)
            loss_seg_dice = dice_loss(outputs_soft[:, 1, :, :, :], label_batch == 1)
            loss = 0.5 * (loss_seg + loss_seg_dice)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_num = iter_num + 1
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
            if iter_num % 50 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image, iter_num)

            ## change lr
            if iter_num % 2500 == 0:
                lr_ = base_lr * 0.1 ** (iter_num // 2500)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
            if iter_num % 1000 == 0:
                save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(net.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))

            if iter_num > max_iterations:
                break
            time1 = time.time()
        if iter_num > max_iterations:
            break
    save_mode_path = os.path.join(snapshot_path, 'iter_'+str(max_iterations+1)+'.pth')
    torch.save(net.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
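
Note: `dice_loss` above is a repository-local helper that is not shown in the snippet. Given how it is called — foreground probabilities `outputs_soft[:, 1, ...]` against a boolean mask `label_batch == 1` — a minimal soft-Dice sketch consistent with that signature might look like this (the `smooth` constant is an assumption):

import torch

def dice_loss(score, target, smooth=1e-5):
    # score: softmax probabilities for the foreground class, shape (b, x, y, z)
    # target: boolean/float ground-truth mask of the same shape
    target = target.float()
    intersect = torch.sum(score * target)
    denominator = torch.sum(score * score) + torch.sum(target * target)
    # soft Dice loss: 1 - 2|A∩B| / (|A|² + |B|²), smoothed against empty masks
    return 1.0 - (2.0 * intersect + smooth) / (denominator + smooth)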
Example #3
def debugger():
    patch_size = (112, 112, 80)
    training_data = data_loader(split='train')
    testing_data = data_loader(split='test')

    x_criterion = soft_cross_entropy  #supervised loss is 0.5*(x_criterion + dice_loss)
    u_criterion = nn.MSELoss()  #unsupervised loss

    labelled_index = np.random.permutation(LABELLED_INDEX)
    unlabelled_index = np.random.permutation(
        UNLABELLED_INDEX)[:len(labelled_index)]
    labelled_data = [training_data[i] for i in labelled_index]
    unlabelled_data = [training_data[i] for i in unlabelled_index]  #size = 16

    ##data transformation: rotation, flip, random_crop
    labelled_data = [
        shape_transform(RandomRotFlip()(RandomCrop(patch_size)(sample)))
        for sample in labelled_data
    ]
    unlabelled_data = [
        shape_transform(RandomRotFlip()(RandomCrop(patch_size)(sample)))
        for sample in unlabelled_data
    ]

    net = VNet(n_channels=1,
               n_classes=2,
               normalization='batchnorm',
               has_dropout=True).cuda()

    model_path = "../saved/0_supervised.pth"
    net.load_state_dict(torch.load(model_path))

    optimizer = optim.SGD(net.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=0.0001)
    training_loss = train_epoch(net=net,
                                labelled_data=labelled_data,
                                unlabelled_data=unlabelled_data,
                                batch_size=2,
                                supervised_only=True,
                                optimizer=optimizer,
                                x_criterion=x_criterion,
                                u_criterion=u_criterion,
                                K=1,
                                T=1,
                                alpha=1,
                                mixup_mode="__",
                                Lambda=0,
                                aug_factor=0)

    net = VNet(n_channels=1,
               n_classes=2,
               normalization='batchnorm',
               has_dropout=True).cuda()
    model_path = "../saved/8_expected_supervised.pth"
    net.load_state_dict(torch.load(model_path))

    optimizer = optim.SGD(net.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=0.0001)
    training_loss = train_epoch(net=net,
                                labelled_data=labelled_data,
                                unlabelled_data=unlabelled_data,
                                batch_size=2,
                                supervised_only=False,
                                optimizer=optimizer,
                                x_criterion=x_criterion,
                                u_criterion=u_criterion,
                                K=1,
                                T=1,
                                alpha=1,
                                mixup_mode="__",
                                Lambda=0,
                                aug_factor=0)
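
Note: `soft_cross_entropy` is another repository-local helper (the comment above pairs it with `dice_loss` for the supervised loss). A minimal sketch, assuming both predictions and targets are class-probability tensors with classes on dim 1 — the epsilon guarding log(0) is an assumption:

import torch

def soft_cross_entropy(predicted, target):
    # cross-entropy against soft (probability) targets:
    # -sum_c target_c * log(predicted_c), averaged over batch and voxels
    return -(target * torch.log(predicted + 1e-8)).sum(dim=1).mean()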
Example #4
def main():
    ###################
    # init parameters #
    ###################
    args = get_args()
    # training path
    train_data_path = args.root_path
    # writer
    idx = args.save.rfind('/')
    log_dir = args.writer_dir + args.save[idx:]
    writer = SummaryWriter(log_dir)

    batch_size = args.batch_size * args.ngpu
    max_iterations = args.max_iterations
    base_lr = args.base_lr

    patch_size = (112, 112, 80)
    num_classes = 2

    # random
    if args.deterministic:
        cudnn.benchmark = False
        cudnn.deterministic = True
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)

    ## set up the log file
    if os.path.exists(args.save):
        shutil.rmtree(args.save)
    os.makedirs(args.save, exist_ok=True)
    snapshot_path = args.save
    logging.basicConfig(filename=snapshot_path + "/log.txt",
                        level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s',
                        datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    #training set
    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       num=16,
                       transform=transforms.Compose([
                           RandomRotFlip(),
                           RandomCrop(patch_size),
                           ToTensor()
                       ]))

    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4,
                             pin_memory=True,
                             worker_init_fn=worker_init_fn)

    net = VNet(n_channels=1,
               n_classes=num_classes,
               normalization='batchnorm',
               has_dropout=True)
    net = net.cuda()
    net.train()
    optimizer = optim.SGD(net.parameters(),
                          lr=base_lr,
                          momentum=0.9,
                          weight_decay=0.0001)

    logging.info("{} itertations per epoch".format(len(trainloader)))

    iter_num = 0
    alpha = 1.0
    max_epoch = max_iterations // len(trainloader) + 1
    lr_ = base_lr
    net.train()
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for i_batch, sampled_batch in enumerate(trainloader):
            time2 = time.time()
            # print('fetch data cost {}'.format(time2-time1))
            # volume_batch.shape = (b, 1, x, y, z); label_batch.shape = (b, x, y, z)
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs = net(volume_batch)

            loss_seg = F.cross_entropy(outputs, label_batch)
            outputs_soft = F.softmax(outputs, dim=1)
            loss_seg_dice = dice_loss(outputs_soft[:, 1, :, :, :],
                                      label_batch == 1)
            # compute gt_signed distance function and boundary loss
            with torch.no_grad():
                # default is compute_sdf; compute_sdf1_1 is also worth trying
                gt_sdf_npy = compute_sdf(label_batch.cpu().numpy(),
                                         outputs_soft.shape)
                gt_sdf = torch.from_numpy(gt_sdf_npy).float().cuda(
                    outputs_soft.device.index)
                # show signed distance map for debug
                # import matplotlib.pyplot as plt
                # plt.figure()
                # plt.subplot(121), plt.imshow(gt_sdf_npy[0,1,:,:,40]), plt.colorbar()
                # plt.subplot(122), plt.imshow(np.uint8(label_batch.cpu().numpy()[0,:,:,40]>0)), plt.colorbar()
                # plt.show()
            loss_boundary = boundary_loss(outputs_soft, gt_sdf)
            loss = alpha * (loss_seg + loss_seg_dice) + (1 - alpha) * loss_boundary

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_num = iter_num + 1
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('loss/loss_boundary', loss_boundary, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            writer.add_scalar('loss/alpha', alpha, iter_num)
            logging.info('iteration %d : alpha : %f' % (iter_num, alpha))
            logging.info('iteration %d : loss_seg_dice : %f' %
                         (iter_num, loss_seg_dice.item()))
            logging.info('iteration %d : loss_boundary : %f' %
                         (iter_num, loss_boundary.item()))
            logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
            if iter_num % 2 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image,
                                 iter_num)

                image = gt_sdf[0, 1:2, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/gt_sdf', grid_image, iter_num)

            ## change lr
            if iter_num % 2500 == 0:
                lr_ = base_lr * 0.1**(iter_num // 2500)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
            if iter_num % 1000 == 0:
                save_mode_path = os.path.join(snapshot_path,
                                              'iter_' + str(iter_num) + '.pth')
                torch.save(net.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))

            if iter_num > max_iterations:
                break
            time1 = time.time()
        # linearly shift weight from the Dice+CE term to the boundary term, floored at 0.01
        alpha = max(alpha - 0.01, 0.01)
        if iter_num > max_iterations:
            break
    save_mode_path = os.path.join(snapshot_path,
                                  'iter_' + str(max_iterations + 1) + '.pth')
    torch.save(net.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
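
Note: `compute_sdf` and `boundary_loss` are repository-local. A sketch consistent with how they are called here — a signed distance function per class channel (negative inside the object, positive outside), integrated against the softmax probabilities in the spirit of Kervadec et al.'s boundary loss — with normalization details left as assumptions:

import numpy as np
import torch
from scipy.ndimage import distance_transform_edt

def compute_sdf(segmentation, out_shape):
    # signed distance to the object boundary for each class channel:
    # negative inside the object, positive outside, zero on the boundary
    sdf = np.zeros(out_shape, dtype=np.float32)
    for b in range(out_shape[0]):          # batch
        for c in range(out_shape[1]):      # class channel
            mask = (segmentation[b] == c).astype(np.uint8)
            if mask.any():
                outside = distance_transform_edt(1 - mask)
                inside = distance_transform_edt(mask)
                sdf[b, c] = outside - inside
    return sdf

def boundary_loss(outputs_soft, gt_sdf):
    # integrate foreground probabilities against the signed distance map;
    # probability mass inside the object (negative SDF) lowers the loss
    return torch.mean(outputs_soft[:, 1, ...] * gt_sdf[:, 1, ...])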
Example #5
def experiment(exp_identifier,
               max_epoch,
               training_data,
               testing_data,
               batch_size=2,
               supervised_only=False,
               K=2,
               T=0.5,
               alpha=1,
               mixup_mode='all',
               Lambda=1,
               Lambda_ramp=None,
               base_lr=0.01,
               change_lr=None,
               aug_factor=1,
               from_saved=None,
               always_do_validation=True,
               decay=0):
    '''
    max_epoch: epochs to run. Going through labeled data once is one epoch.
    batch_size: batch size of labeled data. Unlabeled data is of the same size.
    training_data: data for train_epoch, list of dicts of numpy arrays.
    testing_data: data for validation, list of dicts of numpy arrays.
    supervised_only: if True, only do supervised training on LABELLED_INDEX; otherwise, use both LABELLED_INDEX and UNLABELLED_INDEX.
    
    Hyperparameters
    ---------------
    K: repeats of each unlabelled data
    T: temperature of sharpening
    alpha: mixup hyperparameter of beta distribution
    mixup_mode: how mixup is performed --
        '__': no mix up
        'ww': x and u both mixed up with w(x+u)
        'xx': both with x
        'xu': x with x, u with u
        'uu': both with u
        ... _ means no, x means with x, u means with u, w means with w(x+u)
    Lambda: loss = loss_x + Lambda * loss_u, relative weight for unsupervised loss
    base_lr: initial learning rate

    Lambda_ramp: callable or None. Lambda is ignored if this is not None; in that case, Lambda = Lambda_ramp(epoch).
    change_lr: dict, {epoch: change_multiplier}
    '''
    print(
        f"Experiment {exp_identifier}: max_epoch = {max_epoch}, batch_size = {batch_size}, supervised_only = {supervised_only}, "
        f"K = {K}, T = {T}, alpha = {alpha}, mixup_mode = {mixup_mode}, Lambda = {Lambda}, Lambda_ramp = {Lambda_ramp}, base_lr = {base_lr}, aug_factor = {aug_factor}."
    )

    net = VNet(n_channels=1,
               n_classes=2,
               normalization='batchnorm',
               has_dropout=True)
    eval_net = VNet(n_channels=1,
                    n_classes=2,
                    normalization='batchnorm',
                    has_dropout=True)

    if from_saved is not None:
        net.load_state_dict(torch.load(from_saved))

    if GPU:
        net = net.cuda()
        eval_net.cuda()

    ## eval_net receives no gradient updates (its parameters are detached)
    for param in eval_net.parameters():
        param.detach_()

    net.train()
    eval_net.train()

    optimizer = optim.SGD(net.parameters(),
                          lr=base_lr,
                          momentum=0.9,
                          weight_decay=0.0001)
    x_criterion = soft_cross_entropy  #supervised loss is 0.5*(x_criterion + dice_loss)
    u_criterion = nn.MSELoss()  #unsupervised loss

    training_losses = []
    testing_losses = []
    testing_accuracy = []  #dice accuracy

    patch_size = (112, 112, 80)

    testing_data = [
        shape_transform(CenterCrop(patch_size)(sample))
        for sample in testing_data
    ]
    t0 = time.time()

    lr = base_lr

    for epoch in range(max_epoch):
        labelled_index = np.random.permutation(LABELLED_INDEX)
        unlabelled_index = np.random.permutation(
            UNLABELLED_INDEX)[:len(labelled_index)]
        labelled_data = [training_data[i] for i in labelled_index]
        unlabelled_data = [training_data[i] for i in unlabelled_index]  # size = 16

        ##data transformation: rotation, flip, random_crop
        labelled_data = [
            shape_transform(RandomRotFlip()(RandomCrop(patch_size)(sample)))
            for sample in labelled_data
        ]
        unlabelled_data = [
            shape_transform(RandomRotFlip()(RandomCrop(patch_size)(sample)))
            for sample in unlabelled_data
        ]

        if Lambda_ramp is not None:
            Lambda = Lambda_ramp(epoch)
            print(f"Lambda ramp: Lambda = {Lambda}")

        if change_lr is not None:
            if epoch in change_lr:
                lr_ = lr * change_lr[epoch]
                print(
                    f"Learning rate decay at epoch {epoch}, from {lr} to {lr_}"
                )
                lr = lr_
                #change learning rate.
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_

        training_loss = train_epoch(net=net,
                                    eval_net=eval_net,
                                    labelled_data=labelled_data,
                                    unlabelled_data=unlabelled_data,
                                    batch_size=batch_size,
                                    supervised_only=supervised_only,
                                    optimizer=optimizer,
                                    x_criterion=x_criterion,
                                    u_criterion=u_criterion,
                                    K=K,
                                    T=T,
                                    alpha=alpha,
                                    mixup_mode=mixup_mode,
                                    Lambda=Lambda,
                                    aug_factor=aug_factor,
                                    decay=decay)

        training_losses.append(training_loss)

        if always_do_validation or epoch % 50 == 0:
            testing_dice_loss, accuracy = validation(net=net,
                                                     testing_data=testing_data,
                                                     x_criterion=x_criterion)

            # record and report only on epochs where validation actually ran, so
            # testing_dice_loss and accuracy are never referenced before assignment
            testing_losses.append(testing_dice_loss)
            testing_accuracy.append(accuracy)
            print(
                f"Epoch {epoch+1}/{max_epoch}, time used: {time.time()-t0:.2f}, "
                f"training loss: {training_loss:.6f}, testing dice_loss: {testing_dice_loss:.6f}, "
                f"testing accuracy: {100.0*accuracy:.2f}%"
            )

    save_path = f"../saved/{exp_identifier}.pth"
    torch.save(net.state_dict(), save_path)
    print(f"Experiment {exp_identifier} finished. Model saved as {save_path}")
    return training_losses, testing_losses, testing_accuracy
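
Note: the hyperparameters documented in `experiment` (sharpening temperature T, Beta(alpha, alpha) mixup, a ramped Lambda) follow the MixMatch recipe. The helpers below are illustrative sketches of those three mechanisms, not the repository's `train_epoch` internals:

import numpy as np
import torch

def sharpen(p, T):
    # temperature sharpening along the class dimension (dim=1);
    # T -> 0 approaches one-hot, T = 1 leaves p unchanged
    p = p ** (1.0 / T)
    return p / p.sum(dim=1, keepdim=True)

def mixup(x1, y1, x2, y2, alpha):
    # mixup with lam ~ Beta(alpha, alpha), biased toward the first argument
    # (the MixMatch convention, so labelled inputs stay close to their labels)
    lam = np.random.beta(alpha, alpha)
    lam = max(lam, 1.0 - lam)
    return lam * x1 + (1 - lam) * x2, lam * y1 + (1 - lam) * y2

def linear_rampup(max_lambda, rampup_epochs):
    # returns a callable usable as Lambda_ramp, e.g.
    # experiment(..., Lambda_ramp=linear_rampup(1.0, 40))  # values here are hypothetical
    return lambda epoch: max_lambda * min(epoch / rampup_epochs, 1.0)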