Example #1
NetC = NetC(ngpu=opt.ngpu)
# NetC.apply(weights_init)
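# Hedged sketch: weights_init is referenced above but not defined in this
# snippet; a common DCGAN-style initializer (an assumption, not the original)
# would look like:
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)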
print(NetC)

if cuda:
    NetS = NetS.cuda()
    NetC = NetC.cuda()
    # criterion = criterion.cuda()

# setup optimizer
lr = opt.lr
decay = opt.decay
optimizerG = optim.Adam(NetS.parameters(), lr=lr, betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(NetC.parameters(), lr=lr, betas=(opt.beta1, 0.999))
# load training data
dataloader = loader(Dataset('./'), opt.batchSize)
# load testing data
dataloader_val = loader(Dataset_val('./'), opt.batchSize)

max_iou = 0
NetS.train()
for epoch in range(opt.niter):
    for i, data in enumerate(dataloader, 1):
        # train C
        NetC.zero_grad()
        input, label = Variable(data[0]), Variable(data[1])
        # keep target defined on the CPU path too (the original left it
        # unbound when cuda was False)
        target = label.type(torch.FloatTensor)
        if cuda:
            input = input.cuda()
            target = target.cuda()
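        # The snippet is cut off here; in this SegAN-style setup the critic
        # step would continue by masking the image with the predicted and
        # ground-truth maps and training on their feature difference
        # (Example #4 shows the full loop).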
Example #2
mediastinums_data = sum(mediastinums_data, [])  # flatten the per-case lists into one list
# print(len(train_datas), len(train_labels), len(lungs_data), len(mediastinums_data))

# load test data
test_datas = glob.glob('../data_5pictures/images_1/*.jpg')
test_labels = glob.glob('../data_5pictures/masks_1/*.jpg')
lungs_test = glob.glob('../data_5pictures/lungs_1/*.jpg')
mediastinums_test = glob.glob('../data_5pictures/med_1/*.jpg')

print('data info------------------------------------------------')
print(len(train_datas), len(train_labels), len(test_datas), len(test_labels))
print(len(lungs_data), len(mediastinums_data), len(lungs_test),
      len(mediastinums_test))
dataloader = loader(
    Dataset(train_datas, train_labels, lungs_data, mediastinums_data),
    opt.batchSize)
dataloader_val = loader(
    Dataset_val(test_datas, test_labels, lungs_test, mediastinums_test),
    opt.batchSize)
# load testing data
# dataloader_val = loader(Dataset_val('../../node/dataset/dataset_96_random_info/'), opt.batchSize)

max_iou = 0
NetS.train()
# TODO: classification loss
loss_function = nn.CrossEntropyLoss()


def cross_loss(input, target):
    # input = input.cuda()
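    # The body is cut off here. A minimal sketch of what a wrapper around
    # loss_function above might do (an assumption, not the original code):
    #     target = target.long().squeeze(1)  # per-pixel class indices
    #     return loss_function(input, target)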
Example #3
    parser.add_argument("--outpath", default="./test_outputs")
    parser.add_argument('--batchSize', type=int, default=1)
    parser.add_argument('--ngpu', type=int, default=1)
    parser.add_argument('--weight_path', type=str)
    parser.add_argument("--exp_id", type=int)
    parser.add_argument("--store_images", default=False, action="store_true")
    parser.add_argument("--thinning", default=False, action="store_true")
    opt = parser.parse_args()

    try:
        os.makedirs(opt.outpath)
    except OSError:
        pass

    cuda = True
    dataloader_test = loader(Dataset_test('./'), opt.batchSize)

    cudnn.benchmark = True

    IoUs = []
    hds = []

    NetS = NetS(ngpu=opt.ngpu)
    NetS.load_state_dict(torch.load(opt.weight_path))
    NetS.cuda()
    NetS.eval()
    for i, data in enumerate(dataloader_test, 1):
        input, label = Variable(data[0]), Variable(data[1])
        if cuda:
            input = input.cuda()
            label = label.cuda()
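        # The loop is cut off here. A hedged sketch of how IoUs and hds could
        # be accumulated for binary masks (directed_hausdorff is from scipy;
        # everything below is an assumption, not the original code):
        #     from scipy.spatial.distance import directed_hausdorff
        #     pred = (NetS(input) > 0.5).data.cpu().numpy()
        #     gt = label.data.cpu().numpy()
        #     inter = np.sum(pred[gt == 1])
        #     IoUs.append(inter / float(pred.sum() + gt.sum() - inter))
        #     p_pts, g_pts = np.argwhere(pred[0, 0]), np.argwhere(gt[0, 0])
        #     hds.append(max(directed_hausdorff(p_pts, g_pts)[0],
        #                    directed_hausdorff(g_pts, p_pts)[0]))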
Example #4
def main():
    from dense_residual_block import NetC, NetS  # hyphens are not valid in Python module names
    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)
    cudnn.benchmark = True
    n_layers_list = [4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]
    NetS = NetS(n_layers_list, 5)
    print(NetS)
    NetC = NetC(ngpu=opt.ngpu)
    print(NetC)
    if cuda:
        NetS = NetS.cuda()
        NetC = NetC.cuda()
    lr = opt.lr
    decay = opt.decay
    optimizerG = optim.Adam(NetS.parameters(), lr=lr, betas=(opt.beta1, 0.999))
    optimizerD = optim.Adam(NetC.parameters(), lr=lr, betas=(opt.beta1, 0.999))
    dataloader = loader(Dataset('./'), opt.batchSize)
    print(len(dataloader))
    dataloader_val = loader(Dataset_val('./'), opt.batchSize)
    print(len(dataloader_val))
    logger = Logger('./logs/Dense_Residual_block')
    max_Jac = 0
    NetS.train()
    history = {split: {'epoch': [], 'Loss_D': [], 'Loss_G_joint': []}
               for split in ('train', 'val')}

    history1 = {split: {'epoch': [], 'Jac': [], 'Dsc': [], 'acc': [], 'SE': [], 'SP': []}
                for split in ('train', 'val')}
    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 1):
            NetC.zero_grad()
            input, label = Variable(data[0]), Variable(data[1])
            if cuda:
                input = input.cuda()
                target = label.cuda()
            else:
                target = label
            target = target.type(torch.FloatTensor)
            if cuda:
                target = target.cuda()
            output = NetS(input)
            output = F.sigmoid(output)
            output = output.detach()
            output_masked = input.clone()
            input_mask = input.clone()
            # mask each input channel with the detached prediction
            for d in range(3):
                output_masked[:, d, :, :] = input_mask[:, d, :, :].unsqueeze(1) * output
            if cuda:
                output_masked = output_masked.cuda()
            result = NetC(output_masked)
            target_masked = input.clone()
            # mask each input channel with the ground truth
            for d in range(3):
                target_masked[:, d, :, :] = input_mask[:, d, :, :].unsqueeze(1) * target
            if cuda:
                target_masked = target_masked.cuda()
            target_D = NetC(target_masked)
            target_D = target_D.detach()  # detach() is not in-place
            loss_D = -torch.mean(torch.abs(result - target_D))
            loss_D.backward()
            optimizerD.step()
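            # WGAN-style trick: clamp the critic's weights to a small range
            # to keep it approximately 1-Lipschitz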
            for p in NetC.parameters():
                p.data.clamp_(-0.05, 0.05)
            NetS.zero_grad()
            output = NetS(input)
            output = F.sigmoid(output)
            for d in range(3):
                output_masked[:, d, :, :] = input_mask[:, d, :, :].unsqueeze(1) * output
            if cuda:
                output_masked = output_masked.cuda()
            result = NetC(output_masked)
            for d in range(3):
                target_masked[:, d, :, :] = input_mask[:, d, :, :].unsqueeze(1) * target
            if cuda:
                target_masked = target_masked.cuda()
            target_G = NetC(target_masked)
            loss_dice = dice_loss(output,target)
            loss_G = torch.mean(torch.abs(result - target_G))
            label_edge = getEdge(target_masked)
            _, pred3 = torch.max(output_masked, 1)
            pred_edge = getEdge(pred3)
            loss_G_joint = loss_G + opt.a * EPE(label_edge, pred_edge) + opt.b * loss_dice
            loss_G_joint.backward()
            optimizerG.step()
            step = len(dataloader) * epoch + i
            info = {'D_loss': loss_D.data[0], 'G_loss': loss_G.data[0], 'loss_dice': loss_dice.data[0]}
            for tag, value in info.items():
                logger.scalar_summary(tag, value, step)
        print("===> Epoch[{}]({}/{}): Batch Dice: {:.4f}".format(epoch, i, len(dataloader), 1 - loss_dice.data[0]))
        print("===> Epoch[{}]({}/{}): G_Loss: {:.4f}".format(epoch, i, len(dataloader), loss_G.data[0]))
        print("===> Epoch[{}]({}/{}): D_Loss: {:.4f}".format(epoch, i, len(dataloader), loss_D.data[0]))
        vutils.save_image(data[0],
                '%s/input.png' % opt.outpath,
                normalize=True)
        vutils.save_image(data[1],
                '%s/label.png' % opt.outpath,
                normalize=True)
        vutils.save_image(output.data,
                '%s/result.png' % opt.outpath,
                normalize=True)
        if epoch % 10 == 0:
            NetS.eval()

            Jacs, Dscs, accs, SEs, SPs = [], [], [], [], []
            for i, data in enumerate(dataloader_val, 1):
                input, gt = Variable(data[0]), Variable(data[1])
                if cuda:
                    input = input.cuda()
                    gt = gt.cuda()
                pred = NetS(input)
                pred = F.sigmoid(pred)  # training thresholds probabilities, so do the same here
                pred[pred < 0.5] = 0
                pred[pred >= 0.5] = 1
                pred = pred.type(torch.LongTensor)
                pred_np = pred.data.cpu().numpy()
                gt = gt.data.cpu().numpy()
                for x in range(input.size()[0]):
                    # confusion-matrix counts for one image
                    TP = np.sum(pred_np[x][gt[x] == 1])
                    FP = np.sum(pred_np[x][gt[x] == 0])
                    FN = np.sum(gt[x]) - TP
                    TN = np.sum(gt[x] == 0) - FP
                    Jac = TP / float(TP + FP + FN)
                    Dsc = 2 * TP / float(2 * TP + FP + FN)
                    acc = float(TP + TN) / float(TP + TN + FP + FN)
                    SE = TP / float(TP + FN)
                    SP = TN / float(TN + FP)

                    Jacs.append(Jac)
                    Dscs.append(Dsc)
                    accs.append(acc)
                    SEs.append(SE)
                    SPs.append(SP)


            NetS.train()
            Jacs = np.array(Jacs, dtype=np.float64)
            Dscs = np.array(Dscs, dtype=np.float64)
            accs = np.array(accs, dtype=np.float64)
            SEs = np.array(SEs, dtype=np.float64)
            SPs = np.array(SPs, dtype=np.float64)

            mJac = np.mean(Jacs, axis=0)
            mDsc = np.mean(Dscs, axis=0)
            macc = np.mean(accs, axis=0)
            mSE= np.mean(SEs, axis=0)
            mSP = np.mean(SPs, axis=0)

            history1['train']['epoch'].append(epoch)
            history1['train']['Jac'].append(mJac)
            history1['train']['Dsc'].append(mDsc)
            history1['train']['acc'].append(macc)
            history1['train']['SE'].append(mSE)
            history1['train']['SP'].append(mSP)
            print('Plotting evaluation metrics figure...')
            fig = plt.figure()
            plt.xlabel('Epoch')
            plt.ylabel('Metric value')
            plt.plot(history1['train']['epoch'], history1['train']['Jac'], color='b', label='Jac')
            plt.plot(history1['train']['epoch'], history1['train']['Dsc'], color='c', label='Dsc')
            plt.plot(history1['train']['epoch'], history1['train']['acc'], color='y', label='acc')
            plt.plot(history1['train']['epoch'], history1['train']['SE'], color='g', label='SE')
            plt.plot(history1['train']['epoch'], history1['train']['SP'], color='r', label='SP')
            plt.legend()
            fig.savefig('{}/evaluation_metrics.png'.format(opt.ckpt), dpi=200)
            plt.close('all')
            info = {'Jac': mJac, 'Dsc': mDsc, 'acc': macc, 'SE': mSE, 'SP': mSP}
            for tag, value in info.items():
                logger.scalar_summary(tag, value, epoch)
            print('mJac: {:.4f}'.format(mJac))
            print('mDsc: {:.4f}'.format(mDsc))
            print('macc: {:.4f}'.format(macc))
            print('mSE: {:.4f}'.format(mSE))
            print('mSP: {:.4f}'.format(mSP))
            if mJac > max_Jac:
                max_Jac = mJac
                torch.save(NetS.state_dict(), '%s/NetS_epoch_%d.pth' % (opt.outpath, epoch))
            vutils.save_image(data[0],
                    '%s/input_val.png' % opt.outpath,
                    normalize=True)
            vutils.save_image(data[1],
                    '%s/label_val.png' % opt.outpath,
                    normalize=True)
            pred = pred.type(torch.FloatTensor)
            vutils.save_image(pred.data,
                    '%s/result_val.png' % opt.outpath,
                    normalize=True)
        if epoch % 25 == 0:
            lr = lr*decay
            if lr <= 0.000001:
                lr = 0.000001
            print('Learning Rate: {:.6f}'.format(lr))
            print('Max mJac: {:.4f}'.format(max_Jac))
            optimizerG = optim.Adam(NetS.parameters(), lr=lr, betas=(opt.beta1, 0.999))
            optimizerD = optim.Adam(NetC.parameters(), lr=lr, betas=(opt.beta1, 0.999))
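
# Hedged sketch: dice_loss, getEdge, and EPE are called above but not defined
# in this snippet. Plausible stand-ins, assuming a soft Dice loss, a
# morphological-gradient edge map, and a mean-L2 end-point error; the
# originals may differ.
def dice_loss(pred, target, smooth=1.0):
    # soft Dice loss: 1 - 2|P∩T| / (|P| + |T|)
    intersection = (pred * target).sum()
    return 1 - (2. * intersection + smooth) / (pred.sum() + target.sum() + smooth)

def getEdge(mask):
    # boundary map as a morphological gradient (dilation minus mask),
    # assuming a 4D float tensor
    return F.max_pool2d(mask, kernel_size=3, stride=1, padding=1) - mask

def EPE(edge_gt, edge_pred):
    # mean L2 distance between edge maps over the channel dimension
    return torch.norm(edge_gt - edge_pred, 2, 1).mean()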
Example #5
if cuda:
    NetS = NetS.cuda()
    #NetC = NetC.cuda()

# setup optimizer
lr = opt.lr
decay = opt.decay
optimizerG = optim.Adam(NetS.parameters(), lr=lr, betas=(opt.beta1, 0.999))
#optimizerD = optim.Adam(NetC.parameters(), lr=lr, betas=(opt.beta1, 0.999))
test_data = glob.glob('../data_5pictures/images_5/*.jpg')
test_label = glob.glob('../data_5pictures/masks_5/*.jpg')
test_lung = glob.glob('../data_5pictures/lungs_5/*.jpg')
test_med = glob.glob('../data_5pictures/med_5/*.jpg')
#dataloader = loader(Dataset(test_data),opt.batchSize)
dataloader_val = loader(
    Dataset_val(test_data, test_label, test_lung, test_med), opt.batchSize)

max_iou = 0
NetS.train()
NetS.load_state_dict(torch.load('./outputs_att_info/NetS_epoch_137_5.pth'))
print('load S ok')
NetS.eval()

# NetC.train()
# NetC.load_state_dict(torch.load('./outputs/NetC_epoch_220.pth'))
# print('load C ok')
# NetC.eval()

# analyze the training data
for i, data in enumerate(dataloader_val, 1):
    input, gt, lung, med, name = Variable(data[0]), Variable(
Example #6
def main(batch_size, n_epochs, lr, beta1, decay, _run):
    assert torch.cuda.is_available()

    writer = SummaryWriter(
        os.path.join(base_path, "runs", "experiment-{}".format(_run._id)))
    model_path = os.path.join(fs_observer.dir, "best_model.pth")

    outputs_path = os.path.join(fs_observer.dir, "outputs")
    if not os.path.exists(outputs_path):
        os.mkdir(outputs_path)

    cudnn.benchmark = True
    s_model = SegmentorNet().cuda()
    c_model = CriticNet().cuda()

    s_optimizer = optim.Adam(s_model.parameters(), lr=lr, betas=(beta1, 0.999))
    c_optimizer = optim.Adam(c_model.parameters(), lr=lr, betas=(beta1, 0.999))

    s_scheduler = CyclicLR(s_optimizer, base_lr=lr, max_lr=lr * 10)
    c_scheduler = CyclicLR(c_optimizer, base_lr=lr, max_lr=lr * 10)

    dataloaders = {
        "train": loader(Dataset('./'), batch_size),
        "validation": loader(Dataset_val('./'), 36)
    }
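    # note: validation uses a hard-coded batch size of 36, independent of batch_size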

    best_IoU = 0.0
    s_model.train()
    for epoch in range(n_epochs):
        progress_bar = tqdm(dataloaders["train"],
                            desc="Epoch {} - train".format(epoch))

        s_losses = []
        s_losses_joint = []
        c_losses = []
        dices = []

        for i, (inputs, targets) in enumerate(progress_bar):
            c_model.zero_grad()

            inputs = Variable(inputs).cuda()
            targets = Variable(targets).type(torch.FloatTensor).cuda()

            outputs = s_model(inputs)
            outputs = F.sigmoid(outputs)
            outputs = outputs.detach()
            outputs_masked = inputs.clone()
            inputs_mask = inputs.clone()

            for d in range(3):
                outputs_masked[:, d, :, :] = inputs_mask[:, d, :, :].unsqueeze(
                    1) * outputs
            outputs_masked = outputs_masked.cuda()

            results = c_model(outputs_masked)
            targets_masked = inputs.clone()
            for d in range(3):
                targets_masked[:, d, :, :] = inputs_mask[:, d, :, :].unsqueeze(
                    1) * targets

            targets_masked = targets_masked.cuda()
            targets_D = c_model(targets_masked)
            loss_D = -torch.mean(torch.abs(results - targets_D))
            loss_D.backward()
            c_optimizer.step()
            c_scheduler.batch_step()

            for p in c_model.parameters():
                p.data.clamp_(-0.05, 0.05)

            s_model.zero_grad()
            outputs = s_model(inputs)
            outputs = F.sigmoid(outputs)

            for d in range(3):
                outputs_masked[:, d, :, :] = inputs_mask[:, d, :, :].unsqueeze(
                    1) * outputs
            outputs_masked = outputs_masked.cuda()

            results = c_model(outputs_masked)
            for d in range(3):
                targets_masked[:, d, :, :] = inputs_mask[:, d, :, :].unsqueeze(
                    1) * targets
            targets_masked = targets_masked.cuda()

            targets_G = c_model(targets_masked)
            loss_dice = dice_loss(outputs, targets)
            loss_G = torch.mean(torch.abs(results - targets_G))
            loss_G_joint = loss_G + loss_dice
            loss_G_joint.backward()
            s_optimizer.step()
            s_scheduler.batch_step()

            c_losses.append(loss_D.data[0])
            s_losses.append(loss_G.data[0])
            s_losses_joint.append(loss_G_joint.data[0])
            dices.append(loss_dice.data[0])

            progress_bar.set_postfix(
                OrderedDict({
                    "c_loss": np.mean(c_losses),
                    "s_loss": np.mean(s_losses),
                    "s_loss_joint": np.mean(s_losses_joint),
                    "dice": np.mean(dices)
                }))

        mean_c_loss = np.mean(c_losses)
        mean_s_loss = np.mean(s_losses)
        mean_s_loss_joint = np.mean(s_losses_joint)
        mean_dice = np.mean(dices)

        c_loss_tag = "train.c_loss"
        s_loss_tag = "train.s_loss"
        s_losses_joint_tag = "train.s_loss_joint"
        dice_loss_tag = "train.loss_dice"

        writer.add_scalar(c_loss_tag, mean_c_loss, epoch)
        writer.add_scalar(s_loss_tag, mean_s_loss, epoch)
        writer.add_scalar(s_losses_joint_tag, mean_s_loss_joint, epoch)
        writer.add_scalar(dice_loss_tag, mean_dice, epoch)

        if epoch % 10 == 0:
            progress_bar = tqdm(dataloaders["validation"],
                                desc="Epoch {} - validation".format(epoch))

            s_model.eval()
            IoUs, dices = [], []
            for i, (inputs, targets) in enumerate(progress_bar):
                inputs = Variable(inputs).cuda()
                targets = Variable(targets).cuda()

                pred = s_model(inputs)
                pred = F.sigmoid(pred)  # training thresholds probabilities, so do the same here
                pred[pred < 0.5] = 0
                pred[pred >= 0.5] = 1

                pred = pred.type(torch.LongTensor)
                pred_np = pred.data.cpu().numpy()

                targets = targets.data.cpu().numpy()
                for x in range(inputs.size()[0]):
                    IoU = np.sum(pred_np[x][targets[x] == 1]) / float(
                        np.sum(pred_np[x]) + np.sum(targets[x]) -
                        np.sum(pred_np[x][targets[x] == 1]))
                    dice = np.sum(pred_np[x][targets[x] == 1]) * 2 / float(
                        np.sum(pred_np[x]) + np.sum(targets[x]))
                    IoUs.append(IoU)
                    dices.append(dice)

                progress_bar.set_postfix(
                    OrderedDict({
                        "mIoU": np.mean(IoUs, axis=0),
                        "mDice": np.mean(dices, axis=0)
                    }))

            s_model.train()
            IoUs = np.array(IoUs, dtype=np.float64)
            dices = np.array(dices, dtype=np.float64)
            mIoU = np.mean(IoUs, axis=0)
            mDice = np.mean(dices, axis=0)

            miou_tag = "validation.miou"
            mdice_tag = "validation.mdice"

            writer.add_scalar(miou_tag, mIoU, epoch)
            writer.add_scalar(mdice_tag, mDice, epoch)
            writer.flush()  # SummaryWriter has no commit(); flush pending events instead

            if mIoU > best_IoU:
                best_IoU = mIoU
                torch.save(s_model, model_path)

        if epoch % 25 == 0:
            lr = max(lr * decay, 0.00000001)
            s_optimizer = optim.Adam(s_model.parameters(),
                                     lr=lr,
                                     betas=(beta1, 0.999))
            c_optimizer = optim.Adam(c_model.parameters(),
                                     lr=lr,
                                     betas=(beta1, 0.999))
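
# Hedged sketch: the _run parameter and fs_observer above suggest this main()
# is a sacred experiment entrypoint. A plausible wiring (an assumption, not
# shown in the snippet):
#     from sacred import Experiment
#     from sacred.observers import FileStorageObserver
#     ex = Experiment("segan")
#     fs_observer = FileStorageObserver.create(os.path.join(base_path, "runs"))
#     ex.observers.append(fs_observer)
#     ex.automain(main)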