Code example #1
def predict_layer(net, layer, gpu):
    net.eval()
    samples = pp.make_patch(layer, resize=(128, 128), patch=5)

    patches = samples.shape[2]
    ret_samples = np.zeros_like(samples)
    for patch in range(patches):
        img = samples[:, :, patch]
        img = img[np.newaxis, np.newaxis, :, :]
        img = torch.from_numpy(img)

        img = img.float()
        if gpu:
            img = img.cuda(0)
            pass

        # Gradient history is only needed during training, so disable it for prediction
        with torch.set_grad_enabled(False):
            output = net(img)
            _, preds = torch.max(output, 1)
            ret_samples[:, :, patch] = preds[0, :, :].cpu().numpy()
            # ut.plot_img(preds[0, :, :], mPath.DataPath_Volume_Predict + "output"+str(patch)+".jpg", 'Output', 2)
            pass
        pass

    # preds = ut.Merge_Patches(ret_samples, [512, 512], 5)
    preds = ut.Merge_Patches_And(ret_samples, [512, 512], 5)
    ut.save_img(preds, mPath.DataPath_Volume_Predict + "output.jpg")
    return preds
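
As a quick orientation, here is a minimal call sketch for predict_layer. It assumes net and Use_GPU are set up as in code example #6 and that a slice is a 512x512 array already normalized to [0, 1]; the random array below is only a stand-in for a real CT slice.

import numpy as np

# Hypothetical smoke test: a random normalized "slice"
dummy_slice = np.random.rand(512, 512)
mask_2d = predict_layer(net, dummy_slice, Use_GPU)   # merged 512x512 prediction, also saved as output.jpg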
Code example #2
def predict_liverNii(net, nii, gpu, name):
    net.eval()
    nii = np.array(nii, dtype='float64')
    # Clamp HU values to the [-200, 250] window and rescale to [0, 1]
    nii[nii < -200] = -200
    nii[nii > 250] = 250
    nii = (nii + 200) / 450

    layer = nii.shape[2]
    ut.CheckDirectory(mPath.DataPath_Volume_Predict + 'temp/')

    output_layer = np.zeros_like(nii)
    for i in range(layer):
        current_layer = np.flip(nii[:, :, i], 0)
        current_layer = np.rot90(current_layer, 3)
        print("Predicting {}-{}".format(name, i))
        temp = predict_liver(current_layer, gpu)
        temp = np.flip(temp, 0)
        temp = np.rot90(temp, 3)
        output_layer[:, :, i] = temp

    output_layer = np.array(output_layer, dtype='uint8')
    new_img = nib.Nifti1Image(output_layer, affine=np.eye(4))
    nib.save(new_img, mPath.DataPath_Volume_Predict + name + '.nii')
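
A hedged driver sketch for predict_liverNii, assuming the volume is read with medpy.io.load as in code example #7 and that net and Use_GPU come from code example #6; the file path is illustrative, not one of the mPath constants.

from medpy.io import load

volume, header = load('data/volume-0.nii')           # (H, W, layers) volume in Hounsfield units, path is illustrative
predict_liverNii(net, volume, Use_GPU, 'volume-0')   # writes volume-0.nii under DataPath_Volume_Predict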
Code example #3
def predict_nii(net, nii, gpu, name):
    net.eval()
    nii = np.array(nii, dtype='float64')
    # Clamp HU values to [-200, 250] and rescale to 8-bit [0, 255]
    nii[nii < -200] = -200
    nii[nii > 250] = 250
    nii = (nii + 200) * 255 // 450
    nii = np.array(nii, dtype='uint8')

    layer = nii.shape[2]
    ut.CheckDirectory(mPath.DataPath_Volume_Predict + 'temp/')
    for i in range(layer):
        save(nii[:, :, i],
             mPath.DataPath_Volume_Predict + 'temp/' + str(i) + '.jpg')
        pass

    save_nii = np.zeros((512, 512, layer), dtype='float32')
    for i in range(layer):
        current_img = cv2.imread(mPath.DataPath_Volume_Predict + 'temp/' +
                                 str(i) + '.jpg')[:, :, 0]
        current_img = np.array(current_img, dtype='float64')
        current_img = current_img / 255
        samples = predict_layer(net, current_img, gpu)
        samples = np.flip(samples, 0)
        samples = np.rot90(samples, 3)
        save_nii[:, :, i] = samples
        print("Predicting {}-{}".format(name, i))
        pass

    new_img = nib.Nifti1Image(save_nii, affine=np.eye(4))
    nib.save(new_img, mPath.DataPath_Volume_Predict + name + '-.nii')
Code example #4
def predict_patch(net, img, gpu):
    net.eval()
    img = img[np.newaxis, np.newaxis, :, :]
    img = torch.from_numpy(img)

    img = img.float()
    if gpu:
        img = img.cuda()
        pass

    # Gradient history is only needed during training, so disable it for prediction
    with torch.set_grad_enabled(False):
        output = net(img)
        _, preds = torch.max(output, 1)
        ut.plot_img(preds[0, :, :],
                    mPath.DataPath_Volume_Predict + "output.jpg", 'Output', 2)
        pass
    pass
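
A minimal smoke-test sketch for predict_patch. The 128x128 patch size is an assumption based on the resize argument used in code example #1, and net and Use_GPU are assumed to come from code example #6.

import numpy as np

dummy_patch = np.random.rand(128, 128)     # stand-in for one normalized patch
predict_patch(net, dummy_patch, Use_GPU)   # writes output.jpg under DataPath_Volume_Predict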
Code example #5
def predict_layer_multi(net, layer_input, gpu):
    net.eval()

    ret_samples = np.zeros(
        (layer_input.shape[0], layer_input.shape[1], layer_input.shape[3]),
        dtype='float64')
    for patch in range(layer_input.shape[3]):
        # Reorder the patch from (H, W, C) to (C, H, W) and add a batch dimension
        input = np.ascontiguousarray(
            np.transpose(layer_input[:, :, :, patch], (2, 0, 1)))
        # cv2.imshow('1', input[1, :, :])
        # cv2.waitKey(0)
        input = input[np.newaxis, :, :, :]

        input = torch.from_numpy(input)
        input = input.type(torch.FloatTensor)

        if gpu:
            input = input.cuda(0)
            pass
        # Gradient history is only needed during training, so disable it for prediction
        with torch.set_grad_enabled(False):

            output = net(input)
            _, preds = torch.max(output, 1)
            ret_samples[:, :, patch] = preds[0, :, :].cpu().numpy()
            # cv2.imshow('2', ret_samples[:,:,patch])
            # cv2.waitKey(0)
            # ut.plot_img(preds[0, :, :], mPath.DataPath_Volume_Predict + "output"+str(patch)+".jpg", 'Output', 2)
            pass
        pass
    preds = ut.Merge_Patches_And(ret_samples, [512, 512], 5)
    preds[preds > 0] = 2
    # cv2.imshow('2', preds*127)
    # cv2.waitKey(0)
    return preds
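
predict_layer_multi expects a 4-D array shaped (height, width, channels, patches). Below is one way such an array could be assembled, assuming the channels are three adjacent slices patched with pp.make_patch as in code example #1; slice_prev, slice_cur and slice_next are placeholder normalized slices, and the stacking itself is an assumption rather than code from the repository.

p_prev = pp.make_patch(slice_prev, resize=(128, 128), patch=5)   # each call returns (H, W, patches)
p_cur = pp.make_patch(slice_cur, resize=(128, 128), patch=5)
p_next = pp.make_patch(slice_next, resize=(128, 128), patch=5)

layer_input = np.stack([p_prev, p_cur, p_next], axis=2)          # (H, W, 3, patches)
mask = predict_layer_multi(net, layer_input, Use_GPU)            # merged 512x512 prediction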
Code example #6
File: Predict.py Project: Kuailun/LiTS_Segmentation
from Try import Utils as ut, mPath

GPU_DEVICES = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_DEVICES
Use_GPU = torch.cuda.is_available()
Output_Class = 3


def predict_net():
    pass


if __name__ == '__main__':
    net = UNet(n_channels=1, n_classes=Output_Class)

    ut.CheckDirectory(mPath.DataPath_Net_CheckPoint)
    if Use_GPU:
        net.cuda()
        net.load_state_dict(torch.load(mPath.DataPath_Net_Normal))
        pass
    else:
        net.load_state_dict(
            torch.load(mPath.DataPath_Net_Normal, map_location='cpu'))

    try:
        predict_net()
        train_net(net,
                  lr=learning_rate,
                  epochs=Train_Epochs,
                  batch_size=Train_Batch_Size,
                  val_percent=Validation_Percent,
Code example #7
import os

import mPath

from medpy.io import load, save
import os.path
import numpy as np
import csv
import cv2
import random

from Try import Utils as ut
ut.CheckDirectory(mPath.DataPath_Volume_Predict)

def getRangImageDepth(image):
    """
    :param image: 3-D array (height, width, depth)
    :return: (startposition, endposition), the first and last slice indices along depth that contain non-zero voxels
    """
    firstflag = True
    startposition = 0
    endposition = 0
    for z in range(image.shape[2]):
        notzeroflag = np.max(image[:, :, z])
        if notzeroflag and firstflag:
            startposition = z
            firstflag = False
        if notzeroflag:
            endposition = z
    return startposition, endposition
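
A small self-contained check of getRangImageDepth; the toy mask below has foreground voxels only in slices 2 through 4.

import numpy as np

mask = np.zeros((8, 8, 6))
mask[2:4, 2:4, 2:5] = 1                  # foreground only in slices 2, 3 and 4
start, end = getRangImageDepth(mask)
print(start, end)                        # prints: 2 4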
Code example #8
File: Train.py Project: Kuailun/LiTS_Segmentation
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.01,
              val_percent=0.05,
              save_cp=True,
              gpu=True,
              classes=3):
    optimizer=optim.SGD(net.parameters(),
                        lr=lr,
                        momentum=0.9,
                        weight_decay=0.0005)
    # optimizer=optim.adam(net.parameters(),
    #                      lr=lr,
    #                      momentum=0.9,
    #                      weight_decay=0.0005)
    # optimizer.zero_grad()
    # criterion=nn.BCELoss()
    criterion= Loss.MultclassDiceLoss()

    mTrain,mValid = LiTS_Data.split_to_train_val(mPath.CSVPath + "data.csv", Train_Mode,val_percent)

    mTrainDataset= LiTS_Data.Dataset_WithLiver(mTrain, Train_Mode,classes=classes,is_train=True,randomize=True)
    mValDataset= LiTS_Data.Dataset_WithLiver(mValid,  Train_Mode, classes=classes,is_train=False,randomize=False)
    # mTrainDataset = LiTS_Data.Dataset_Liver(mTrain, is_train=True, randomize=True)
    # mValDataset = LiTS_Data.Dataset_Liver(mValid, is_train=False, randomize=False)

    mTrainDataloader=DataLoader(dataset=mTrainDataset,batch_size=batch_size,shuffle=True)
    modeList=[]
    if len(mValid)==0:
        modeList=['train']
    else:
        mValDataloader = DataLoader(dataset=mValDataset, batch_size=batch_size, shuffle=True)
        modeList = ['train', 'val']


    best_acc=0.0
    batch_count=0

    # Begin training
    iter_train=0
    iter_val=0
    for epoch in range(epochs):
        print()
        print()
        print('-' * 10)
        print("Starting epoch {}/{}".format(epoch+1,epochs))

        adjust_learning_rate(optimizer,epoch)

        for phase in modeList:
            print()
            if(phase=='train'):
                net.train()
                dataset=mTrainDataloader
                dataLength=len(mTrainDataset)
            else:
                net.eval()
                dataset=mValDataloader
                dataLength = len(mValDataset)
                pass

            running_loss=0.0
            running_corrects=0
            running_dice=0.0

            for index,sample in enumerate(dataset):
                batch_count=batch_count+1
                img = sample['img']
                mask = sample['mask']

                if gpu:
                    img = img.cuda()
                    mask = mask.cuda()
                    pass

                img = img.float()
                mask = mask.float()

                # Zero the parameter gradients
                optimizer.zero_grad()

                # Only track gradient history in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    output = net(img)
                    _, preds = torch.max(output, 1)

                    preds_onehot=torch.zeros_like(mask)
                    for i in range(preds_onehot.shape[1]):
                        preds_onehot[:,i,:,:]=preds==i
                        pass

                    # loss = criterion(output, mask)
                    loss = criterion(output, mask,weights)

                    if phase=='train':
                        loss.backward()
                        optimizer.step()
                        pass

                # Record the loss and accuracy
                subloss=loss.item()
                subaccTotal=torch.sum(preds_onehot==mask.data)

                subaccTotal=subaccTotal.float()
                subaccTotal=subaccTotal/img.shape[0]/classes/img.shape[2]/img.shape[3]

                running_loss+=subloss*img.size(0)
                running_corrects+=subaccTotal*img.size(0)

                mask_ori=mask[:,1,:,:]
                sub_dice=ut.dice_cofficient(mask_ori,preds,1)

                samples=mask.shape[0]
                running_dice=running_dice+sub_dice*samples

                if(phase=='train'):
                    writer.add_scalar('train_loss', subloss, iter_train)
                    writer.add_scalar('train_acc', subaccTotal, iter_train)
                    writer.add_scalar('train_dice', sub_dice, iter_train)
                    iter_train+=1
                elif(phase=='val'):
                    writer.add_scalar('val_loss', subloss, iter_val)
                    writer.add_scalar('val_acc', subaccTotal, iter_val)
                    writer.add_scalar('val_dice', sub_dice, iter_val)
                    iter_val += 1
                # ut.plot_img(img[0, 0, :, :], mPath.DataPath_Log + "Input0-" + str(epoch) + ".jpg", "Input", 2)
                # ut.plot_img(mask[0, :, :, :], mPath.DataPath_Log + "Mask0-" + str(epoch) + ".jpg", "Mask", 2)
                # ut.plot_img(preds[0, :, :], mPath.DataPath_Log + "Output0-" + str(epoch) + ".jpg", "Output", 2)
                print('{} Loss:{:.6f} Acc:{:.10f} Dice:{:.10f}'.format(index, subloss, subaccTotal,sub_dice))
                pass
            if not (dataLength==0):
                epoch_loss=running_loss/dataLength
                epoch_acc=running_corrects/dataLength
                epoch_dice=running_dice/dataLength
            else:
                epoch_loss=0
                epoch_acc=0
                epoch_dice=0

            print('{} Loss:{:.6f} Acc:{:.10f} Dice:{:.10f}'.format(phase,epoch_loss,epoch_acc,epoch_dice))
            # writer.add_scalars('scalar/epoch_data', {'epoch_loss': epoch_loss, 'epoch_acc': epoch_acc},
            #                    epoch)
            if (phase == 'train'):
                writer.add_scalar('epoch_train_loss', epoch_loss, epoch)
                writer.add_scalar('epoch_train_acc', epoch_acc, epoch)
                writer.add_scalar('epoch_train_dice', epoch_dice, epoch)
            elif (phase == 'val'):
                writer.add_scalar('epoch_val_loss', epoch_loss, epoch)
                writer.add_scalar('epoch_val_acc', epoch_acc, epoch)
                writer.add_scalar('epoch_val_dice', epoch_dice, epoch)

            if epoch%Output_per_epoch==0:
                ut.plot_img(img[0,1,:,:], mPath.DataPath_Log + "Input0-" + str(epoch) + ".jpg", "Input",2)
                ut.plot_img(mask[0, :, :, :], mPath.DataPath_Log + "Mask0-" + str(epoch) + ".jpg", "Mask",2)
                ut.plot_img(preds[0, :, :], mPath.DataPath_Log + "Output0-" + str(epoch) + ".jpg", "Output",2)

            if(phase=='val' and epoch_dice>best_acc):
                best_acc=epoch_dice
                torch.save(net, mPath.DataPath_Net_Normal)
                pass
            pass
        pass
    torch.save(net, mPath.DataPath_Net_Final)
    writer.close()
    pass
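
train_net calls adjust_learning_rate(optimizer, epoch), which is not included in the excerpt above. The following is a minimal sketch of one common choice, a step decay of the SGD learning rate; the base rate, decay factor and step interval are assumptions, not the schedule the project actually uses.

def adjust_learning_rate(optimizer, epoch, base_lr=0.01, decay=0.1, step=30):
    # Hypothetical step-decay schedule: multiply the learning rate by decay every step epochs
    lr = base_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr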