Example #1
def create_img_lists(imglist):
    partitions = np.load(os.path.join(cg.partition_dir,
                                      'partitions_local_adapted.npy'),
                         allow_pickle=True)
    print(partitions.shape)

    for i, partition in enumerate(partitions):
        # Define which time frame(s) in each case will be used in the model.
        # Here the frame(s) are pre-defined and saved in a text file
        # "time_frame_picked_for_pretrained_AI.txt" in each case folder on octomore;
        # you can define them in your own way.
        t = [
            os.path.join(c, 'time_frame_picked_for_pretrained_AI.txt')
            for c in partition
        ]
        t = [int(open(s, 'r').read()) for s in t]  # t becomes the pre-defined frame index for each case

        # find the file path of segmentation file of this/these frame(s)
        segs = [[
            os.path.join(c, 'seg-nii-1.5-upsample-retouch-adapted',
                         'pred_s_' + str(f) + '.npy')
        ] for c, f in zip(partition, t)]  # path of manual segmentation
        segs = dv.collapse_iterable(segs)
        imgs = [
            os.path.join(os.path.dirname(os.path.dirname(s)),
                         'img-nii-0.625-adapted',
                         str(ff.find_timeframe(s, 1, '_')) + '.npy')
            for s in segs
        ]  # corresponding image file

        assert len(imgs) == len(segs)
        os.makedirs(os.path.join(cg.partition_dir, imglist), exist_ok=True)
        np.save(
            os.path.join(cg.partition_dir, imglist,
                         'img_list_' + str(i) + '.npy'), imgs)
        np.save(
            os.path.join(cg.partition_dir, imglist,
                         'seg_list_' + str(i) + '.npy'), segs)
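
The snippet leans on two helpers whose behavior is implied by how the paths are built: dv.collapse_iterable flattens the nested one-element lists, and ff.find_timeframe pulls the frame number out of a file name such as pred_s_9.npy. A minimal sketch of what such helpers could look like (an assumption; the actual dvpy and U_Net_function_list implementations may differ):

import os

def collapse_iterable(nested):
    # Flatten one level of nesting: [[a], [b]] -> [a, b] (assumed behavior).
    return [item for sub in nested for item in sub]

def find_timeframe(path, num_extensions, delimiter='_'):
    # Strip num_extensions extension(s) from the file name, then return the
    # integer after the last delimiter, e.g. 'pred_s_9.npy' -> 9 (assumed behavior;
    # num_extensions=2 would handle double extensions such as '.nii.gz').
    base = os.path.basename(path)
    for _ in range(num_extensions):
        base = os.path.splitext(base)[0]
    return int(base.split(delimiter)[-1])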
Example #2
def create_img_lists(imglist):
    partitions = np.load(os.path.join(cg.partition_dir, 'partitions_local_adapted.npy'), allow_pickle=True)
    for i, partition in enumerate(partitions):
        if imglist == 'one_time_frame_LV':
            t = [os.path.join(c, 'time_frame_picked_for_pretrained_AI.txt') for c in partition]
            t = [int(open(s, 'r').read()) for s in t]
            segs = [[os.path.join(c, 'seg-nii-1.5-upsample-retouch-adapted-LV', 'pred_s_'+str(f)+'.npy')] for c, f in zip(partition, t)]
            segs = dv.collapse_iterable(segs)
            imgs = [os.path.join(os.path.dirname(os.path.dirname(s)), 'img-nii-0.625-adapted', str(ff.find_timeframe(s, 1, '_')) + '.npy') for s in segs]
        else:
            # guard against unhandled imglist values; otherwise imgs/segs below would be undefined
            raise ValueError('unknown imglist: ' + imglist)

        assert len(imgs) == len(segs)
        os.makedirs(os.path.join(cg.partition_dir, imglist), exist_ok=True)
        np.save(os.path.join(cg.partition_dir, imglist, 'img_list_' + str(i) + '.npy'), imgs)
        np.save(os.path.join(cg.partition_dir, imglist, 'seg_list_' + str(i) + '.npy'), segs)
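
A call such as the following (assuming partitions_local_adapted.npy already exists in cg.partition_dir) writes one image list and one segmentation list per partition:

create_img_lists('one_time_frame_LV')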
Example #3
# System
import os
import glob as gb
import pathlib as plib
import numpy as np
import dvpy as dv
import nibabel as nb
import segcnn
import segcnn.utils as ut
import U_Net_function_list as ff
from keras.layers import Input  # or tensorflow.keras, depending on your setup
from keras.models import Model
cg = segcnn.Experiment()

np.random.seed(cg.seed)

# make the directories
os.makedirs(cg.partition_dir, exist_ok=True)
# Create a list of all patients.
patient_list = ff.find_all_target_files(['*/*'], cg.local_dir)
print(patient_list.shape, patient_list[0:10])

# Shuffle the patients.
np.random.shuffle(patient_list)
print(patient_list[0:10])

# Split the list into `cg.num_partitions` (approximately) equal sublists.
partitions = np.array_split(patient_list, cg.num_partitions)
# With seed = 1, the VR data partitions are:
# batch 0: Abnormal = 27, Normal = 35; batch 1: 31, 31; batch 2: 24, 38; batch 3: 27, 34; batch 4: 31, 30

# Save the partitions.
np.save(os.path.join(cg.partition_dir,'partitions_local_adapted.npy'), partitions)
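
# Note (a sketch, not part of the original script): np.array_split may produce
# sublists of unequal length, so the saved array has dtype=object and must be
# reloaded with allow_pickle=True, as the loaders above and below do:
# parts = np.load(os.path.join(cg.partition_dir, 'partitions_local_adapted.npy'), allow_pickle=True)
# for n, part in enumerate(parts):
#     print('partition', n, 'holds', len(part), 'patients')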

def validate(batch):

    #===========================================
    dv.section_print('Calculating Image Lists...')

    partition_file_name = 'one_time_frame_4classes'

    imgs_list_tst = [
        np.load(os.path.join(cg.partition_dir, partition_file_name,
                             'img_list_' + str(p) + '.npy'),
                allow_pickle=True) for p in range(cg.num_partitions)
    ]
    segs_list_tst = [
        np.load(os.path.join(cg.partition_dir, partition_file_name,
                             'seg_list_' + str(p) + '.npy'),
                allow_pickle=True) for p in range(cg.num_partitions)
    ]

    if batch is None:
        raise ValueError('No batch was provided')
        # print('pick all batches')
        # batch = 'all'
        # imgs_list_tst = np.concatenate(imgs_list_tst)
        # segs_list_tst = np.concatenate(segs_list_tst)
    else:
        imgs_list_tst = imgs_list_tst[batch]
        segs_list_tst = segs_list_tst[batch]

    print(imgs_list_tst.shape)
    #===========================================
    dv.section_print('Loading Saved Weights...')

    # Build the U-NET
    shape = cg.dim + (1, )
    model_inputs = [Input(shape)]
    model_outputs = []
    _, _, unet_output = dv.tf_2d.get_unet(
        cg.dim,
        cg.num_classes,
        cg.conv_depth,
        layer_name='unet',
        dimension=cg.unetdim,
        unet_depth=cg.unet_depth,
    )(model_inputs[0])
    model_outputs += [unet_output]
    model = Model(inputs=model_inputs, outputs=model_outputs)

    # Load weights
    model.load_weights(model_files[0], by_name=True)

    #===========================================
    dv.section_print('Calculating Predictions...')
    # build data generator
    valgen = dv.tf_2d.ImageDataGenerator(
        cg.unetdim,
        input_layer_names=['input_1'],
        output_layer_names=['unet'],
    )

    # predict
    for img, seg in zip(imgs_list_tst, segs_list_tst):
        patient_id = os.path.basename(os.path.dirname(os.path.dirname(img)))
        patient_class = os.path.basename(
            os.path.dirname(os.path.dirname(os.path.dirname(img))))
        print(img)
        print(patient_class, patient_id, '\n')

        u_pred = model.predict_generator(
            valgen.flow(
                np.asarray([img]),
                np.asarray([seg]),
                slice_num=cg.slice_num,
                batch_size=cg.slice_num,
                relabel_LVOT=cg.relabel_LVOT,
                shuffle=False,
                input_adapter=ut.in_adapt,
                output_adapter=ut.out_adapt,
                shape=cg.dim,
                input_channels=1,
                output_channels=cg.num_classes,
                adapted_already=cg.adapted_already,
            ),
            verbose=1,
            steps=1,
        )

        # save u_net segmentation
        time = ff.find_timeframe(seg, 1, '_')
        u_gt_nii = nb.load(
            os.path.join(cg.seg_data_dir, patient_class, patient_id,
                         'seg-pred-1.5-upsample-retouch',
                         'pred_s_' + str(time) + '.nii.gz')
        )  # load the manual segmentation file for affine matrix
        u_pred = np.rollaxis(u_pred, 0, 3)
        u_pred = np.argmax(u_pred, axis=-1).astype(np.uint8)
        u_pred = dv.crop_or_pad(u_pred, u_gt_nii.get_fdata().shape)
        u_pred[u_pred == 3] = 4  # relabel class 3 as 4 (used for LVOT only)
        u_pred = nb.Nifti1Image(u_pred, u_gt_nii.affine)
        save_file = os.path.join(cg.seg_data_dir, patient_class, patient_id,
                                 'seg-pred-0.625-4classes',
                                 seg_filename + str(time) +
                                 '.nii.gz')  # predicted segmentation file
        os.makedirs(os.path.dirname(save_file), exist_ok=True)
        nb.save(u_pred, save_file)
########### Define the model weight you are going to use
batch = 0
Batch = str(batch)
epoch = '052'
view = '2C'  # set as default
vector = ''  #ignore
suffix = ''  #ignore
test_set = 'VR_1tf_4classes'
print(view, vector, Batch)

model_folder = os.path.join(cg.fc_dir, 'models', 'model_batch' + Batch,
                            '2D-UNet-seg')

filename = 'model-' + test_set + '_batch' + Batch + '_s' + suffix + '-' + epoch + '-*'

model_files = ff.find_all_target_files([filename], model_folder)
assert len(model_files) == 1
print(model_files)

seg_filename = 'pred_s_'  # define the name of the predicted segmentation file
###########
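
# With the weights located, the evaluation is presumably launched with a call
# like the following (an assumption; the call site is not shown in this snippet):
# validate(batch)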


Example #6
#!/usr/bin/env python

import os
import numpy as np
import U_Net_function_list as ff
import nibabel as nb
import pandas as pd
import segcnn

cg = segcnn.Experiment()

# find patient list
patient_list = ff.find_all_target_files(['*/*'], cg.seg_data_dir)
print(patient_list.shape)

result = []
for p in patient_list:
    patient_id = os.path.basename(p)
    patient_class = os.path.basename(os.path.dirname(p))
    print(patient_class, patient_id)

    # read the time frame file
    with open(os.path.join(p, 'time_frame_picked_for_pretrained_AI.txt'), 'r') as f:
        t = int(f.read())

    # read threshold setting:
    threshold_file = os.path.join(p, 'threshold.txt')
    if not os.path.isfile(threshold_file):
        raise ValueError('no threshold txt file')
    else:
        threshold = open(threshold_file, "r")
Example #7
import os
import numpy as np
import nibabel as nib
import U_Net_function_list as ff
import segcnn

cg = segcnn.Experiment()


def read_nib(file):
    # Load a NIfTI file; return the image object, its data array, and the shape.
    img = nib.load(file)
    data = img.get_fdata()
    shape = data.shape
    return img, data, shape
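
# Example use (hypothetical file name):
# img, data, shape = read_nib('pred_s_5.nii.gz')
# print(shape, img.affine)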


pred_seg_file = 'pred_s_'
excel_filename = 'UNet_VR_1tf_2class_segmentation_volumetric_evaluation.xlsx'
structure_list = [('LV', 1), ('LA', 2), ('LAA', 3), ('LVOT', 4), ('Aorta', 5),
                  ('PV', 6)]  # (structure name, label value in the segmentation)
structure_choice = [0]
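
# For the volumetric evaluation implied by the file names above, the volume of
# one structure can be computed from the label map and the voxel size (a sketch;
# this part of the original code is not shown). For ('LV', 1):
# img, data, _ = read_nib(seg_file)                     # seg_file is a hypothetical path
# voxel_ml = np.prod(img.header.get_zooms()) / 1000.0   # mm^3 per voxel -> mL
# lv_volume_ml = np.sum(data == 1) * voxel_ml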

column_list = ['Patient_class', 'Patient_ID', 'batch']

patient_list = ff.get_patient_list_from_csv(
    os.path.join(cg.spreadsheet_dir, 'Final_patient_list_include.csv'))
partition_file_path = os.path.join(cg.partition_dir,
                                   'partitions_local_adapted.npy')

result = []
count = 0
for p in patient_list:
    patient_id = p[1]
    patient_class = p[0]
    patient_batch = ff.locate_batch_num_for_patient(patient_class, patient_id,
                                                    partition_file_path)
    print(patient_class, patient_id, patient_batch)

    # read time frame picked
    f = open(
        os.path.join(cg.seg_data_dir, patient_class, patient_id,
Example #8
import os
import dvpy as dv
import dvpy.tf_2d
import segcnn
import U_Net_function_list as ff
from keras.layers import Input  # or tensorflow.keras, depending on your setup

cg = segcnn.Experiment()

########### Define the model file########
batch = 0
Batch = str(batch)
epoch = '080'
view = '2C'  # set as default
vector = ''  # ignore
suffix = ''  #ignore
test_set = 'VR_1tf_4class'
print(view, vector, Batch)

model_folder = os.path.join(cg.fc_dir, 'models', 'model_batch' + Batch,
                            '2D-UNet-seg')
filename = 'model-' + test_set + '_batch' + Batch + '_s' + suffix + '-' + epoch + '-*'
model_files = ff.find_all_target_files([filename], model_folder)
assert len(model_files) == 1
print(model_files)

seg_filename = 'pred_s_'  # define the file name of the predicted segmentation
###########

dv.section_print('Loading Saved Weights...')

# BUILD U-NET

shape = cg.dim + (1, )
model_inputs = [Input(shape)]
model_outputs = []
_, _, unet_output = dvpy.tf_2d.get_unet(
    cg.dim,
Example #9
#!/usr/bin/env python

# this script checks the dimensions of each case so that
# we can set a reasonable cropping/padding size

import os
import numpy as np
import U_Net_function_list as ff
import nibabel as nib
import segcnn

cg = segcnn.Experiment()

patient_list = ff.get_patient_list_from_csv(
    os.path.join(cg.spreadsheet_dir, 'Final_patient_list_include.csv'))
print(len(patient_list))

x_size = []
y_size = []
z_size = []
for p in patient_list:
    patient_id = p[1]
    patient_class = p[0]
    vol = os.path.join(cg.image_data_dir,patient_class,patient_id,'img-nii-0.625/0.nii.gz')
    vol_data = nib.load(vol).get_fdata()
    dimension = vol_data.shape
    x_size.append(dimension[0])
    y_size.append(dimension[1])
    z_size.append(dimension[-1])
    print(patient_class,patient_id,dimension)
x_size = np.asarray(x_size)
y_size = np.asarray(y_size)
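
# A sketch (not the original continuation): choose a crop/pad size that covers
# most cases, e.g. the maximum or a high percentile of each axis, rounded up to
# a hardware-friendly multiple of 8:
# for name, sizes in [('x', x_size), ('y', y_size), ('z', np.asarray(z_size))]:
#     p95 = int(np.ceil(np.percentile(sizes, 95) / 8.0) * 8)
#     print(name, 'max =', np.max(sizes), '95th percentile (padded) =', p95)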
Example #10
import os
import glob as gb
import pathlib as plib
import numpy as np
import dvpy as dv
import segcnn
import U_Net_function_list as ff
cg = segcnn.Experiment()

np.random.seed(cg.seed)

# make the directories
os.makedirs(cg.partition_dir, exist_ok=True)
# Create a list of all patients.
patient_list = ff.find_all_target_files(
    ['*/*'], cg.local_dir
)  # all the patients (with adapted image volumes) saved locally on octomore
# Alternatively, build the list from a spreadsheet of (class, id) pairs:
# patient_list1 = ff.get_patient_list_from_csv(os.path.join(cg.spreadsheet_dir,'Lead_patient_list.csv'))
# patient_list = np.asarray([os.path.join(cg.local_dir, p[0], p[1]) for p in patient_list1])
print(patient_list.shape, patient_list[0:3])

# Randomly Shuffle the patients.
np.random.shuffle(patient_list)
print(patient_list[0:3])

# Split the list into `cg.num_partitions` (approximately) equal sublists.
partitions = np.array_split(patient_list, cg.num_partitions)
# With seed = 1, the VR data partitions are:
Example #11
#!/usr/bin/env python

# this script will adapt the image (crop/pad + normalize + relabel + resize...)
import os
import numpy as np
import dvpy as dv
import segcnn
import segcnn.utils as ut
import U_Net_function_list as ff
cg = segcnn.Experiment()

# define patient list
patient_list = ff.get_patient_list_from_csv(
    os.path.join(cg.spreadsheet_dir, 'Final_patient_list_include.csv'))
print(len(patient_list))

for p in patient_list:
    patient_class = p[0]
    patient_id = p[1]

    # read the time frame file
    with open(os.path.join(cg.seg_data_dir, patient_class, patient_id,
                           'time_frame_picked_for_pretrained_AI.txt'), 'r') as t_file:
        t = t_file.read().rstrip('\n')  # drop the trailing newline, if any

    print(patient_class, patient_id, t)

    # # adapt input image - CT volume
Example #12
#!/usr/bin/env python

# this script will adapt the image (crop/pad + normalize + relabel + resize...)
import os
import shutil
import numpy as np
import dvpy as dv
import segcnn
import segcnn.utils as ut
import U_Net_function_list as ff
cg = segcnn.Experiment()

# define patient list
patient_list = ff.get_patient_list_from_csv(
    os.path.join(cg.spreadsheet_dir, 'Final_patient_list_include.csv'))
print(len(patient_list))

for p in patient_list:
    patient_class = p[0]
    patient_id = p[1]

    # read the time frame file
    with open(os.path.join(cg.seg_data_dir, patient_class, patient_id,
                           'time_frame_picked_for_pretrained_AI.txt'), 'r') as t_file:
        t = t_file.read().rstrip('\n')  # drop the trailing newline, if any

    print(patient_class, patient_id)

    # adapt input image - CT volume

# compress
# patient_list = ff.get_patient_list_from_csv(os.path.join(cg.spreadsheet_dir,'Final_patient_list_include.csv'))
# print(len(patient_list))
# for p in patient_list:
#     print(p[0],p[1])
#     f1 = os.path.join(cg.seg_data_dir,p[0],p[1],'seg-nii-1.5-upsample-retouch-adapted-LV')
#     f2 = os.path.join(cg.seg_data_dir,p[0],p[1],'seg-nii-1.5-upsample-retouch-adapted')
#     shutil.make_archive(os.path.join(cg.seg_data_dir,p[0],p[1],'seg-nii-1.5-upsample-retouch-adapted-LV'),'zip',f1)
#     shutil.make_archive(os.path.join(cg.seg_data_dir,p[0],p[1],'seg-nii-1.5-upsample-retouch-adapted'),'zip',f2)
#     shutil.rmtree(f1)
#     shutil.rmtree(f2)

# change file names
patient_list = ff.find_all_target_files(['*/*'], cg.image_data_dir)

for p in patient_list:
    files = ff.find_all_target_files(['img-nii/img_*.nii.gz'], p)
    print(p, len(files))

    if len(files) == 0:
        print('no data. skip')
    else:
        # rename img_<t>.nii.gz to <t>.nii.gz: copy first, then remove the originals
        for f in files:
            tf = ff.find_timeframe(f, 2, '_')
            if not os.path.isfile(os.path.join(p, 'img-nii', str(tf) + '.nii.gz')):
                shutil.copy(f, os.path.join(p, 'img-nii', str(tf) + '.nii.gz'))

        for f in files:
            os.remove(f)
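
Most of these snippets rely on ff.find_all_target_files; the call sites (glob-style patterns, a result with a .shape attribute) suggest a glob wrapper that returns a NumPy array of paths. A minimal sketch under that assumption (the actual U_Net_function_list code may differ):

import glob as gb
import os
import numpy as np

def find_all_target_files(target_file_name, main_path):
    # Collect every file matching any of the glob patterns under main_path and
    # return them as a sorted NumPy array of paths (assumed behavior).
    found = []
    for pattern in target_file_name:
        found.extend(gb.glob(os.path.join(main_path, pattern)))
    return np.asarray(sorted(found))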