Example #1
    def load_scan(self, file_name, extension):
        """Load a 3D scan stored as '.mhd' or '.nrrd'."""

        if extension == '.mhd':
            scan, _, _ = futil.load_itk(file_name)

        elif extension == '.nrrd':
            scan, _, _ = futil.load_nrrd(file_name)

        else:
            raise ValueError('unsupported extension: ' + extension)

        return np.expand_dims(scan, axis=-1)
Example #2
    def load_scan(self, file_name):
        """Load an '.mhd' or '.nrrd' 3D scan thread-safely.

        Returns the scan with shape (z, y, x, 1) and the spacing ordered (z, y, x).
        """
        with self.lock:
            print(threading.current_thread().name +
                  " got the lock, thread id: " + str(threading.get_ident()) +
                  ", preparing to load data")
            scan, origin, spacing = futil.load_itk(
                file_name)  # all outputs are ordered (z, y, x)
            print(threading.current_thread().name +
                  " loaded the data successfully, releasing the lock")
        return np.expand_dims(scan, axis=-1), spacing  # shape (z, y, x, 1)
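
A hedged sketch of the concurrent use this lock enables (the paths and the shared loader instance are hypothetical; list.append is atomic under the CPython GIL):

import threading

paths = ['SSc_patient_52.mhd', 'SSc_patient_53.mhd']  # hypothetical files
results = []
threads = [threading.Thread(target=lambda p=p: results.append(loader.load_scan(p)))
           for p in paths]
for t in threads:
    t.start()
for t in threads:
    t.join()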
Example #3
def main():
    task = 'vessel'
    model = '1591438344_307_lr0.0001ld0m6l0m7l0pm0.5no_label_dirSScao0ds0dr1bn1fn16trszNonetrzszNonetrspNonetrzspNoneptch_per_scan500tr_nb18ptsz144ptzsz96'
    for file_name in ['52', '53']:
        pred_file_name = '/data/jjia/new/results/vessel/valid/pred/SSc/' + model + '/SSc_patient_' + file_name + '.mhd'
        gdth_file_name = '/data/jjia/mt/data/vessel/valid/gdth_ct/SSc/SSc_patient_' + file_name + '.mhd'
        gdth, gdth_origin, gdth_spacing = futil.load_itk(gdth_file_name)
        pred, pred_origin, pred_spacing = futil.load_itk(pred_file_name)

        # pred, pred_origin, pred_spacing = futil.load_itk('/data/jjia/new/results/SSc_51_lobe_segmentation/loberecon/SSc_patient_51.mhd')

        # connected_pred = largest_connected_parts(pred, nb_parts_saved=5)
        # pred[connected_pred == 0] = 0
        # futil.save_itk(
        #     'results/SSc_51_lobe_segmentation/SSc_patient_51_connected.nrrd',
        #     pred, pred_origin, pred_spacing)

        #
        # pred, pred_origin, pred_spacing = futil.load_itk(pred_file_name)
        #
        # connected_pred = largest_connected_parts(pred, nb_parts_saved=5)
        # pred[connected_pred == 0] = 0
        # futil.save_itk('/data/jjia/new/results/lobe/valid/pred/GLUCOLD/' + model + '/GLUCOLD_patients_' + file_name + '_connected.nrrd', pred, pred_origin, pred_spacing)

        if task == 'vessel':
            labels = [1]
        elif task == 'lobe':
            labels = [4, 5, 6, 7, 8]
        gdth = one_hot_encode_3D(gdth, labels=labels)
        pred = one_hot_encode_3D(pred, labels=labels)

        metrics_dict_all_labels = get_metrics_dict_all_labels(labels, gdth, pred, spacing=gdth_spacing[::-1])
        metrics_dict_all_labels['filename'] = pred_file_name  # add a new key to the metrics
        data_frame = pd.DataFrame(metrics_dict_all_labels)
        data_frame.to_csv(
            '/data/jjia/new/results/vessel/valid/pred/SSc/' + model + '/SSc_patient_' + file_name + 'connected.csv',
            index=False)
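
one_hot_encode_3D is not shown in these examples; a minimal sketch consistent with how it is called above (one binary channel per label, stacked along the last axis) might look like this:

import numpy as np

def one_hot_encode_3D(scan, labels):
    # hypothetical re-implementation: shape (z, y, x) -> (z, y, x, len(labels))
    return np.stack([(scan == l).astype(np.uint8) for l in labels], axis=-1)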
Example #4
    def write_connected_lobe(mylock):
        # GPU-bound inference cannot be multithreaded, so these worker threads
        # do only the CPU-bound post-processing (keeping the largest connected parts).
        while True:
            with mylock:
                ct_fpath = None
                if len(scan_files):  # once scan_files is empty, the threads should stop waiting
                    print(
                        threading.current_thread().name +
                        " gets the lock, thread id: " +
                        str(threading.get_ident()) +
                        ", preparing to compute the largest 5 lobes")
                    ct_fpath = scan_files.pop()
                    print(threading.current_thread().name +
                          " gets the data, thread id: " +
                          str(threading.get_ident()) +
                          ", preparing to release the lock.")

            if ct_fpath is not None:
                t1 = time.time()
                print(threading.current_thread().name + " is computing ...")
                pred, pred_origin, pred_spacing = futil.load_itk(ct_fpath)
                pred = largest_connected_parts(pred, nb_need_saved=5)
                suffix_len = len(os.path.basename(ct_fpath).split(".")[-1])
                if target_dir:
                    new_dir = target_dir
                else:
                    new_dir = os.path.dirname(ct_fpath) + "/biggest_5_lobe"
                if not os.path.exists(new_dir):
                    os.makedirs(new_dir)
                    print('successfully created directory:', new_dir)
                write_fpath = new_dir + "/" + os.path.basename(
                    ct_fpath)[:-suffix_len - 1] + '.mhd'
                futil.save_itk(write_fpath, pred, pred_origin, pred_spacing)
                t3 = time.time()
                print("successfully save largest 5 lobes at " + write_fpath)
                print(
                    "it costs tis seconds to compute the largest 5 lobes of the data "
                    + str(t3 - t1))
            else:
                print(threading.current_thread().name +
                      " scan_files is empty, finishing the thread")
                return None
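
A hedged sketch of how this consumer would be driven, mirroring the worker setup in write_preds_to_disk (Example #10); scan_files and target_dir are assumed to already exist in the enclosing scope:

import threading

mylock = threading.Lock()
workers = [threading.Thread(target=write_connected_lobe, args=(mylock,))
           for _ in range(4)]  # 4 workers is an arbitrary choice
for w in workers:
    w.start()
for w in workers:
    w.join()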
Example #5
def write_dices_to_csv(step_nb,
                       labels,
                       gdth_path,
                       pred_path,
                       csv_file,
                       gdth_extension='.nrrd',
                       pred_extension='.nrrd'):
    '''
    Calculate the Dice between corresponding files in gdth_path and pred_path and append the
    results to csv_file. All files must be '.nrrd' or '.mhd', and every volume must be 4-D,
    with a shape like (512, 512, 400, 1) or (400, 512, 512, 1). The default mask extension
    is '.nrrd'.
    '''
    print('start calculating dices and writing them to csv')
    gdth_names, pred_names = get_gdth_pred_names(gdth_path, pred_path)

    total_dices_names = ['step_nb']  # dices_names corresponding to total_dices
    total_dices = [step_nb]
    dices_values_matrix = []  # for average computation
    for gdth_name, pred_name in zip(gdth_names, pred_names):
        gdth_file, _, _ = futil.load_itk(gdth_name)
        pred_file, _, _ = futil.load_itk(pred_name)
        dices_values = calculate_dices(labels, gdth_file, pred_file)  # dices exclude the background
        dices_values_matrix.append(dices_values)

        dices_names = [gdth_name]
        for l in labels:  # the calculated dices exclude the background
            dices_names.append('dice_' + str(l))  # one column name per dice value
        total_dices_names.extend(dices_names)

        total_dices.append(True)  # placeholder value under the file-name column
        total_dices.extend(dices_values)
        print('dice_values')
        print(dices_values)

    dices_values_matrix = np.array(dices_values_matrix)

    # average dice of each class and their names
    ave_dice_of_class = np.average(dices_values_matrix, axis=0)
    total_dices.extend(ave_dice_of_class)

    names_ave_of_dice = ['ave_dice_class_' + str(l)
                         for l in labels]  # ave dices exclude the background
    total_dices_names.extend(names_ave_of_dice)

    # average dice of each image and their names
    ave_dice_of_imgs = np.average(dices_values_matrix, axis=1)
    total_dices.extend(ave_dice_of_imgs)

    names_ave_of_imgs = [
        'ave_dice_img_' + str(i) for i in range(len(pred_names))
    ]
    total_dices_names.extend(names_ave_of_imgs)

    # average dices of total class and images
    ave_dice_total = np.average(dices_values_matrix)
    total_dices.append(ave_dice_total)

    name_ave_total = 'ave_total'
    total_dices_names.append(name_ave_total)

    if not os.path.exists(csv_file):
        with open(csv_file, 'a+', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(total_dices_names)

    with open(csv_file, 'a+', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(total_dices)
    print('finish writing dices to csv file at ' + csv_file)
    return None
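
calculate_dices is defined elsewhere; per label l, Dice = 2|G∩P| / (|G| + |P|). A minimal sketch under that assumption:

import numpy as np

def calculate_dices(labels, gdth, pred):
    # hypothetical re-implementation: one Dice value per label, background excluded
    dices = []
    for l in labels:
        g, p = gdth == l, pred == l
        intersection = np.logical_and(g, p).sum()
        dices.append(2.0 * intersection / (g.sum() + p.sum() + 1e-8))  # epsilon avoids 0/0
    return dices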
Example #6
def write_all_metrics_for_one_ct(mylock, labels, gdth_name, pred_name, csv_file, lung, fissure):
    gdth, gdth_origin, gdth_spacing = futil.load_itk(gdth_name)
    pred, pred_origin, pred_spacing = futil.load_itk(pred_name)

    if lung:  # gdth is a lung mask, so pred must be converted from lobes to lung, labels must be [1],
        # and the size must match the gdth size (the LOLA11 mask resolution is 1mm x 1mm x 1mm)
        pred = get_lung_from_lobe(pred)
        labels = [1]
        if not gdth.shape == pred.shape:  # sometimes the gdth size differs from the pred's
            pred = downsample(pred, ori_sz=pred.shape, trgt_sz=gdth.shape, order=1,
                              labels=labels)  # resample by shape because the LOLA11 spacing metadata is sometimes wrong
        suffix_len = len(os.path.basename(pred_name).split(".")[-1])
        lung_file_dir = os.path.dirname(pred_name) + "/lung"
        lung_file_fpath = lung_file_dir + "/" + os.path.basename(pred_name)[:-suffix_len - 1] + '.mhd'
        if not os.path.exists(lung_file_dir):
            os.makedirs(lung_file_dir)

        futil.save_itk(lung_file_fpath, pred, pred_origin, pred_spacing)

    elif fissure and ('LOLA11' in gdth_name or "lola11" in gdth_name):  # only slice-wise annotations exist
        pred_cp = copy.deepcopy(pred)
        slic_nb = 0
        for i in range(gdth.shape[1]):  # gdth.shape = (600, 512, 512)
            gdth_slice = gdth[:, i, :]
            if not gdth_slice.any():  # the slice is all black
                pred_cp[:, i, :] = 0
            else:
                slic_nb += 1
                for j in range(gdth.shape[2]):  # sometimes only one lobe is annotated in a slice
                    gdth_line = gdth_slice[:, j]
                    if not gdth_line.any():
                        pred_cp[:, i, j] = 0
        if slic_nb > 30:
            print('number of annotated slices is greater than 30 (' + str(slic_nb) + '), switching to another axis')
            pred_cp = copy.deepcopy(pred)
            slic_nb = 0
            for i in range(gdth.shape[2]):  # gdth.shape = (600, 512, 512)
                gdth_slice = gdth[:, :, i]
                if not gdth_slice.any():  # the slice is all black
                    pred_cp[:, :, i] = 0
                else:
                    slic_nb += 1
                    for j in range(gdth.shape[1]):  # sometimes only one lobe is annotated in a slice
                        gdth_line = gdth_slice[:, j]
                        if not gdth_line.any():
                            pred_cp[:, j, i] = 0
        if slic_nb > 30:
            raise Exception("cannot get fissure points: more than 30 annotated slices along both axes")
        pred = pred_cp
        futil.save_itk(pred_name.split(".mh")[0] + "_points.mha", pred, pred_origin, pred_spacing)
        print('number of annotated slices: ', slic_nb)

    gdth = one_hot_encode_3D(gdth, labels=labels)
    pred = one_hot_encode_3D(pred, labels=labels)
    print('start calculating all metrics for image: ', pred_name)
    metrics_dict_all_labels = get_metrics_dict_all_labels(labels, gdth, pred, spacing=pred_spacing[::-1])
    metrics_dict_all_labels['filename'] = pred_name  # add a new key to the metrics
    data_frame = pd.DataFrame(metrics_dict_all_labels)
    with mylock:
        data_frame.to_csv(csv_file, mode='a', header=not os.path.exists(csv_file), index=False)
        print(threading.current_thread().name + " successfully wrote metrics to csv " + csv_file)
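
get_lung_from_lobe is assumed to collapse the lobe labels into a single binary lung mask, which is why labels is reset to [1] above; a one-line sketch under that assumption:

import numpy as np

def get_lung_from_lobe(lobes):
    # hypothetical: any nonzero lobe voxel belongs to the lung (label 1)
    return (lobes > 0).astype(np.uint8)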
Example #7
import time
import futils.util as futil
import segmentor as v_seg
import keras.backend as K

K.set_learning_phase(1)

#LOAD THE MODEL
segment = v_seg.v_segmentor(batch_size=1, model='models/final.h5', ptch_sz=128, z_sz=64)



#LOAD THE CT_SCAN
scan_file = 'VESSEL12_15.mhd'
ct_scan, origin, spacing, orientation = futil.load_itk('data/val/A/'+scan_file, get_orientation=True)
if (orientation[-1] == -1):
    ct_scan = ct_scan[::-1]
print('Origin: ', origin)
print('Spacing: ', spacing)
print('Orientation: ', orientation)



#NORMALIZATION
ct_scan = futil.normalize(ct_scan)


#PREDICT the segmentation
Example #8
def writeFissure(ctFpath, fissureFpath, radiusValue=3, Absdir=None):
    scan, origin, spacing = futil.load_itk(ctFpath)
    fissure = get_fissure(scan, radiusValue=radiusValue)
    futil.save_itk(fissureFpath, fissure, origin, spacing)
    print('saved fissure mask at', fissureFpath)
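
A hedged batch-usage sketch (get_all_ct_names as in Example #10; the directories are hypothetical):

import os

ct_dir = '/data/jjia/lobe/valid/pred'      # hypothetical input directory
out_dir = os.path.join(ct_dir, 'fissure')
os.makedirs(out_dir, exist_ok=True)
for ct_fpath in get_all_ct_names(ct_dir):
    writeFissure(ct_fpath, os.path.join(out_dir, os.path.basename(ct_fpath)))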
Example #9
config.gpu_options.per_process_gpu_memory_fraction = 0.5

# Create a session with the above options specified.
K.tensorflow_backend.set_session(tf.Session(config=config))
K.set_learning_phase(1)

#LOAD THE MODEL
segment = v_seg.v_segmentor(
    batch_size=1,
    model='/content/LungLobeSegmentation/models/final.h5',
    ptch_sz=128,
    z_sz=64)

#LOAD THE CT_SCAN
scan_file = 'test.mhd'
ct_scan, origin, spacing, orientation = futil.load_itk(
    "/content/LungLobeSegmentation/data/" + scan_file, get_orientation=True)
if (orientation[-1] == -1):
    ct_scan = ct_scan[::-1]
print('Origin: ', origin)
print('Spacing: ', spacing)
print('Orientation: ', orientation)

#NORMALIZATION
ct_scan = futil.normalize(ct_scan)
print('successfully normalized')

#PREDICT the segmentation
t1 = time.time()
Example #10
def write_preds_to_disk(segment,
                        data_dir,
                        preds_dir,
                        number=None,
                        stride=0.25,
                        workers=1,
                        qsize=1):
    """
    write predes to disk. Divided into 2 parts: predict (require segmentor.py, using GPU, can not use multi threads),
    and upsampling (require upsample_crop_save_ct, using cpu, multi threads).
    :param segment: an object or an instance
    :param data_dir: directory where ct data is
    :param preds_dir: directory where prediction result will be saved
    :param number: number of predicted ct
    :param stride: stride or overlap ratio during patching
    :return: None
    """
    scan_files = get_all_ct_names(data_dir, number=number)
    print("files are: ", scan_files)
    pad_nb = 48
    q = queue.Queue(qsize)
    cooking_flag = False

    def consumer(mylock):
        # GPU-bound inference cannot be multithreaded, so the consumer threads
        # handle only the CPU-bound upsampling.
        while True:
            # once scan_files and q are both empty and nothing is cooking,
            # the threads should stop waiting
            if len(scan_files) or cooking_flag or not q.empty():
                with mylock:
                    print(
                        threading.current_thread().name +
                        " gets the lock, thread id: " +
                        str(threading.get_ident()) +
                        " prepare to upsample data, waiting for the data from queue"
                    )
                    try:
                        out_mask = q.get(timeout=60)  # wait up to 1 minute
                        t2 = time.time()
                        print(threading.current_thread().name +
                              " gets the data before upsampling at time " +
                              str(t2) + ", the thread releases the lock")
                    except queue.Empty:
                        out_mask = None
                        print(
                            threading.current_thread().name +
                            " did not get data within 60s; checking again whether " +
                            "scan_files is still non-empty, the thread releases the lock"
                        )
                if out_mask is not None:
                    t1 = time.time()
                    out_mask.upsample_crop_save_ct()
                    t3 = time.time()
                    print("it costs tis secons to upsample the data " +
                          str(t3 - t1))
            else:
                print(
                    threading.current_thread().name +
                    " scan_files is empty, cooking flag is False, q is empty, finishing the thread"
                )
                return None

    thd_list = []
    mylock = threading.Lock()
    for i in range(workers):
        thd = threading.Thread(target=consumer, args=(mylock, ))
        thd.start()
        thd_list.append(thd)

    for i in range(len(scan_files)):
        print('start iterate')
        scan_file = scan_files.pop()
        cooking_flag = True

        # e.g. ct_scan.shape: (717, 512, 512), spacing: (0.5, 0.741, 0.741)
        ct_scan, origin, spacing = futil.load_itk(filename=scan_file)
        ct_scan = np.pad(ct_scan, ((pad_nb, pad_nb), (pad_nb, pad_nb),
                                   (pad_nb, pad_nb)),
                         mode='constant',
                         constant_values=-3000)
        print('Spacing: ', spacing, 'size', ct_scan.shape)

        # NORMALIZATION
        ct_scan = futil.normalize(ct_scan)

        mask, trgt_space_list, original_shape, labels, trgt_sz_list, io, task = segment.predict(
            ct_scan[..., np.newaxis], ori_space_list=spacing,
            stride=stride)  # shape: (717, 512, 512,1)
        mask = Mask(mask, scan_file, pad_nb, preds_dir, origin, spacing,
                    trgt_space_list, original_shape, labels, trgt_sz_list, io,
                    task)
        q.put(mask, timeout=6000)
        cooking_flag = False

    for thd in thd_list:
        thd.join()
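
A hedged invocation sketch (the directories are hypothetical; segment is a v_segmentor instance as built in the surrounding scripts):

write_preds_to_disk(segment,
                    data_dir='/data/jjia/vessel/valid/ori_ct',  # hypothetical
                    preds_dir='/data/jjia/vessel/valid/pred',   # hypothetical
                    number=2, stride=0.25, workers=2, qsize=2)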
Example #11
#LOAD THE MODEL
segment = v_seg.v_segmentor(batch_size=1, model='models/final.h5', ptch_sz=128, z_sz=64)

#GET THE IMAGE PATH
try:
    path_to_input_scan_file = sys.argv[1]
except IndexError:
    logger.error('Supply the path to the image file as the only CLI arg, e.g.')
    logger.error('>> pipenv run python run_single_segmentation.py path/to/image.nii.gz')
    path_to_input_scan_file = 'data/003_GST_CTce_raw_crop.nii.gz'  # fall back to a sample scan
finally:
    assert os.path.isfile(path_to_input_scan_file), "ERROR no such input file as {}".format(path_to_input_scan_file)

#LOAD THE CT_SCAN
ct_scan, origin, shape, spacing, orientation = futil.load_itk(path_to_input_scan_file, get_orientation=True)
if (orientation[-1] == -1):
    ct_scan = ct_scan[::-1]
logger.info('origin: {} {} {}'.format(*origin))
logger.info('shape: {} {} {}'.format(*shape))
logger.info('spacing: {} {} {}'.format(*spacing))
logger.info('orientation: {} {} {} {} {} {} {} {} {}'.format(*orientation))

#NORMALIZATION
ct_scan = futil.normalize(ct_scan)

#PREDICT the segmentation
t1 = time.time()
lobe_mask = segment.predict(ct_scan)
t2 = time.time()
logger.info('prediction runtime (s): %i', int(t2-t1))
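
A hedged follow-up for persisting the result, reusing the futil.save_itk signature seen in the other examples (the output naming is hypothetical):

out_path = path_to_input_scan_file.replace('.nii.gz', '_lobes.nii.gz')  # hypothetical naming
futil.save_itk(out_path, lobe_mask, origin, spacing)
logger.info('saved lobe mask to {}'.format(out_path))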
Example #12

# scan_file = 'SSc_patient_77.mhd'
# sub_dir = 'GLUCOLD_isotropic1dot5'

if task == 'vessel':
    file_name = os.path.dirname(os.path.realpath(
        __file__)) + '/data/' + task + '/valid/ori_ct/' + scan_file
    file_name_gdth = os.path.dirname(os.path.realpath(
        __file__)) + '/data/' + task + '/valid/gdth_ct/' + scan_file
elif task == 'lobe':
    file_name = os.path.dirname(
        os.path.realpath(__file__)
    ) + '/data/' + task + '/valid/ori_ct/' + sub_dir + '/' + scan_file
print(file_name)
ct_scan, origin, spacing, orientation = futil.load_itk(filename=file_name,
                                                       get_orientation=True)
scan_gdth, _, _ = futil.load_itk(filename=file_name_gdth,
                                 get_orientation=False)

if (orientation[-1] == -1):
    ct_scan = ct_scan[::-1]
print('Spacing: ', spacing)

scan = (ct_scan - np.mean(ct_scan)) / (np.std(ct_scan))

if fixed_space:
    zoom_seq = np.array(spacing, dtype='float') / np.array(
        trgt_space, dtype='float')  # zoom factor = original spacing / target spacing
    print('zoom_seq', zoom_seq)

    scan = ndimage.interpolation.zoom(scan, zoom_seq, order=1, prefilter=1)
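
A quick worked example of the zoom factor (spacing values as quoted in Example #10; the target spacing is hypothetical):

import numpy as np

spacing = np.array([0.5, 0.741, 0.741])   # original voxel size (z, y, x) in mm
trgt_space = np.array([1.5, 1.5, 1.5])    # hypothetical target voxel size in mm
print(spacing / trgt_space)               # -> [0.333 0.494 0.494]: a zoom < 1 shrinks the array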