Example #1
def generate_volumes_lesion(test_data_dir, n_neighboringslices=1, new_postfix='', prediction_postfix='prediction', image_size=320):
    dict_name = os.path.basename(os.path.dirname(test_data_dir))
    print('DICT_NAME: ', dict_name)
    tst = pipe.Reader(seg.file_paths_ordered(test_data_dir, iterations=1, image_identifier='volume', label_identifier='segmentation', prediction_postfix=prediction_postfix, new_postfix=new_postfix), name="Read File Names")
    tst = tst.multiply(custom_numpy_load_all_slices(n_neighboringslices=n_neighboringslices), name="Load Slices")
    tst = _test_val_tail(tst, None, label_of_interest=2, label_required=1, fuse_labels=False, apply_crop=True, discard_labels=False, image_size=image_size)

    return tst
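
The pipe graphs appear to be iterable: Example #4 below loops with "for inputs, parameters in dct", while Example #7 unpacks a four-tuple after a multiply node. A minimal consumption sketch, assuming the simpler two-tuple form; the path is a placeholder:

tst = generate_volumes_lesion('/data/lits/test/', n_neighboringslices=3)  # hypothetical path
for inputs, parameters in tst:
    image = inputs[0]  # slice stack; inputs[1] is the segmentation, by the convention seen in Example #5
    print(parameters["file_names"][0], image.shape)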
Example #2
def test(test_data_dir, label_of_interest=2, label_required=1, fuse_labels=False):

    dict_name = os.path.basename(os.path.dirname(test_data_dir))
    # scaling_dict = get_scaling_dict(dict_name + "_scaling", test_data_dir)
    # crop_dict = get_crop_dict(dict_name + "_cropping", test_data_dir)

    tst = pipe.Reader(seg.file_paths_ordered(test_data_dir, iterations=1, image_identifier='volume', label_identifier='segmentation'), name="Read File Names")
    tst = tst.run_on(1)
    tst = tst.multiply(seg.numpy_load_all_slices(), name="Load Slices")
    # tst = _test_val_tail(tst, scaling_dict, crop_dict)
    tst = _test_val_tail(tst, None, label_of_interest=label_of_interest, label_required=label_required, fuse_labels=fuse_labels)
    tst = tst.run_on(4)

    return tst
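
Hypothetical calls, assuming the LiTS label convention that the defaults above suggest (0 = background, 1 = liver, 2 = lesion); whether fuse_labels=True is the right setting for whole-organ evaluation is a guess:

tst_lesion = test('/data/lits/test/', label_of_interest=2, label_required=1)
tst_liver = test('/data/lits/test/', label_of_interest=1, label_required=1, fuse_labels=True)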
Example #3
def validation(validation_data_dir, slice_type='axial', n_neighboringslices=5, image_size=320, label_of_interest=2,
               label_required=1, max_tries=100, fuse_labels=False, apply_crop=False):

    dict_name = os.path.basename(os.path.dirname(validation_data_dir))
    # scaling_dict = get_scaling_dict(dict_name + "_scaling", validation_data_dir)
    # crop_dict = get_crop_dict(dict_name + "_cropping", validation_data_dir)

    vld = pipe.Reader(seg.file_paths_random(validation_data_dir, iterations=0, image_identifier='volume',
                                            label_identifier='segmentation'), name="Read File Names")
    vld = vld.transform(
        custom_load_slices(label_of_interest=label_of_interest, label_required=label_required, min_frequency=1.0,
                           max_tries=max_tries, slice_type=slice_type, n_neighboringslices=n_neighboringslices),
        name="Load Slices")
    vld = _test_val_tail(vld, None, image_size=image_size, label_of_interest=label_of_interest,
                         label_required=label_required, fuse_labels=fuse_labels, apply_crop=apply_crop)

    return vld
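
Since file_paths_random is called here with iterations=0, the same setting the training pipeline uses, the validation stream presumably does not terminate on its own, so a consumer would draw a fixed number of samples. A sketch with a placeholder path and count:

vld = validation('/data/lits/val/', n_neighboringslices=5)
for _, (inputs, parameters) in zip(range(200), vld):
    pass  # evaluate one validation slice per draw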
Example #4
def get_scaling_dict(name, data_dir):

    name = name + '.p'

    if os.path.isfile(name):

        print('Found dictionary file "{0}". Loading dictionary...'.format(name))

        with open(name, 'rb') as infile:
            dictionary = pickle.load(infile)

    else:

        print('No dictionary file "{0}" found. Creating dictionary...'.format(name))

        dictionary = {}

        scale = prep.RobustScaler(copy=False)
        #LITS
        dct = pipe.Reader(seg.file_paths_ordered(data_dir, iterations=1, image_identifier='volume', label_identifier='segmentation'), name="Read File Names")
        #3dircad
        #dct = pipe.Reader(seg.file_paths_ordered(data_dir, iterations=1, image_identifier='image', label_identifier='label'), name="Read File Names")
        dct = dct.transform(seg.numpy_load_volume(), name="Load Volumes")
        dct = dct.transform(seg.numpy_clip(-100, 400), name="Clip Pixel Values")
        dct = dct.transform(seg.numpy_mask_background(0, -100.))

        for inputs, parameters in dct:

            file_name = parameters["file_names"][0]
            volume = inputs[0]
            # Mask out the clip boundaries (background at -100, saturation at 400)
            # so only genuine tissue intensities are used to fit the scaler.
            volume = np.ma.masked_values(volume, -100.)
            volume = np.ma.masked_values(volume, 400., copy=False)
            volume = np.ma.compressed(volume)
            volume = volume.reshape(-1, 1)
            print('Filename %s has labels (values, counts): %s' % (file_name, np.unique(inputs[1], return_counts=True)))
            scale.fit(volume)
            dictionary[file_name] = (scale.center_, scale.scale_)

        with open(name, 'wb') as outfile:
            pickle.dump(dictionary, outfile)

    print('Dictionary loaded.')

    return dictionary
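
A sketch of how a stored (center_, scale_) pair could be applied later, mirroring sklearn's RobustScaler transform, (x - center_) / scale_; the helper name is illustrative, not from the original code:

def apply_robust_scaling(volume, scaling_dict, file_name):
    # center_ is the per-file median and scale_ the interquartile range,
    # as fitted above; both have shape (1,) and broadcast over the volume.
    center, scale = scaling_dict[file_name]
    return (volume - center) / scale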
Example #5
def get_crop_dict(name, data_dir):

    name = name + '.p'

    if os.path.isfile(name):

        print('Found dictionary file "{0}". Loading dictionary...'.format(name))

        with open(name, 'rb') as infile:
            dictionary = pickle.load(infile)

    else:

        print('No dictionary file "{0}" found. Creating dictionary...'.format(name))

        dictionary = {}

        dct = pipe.Reader(seg.file_paths_ordered(data_dir, iterations=1, image_identifier='volume', label_identifier='segmentation'), name="Read File Names")
        dct = dct.transform(seg.numpy_load_volume(), name="Load Volumes")

        for inputs, parameters in dct:

            file_name = parameters["file_names"][0]
            volume = inputs[1]  # the segmentation (label) volume, not the image

            # Project the label volume onto each axis to find occupied indices,
            # then take the first and last occupied index (inclusive bounds).
            x = np.any(volume, axis=(1, 2))
            y = np.any(volume, axis=(0, 2))
            z = np.any(volume, axis=(0, 1))

            xmin, xmax = np.where(x)[0][[0, -1]]
            ymin, ymax = np.where(y)[0][[0, -1]]
            zmin, zmax = np.where(z)[0][[0, -1]]

            dictionary[file_name] = (xmin, xmax, ymin, ymax, zmin, zmax)

        with open(name, 'wb') as outfile:
            pickle.dump(dictionary, outfile)

    print('Dictionary loaded.')

    return dictionary
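
An illustrative counterpart (not in the original) showing how a crop-dictionary entry would be applied; the stored bounds are inclusive, hence the +1 when slicing:

def apply_crop(volume, crop_dict, file_name):
    xmin, xmax, ymin, ymax, zmin, zmax = crop_dict[file_name]
    return volume[xmin:xmax + 1, ymin:ymax + 1, zmin:zmax + 1]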
Example #6
def training(train_data_dir, slice_type='axial', n_neighboringslices=5, image_size=320, oversample=True,
             label_of_interest=2, label_required=1, max_tries=500, min_frequency=1.0, fuse_labels=False,
             apply_crop=True, data_augmentation=False):
    print(train_data_dir, slice_type, n_neighboringslices, image_size, oversample, label_of_interest, label_required,
          max_tries, min_frequency, fuse_labels, apply_crop, data_augmentation)
    dict_name = os.path.basename(os.path.dirname(train_data_dir))
    # scaling_dict = get_scaling_dict(dict_name + "_scaling", train_data_dir)

    sampler = None
    # Instantiate the oversampling class which is used in the custom slice loader to oversample outliers
    if oversample:
        # Use the function argument rather than a hard-coded label value.
        sampler = oversampler.Oversampler(label_of_interest=label_of_interest, label_required=label_required, train_data_dir=train_data_dir, debug=False)

    # crop_dict = get_crop_dict(dict_name + "_cropping", train_data_dir)

    # Load slices
    # Build a Reader object whose next attribute yields the path of the next image volume.
    tr = pipe.Reader(seg.file_paths_random(train_data_dir, iterations=0, image_identifier='volume', label_identifier='segmentation'), name="Read File Names")
    tr = tr.transform(custom_load_slices(label_of_interest=label_of_interest, label_required=label_required,
                                         min_frequency=min_frequency, max_tries=max_tries, slice_type=slice_type,
                                         n_neighboringslices=n_neighboringslices, oversampler=sampler),
                      name="Load Slices")

    # Random transformations

    tr = tr.transform(seg.numpy_rotation2D(1.0, upper_bound=90, min_val=-350.), name="Random Rotation")
    if data_augmentation:
        tr = tr.transform(seg.numpy_clip_scale(-300, 500, 255), name='Clip Scale')
        tr = tr.transform(seg.numpy_image_augmentation(0.5), name='Image Augmentation')
    tr = tr.transform(seg.numpy_random_zoom2D(1.0, [image_size, image_size], lower_bound=0.8, upper_bound=1.2),
                      name="Random Slice Scaling")
    tr = tr.transform(seg.numpy_translation2D(0.5, factor=0.25, default_border=0.25, label_of_interest=1),
                      name="Random Translation")
    if data_augmentation:
        tr = _test_val_tail(tr, None, image_size=image_size, label_of_interest=label_of_interest,
                            label_required=label_required, fuse_labels=fuse_labels, apply_crop=apply_crop,
                            s_min=0, s_max=255)
    else:
        tr = _test_val_tail(tr, None, image_size=image_size, label_of_interest=label_of_interest,
                            label_required=label_required, fuse_labels=fuse_labels, apply_crop=apply_crop)

    return tr
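
A hypothetical training setup with a placeholder path, enabling both the oversampling and the augmentation branches exercised above:

tr = training('/data/lits/train/', slice_type='axial', n_neighboringslices=5,
              image_size=320, oversample=True, label_of_interest=2,
              label_required=1, data_augmentation=True)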
Example #7
    def build_oversampling_dict(self):
        dict_name = os.path.basename(os.path.dirname(self.train_data_dir))
        name = dict_name + '_oversampling' + '.p'

        if os.path.isfile(name):

            print(
                'Found oversampling file "{0}". Loading dictionary...'.format(
                    name))

            with open(name, 'rb') as infile:
                self.cache, self.buckets, self.global_intensity = pickle.load(infile)

        else:
            print('No oversampling file "{0}" found. Creating dictionary...'.
                  format(name))

            #LITS
            dct = pipe.Reader(seg.file_paths_ordered(
                self.train_data_dir,
                iterations=1,
                image_identifier='volume',
                label_identifier='segmentation'),
                              name="Read File Names")
            dct = dct.multiply(seg.numpy_gen_slices(), name="Load Slices")

            for outputs, parameters, inputs, slice_index in dct:
                if self.label_required in outputs[1]:
                    self.register_slice(inputs[1], slice_index, outputs)

            self.calculate_global_intensity()

            with open(name, 'wb') as outfile:
                pickle.dump([self.cache, self.buckets, self.global_intensity], outfile)

        print('Oversampling loaded.')
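
A standalone sketch for inspecting a saved oversampling file, assuming only the three-element list layout pickled above; the file name is a placeholder:

import pickle

with open('mydataset_oversampling.p', 'rb') as f:
    cache, buckets, global_intensity = pickle.load(f)
print(len(cache), len(buckets), global_intensity)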