Example no. 1
    def __inference__(self, index):
        image_path = self.images_path[index]
        label_path = self.labels_path[index]

        image = load_image(image_path, type=sitk.sitkFloat64)
        label = load_image(label_path, type=sitk.sitkUInt8)

        origin = label.GetOrigin()
        spacing = label.GetSpacing()
        direction = label.GetDirection()

        image = truncation(image, lower=-200, upper=300)

        image = sitk.GetArrayFromImage(image)
        label = sitk.GetArrayFromImage(label)

        # (W, H, D) -> (D, H, W)
        image = np.transpose(image, axes=(2, 1, 0))
        label = np.transpose(label, axes=(2, 1, 0))

        shape = image.shape
        image_patch, _, locations = sample(image, label, self.sample_shape, overlap=(0, 0, 0))

        patch_result = []
        for i in range(len(image_patch)):
            patch = image_patch[i]
            # add batch and channel dimensions: (D, H, W) -> (1, 1, D, H, W)
            patch = patch[np.newaxis, np.newaxis, :]
            patch = torch.from_numpy(patch)
            patch = patch.float()

            if self.use_gpu:
                patch = patch.cuda()

            with torch.no_grad():
                output = self.model(patch)

            patch_result.append(output.cpu().numpy()[0,:])

        predict = assemble_channels(patch_result, shape, locations)
        predict = np.argmax(predict, axis=0)

        # (D, H, W) -> (W, H, D)
        predict = np.transpose(predict, axes=(2, 1, 0))
        predict = sitk.GetImageFromArray(predict)
        predict.SetOrigin(origin)
        predict.SetDirection(direction)
        predict.SetSpacing(spacing)

        inference_file_path = os.path.join(self.inference_dir, str(self.patient_ids[index]) + ".nii.gz")
        sitk.WriteImage(predict, inference_file_path)
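The helpers load_image, truncation, sample, and assemble_channels are project utilities that are not shown in these snippets. Purely to illustrate the windowing step, a minimal stand-in for truncation (an assumption, not the project's actual implementation) could clamp the CT intensities while preserving the image geometry:

import numpy as np
import SimpleITK as sitk

def truncation_sketch(image, lower, upper):
    # Hypothetical stand-in for the project's truncation helper: clamp voxel
    # intensities to [lower, upper] and keep origin/spacing/direction.
    arr = sitk.GetArrayFromImage(image)
    arr = np.clip(arr, lower, upper)
    out = sitk.GetImageFromArray(arr)
    out.CopyInformation(image)
    return out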
Example no. 2
    def __getitem__(self, index):
        if self.skip_images:
            img = None
            pose = self.poses[index]
        else:
            if self.mode == 0:
                # if an image fails to load, keep advancing to the next index;
                # the final decrement leaves index at the frame that loaded
                img = None
                while img is None:
                    img = load_image(self.c_imgs[index])
                    pose = self.poses[index]
                    index += 1
                index -= 1
            elif self.mode == 1:
                img = None
                while img is None:
                    img = load_image(self.d_imgs[index])
                    pose = self.poses[index]
                    index += 1
                index -= 1
            elif self.mode == 2:
                c_img = None
                d_img = None
                while (c_img is None) or (d_img is None):
                    c_img = load_image(self.c_imgs[index])
                    d_img = load_image(self.d_imgs[index])
                    pose = self.poses[index]
                    index += 1
                img = [c_img, d_img]
                index -= 1
            else:
                raise Exception('Wrong mode {:d}'.format(self.mode))

        if self.target_transform is not None:
            pose = self.target_transform(pose)

        if self.skip_images:
            return img, pose

        if self.transform is not None:
            if self.mode == 2:
                img = [self.transform(i) for i in img]
            else:
                img = self.transform(img)

        return img, pose
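The retry loops above rely on load_image returning None when a file cannot be read. A minimal sketch of such a helper (an assumption about the project's utility, shown only to make the pattern concrete):

from PIL import Image

def load_image_sketch(filename):
    # Hypothetical stand-in for the project's load_image: return the RGB
    # image on success, or None if the file is missing or unreadable.
    try:
        with open(filename, 'rb') as f:
            return Image.open(f).convert('RGB')
    except OSError:
        return None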
Example no. 3
    def generate_batch(self):
        start = self.start
        end = self.end
        batch_list = []
        label_list = []
        # load the images and resize them to the required dimensions
        for i in range(start, end):
            # print(self.images[i], i)
            img = load_image(self.dir_images + str(self.images[i]) + self.type, scale=self.divisor_scale, dim_image=self.dim_image)[:, :, :3]
            batch_list.append(img.reshape((1, self.dim_image, self.dim_image, 3)))
            label_list.append(self.labels[i])

        # concatenate the elements of batch_list, yielding an array of shape (n, dim_image, dim_image, 3)
        # return the concatenated batch_list and label_list with n items, one label per image
        return np.concatenate(batch_list, 0), label_list
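For illustration only, the same sliding start/end batching can be written as a stand-alone generator; load_fn is passed in as a parameter, so nothing is assumed about the project's helper beyond returning an array with at least three channels:

import numpy as np

def iterate_batches(image_ids, labels, batch_size, load_fn, dim_image=224):
    # Yield (batch, labels) pairs equivalent to repeated generate_batch calls
    # over a sliding [start, end) window (sketch, not the project's code).
    for start in range(0, len(image_ids), batch_size):
        end = min(start + batch_size, len(image_ids))
        batch = [load_fn(image_ids[i])[:, :, :3].reshape((1, dim_image, dim_image, 3))
                 for i in range(start, end)]
        yield np.concatenate(batch, 0), [labels[i] for i in range(start, end)]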
Example no. 4
    def __getitem__(self, index):
        # retry with the next index until an image loads successfully;
        # applying the transform only after a successful load avoids calling
        # ToTensor on None
        img = None
        while img is None:
            img = load_image(self.c_imgs[index])
            index += 1
        index -= 1

        data_transform = transforms.Compose([transforms.ToTensor()])
        img2 = np.array(data_transform(img))
        pose1 = self.poses[index]

        return img2, pose1
Example no. 5
    def __getitem__(self, index):
        pose = self.poses[index]
        if self.skip_images:
            return None, pose
        img = load_image(self.c_imgs[index])
        if self.target_transform is not None:
            pose = self.target_transform(pose)

        if self.transform is not None:
            if self.mode == 2:
                img = [self.transform(i) for i in img]
            else:
                img = self.transform(img)
        return img, pose
Example no. 6
    def generate_batch(self):

        start = self.start
        end = self.end
        batch_list = []
        label_list = []

        # load the images and resize them to the required dimensions
        for i in range(start, end):
            lab = self.labels.iloc[i, :].values
            img = utils.load_image(self.dir_images + str(self.images[i]) +
                                   self.type)[:, :, :3]
            batch_list.append(img.reshape((1, 224, 224, 3)))
            label_list.append(lab)

        # concatenate the elements of batch_list, yielding an array of shape (n, 224, 224, 3)
        # return the concatenated batch_list and label_list with n items, one label per image
        return np.concatenate(batch_list, 0), label_list
Example no. 7
    def __getitem__(self, index):
        pose = self.poses[index]
        #print(pose)
        if self.skip_images:
            return None, pose
        #print(self.c_imgs[index])
        img = load_image(self.c_imgs[index])
        #TODO: check aspect ratio
        if img.size[0] > 720:
            img = img.resize((720, 480))
        #print("Loading image:", index, img.mode, img.size, self.c_imgs[index], pose)
        if self.target_transform is not None:
            pose = self.target_transform(pose)
            #print(pose)

        if self.transform is not None:
            img = self.transform(img)

        return img, pose
Example no. 8
def write_data_hdf5(image_items,
                    mask_items,
                    hdf5_output_file,
                    process_filter_rate,
                    write_per_num=500,
                    process_resize_spacing=None,
                    sample_size=(128, 128, 32),
                    sample_overlap=(6, 6, 6),
                    window_lower=-200,
                    window_upper=300):
    # setting-up HDF5 file
    h5file = h5py.File(name=hdf5_output_file, mode='w')
    data = {}
    ds_shape = tuple([write_per_num] + list(sample_size))
    max_shape = tuple([None] + list(sample_size))
    data['image'] = h5file.create_dataset(
        name='image',
        shape=ds_shape,
        compression='gzip',
        dtype=np.float32,
        maxshape=max_shape,
    )
    data['mask'] = h5file.create_dataset(
        name='mask',
        shape=ds_shape,
        compression='gzip',
        dtype=np.uint8,
        maxshape=max_shape,
    )
    data['pid'] = h5file.create_dataset(
        name='patient_id',
        shape=tuple([write_per_num]),
        compression='gzip',
        dtype=h5py.special_dtype(vlen=str),
        maxshape=tuple([None]),
    )
    data['location'] = h5file.create_dataset(name='location',
                                             shape=tuple([write_per_num, 3]),
                                             compression='gzip',
                                             dtype=np.uint8,
                                             maxshape=tuple([None, 3]))

    # process patients one at a time
    index = 1
    patch_num = 0
    patch_writed = 0
    image_patch_container = list()
    mask_patch_container = list()
    location_container = list()
    pid_container = list()

    for image_path, mask_path in zip(image_items, mask_items):
        logging.info('-' * 100)
        logging.info('start processing patient %d' % index)

        # step 1: load image
        image = load_image(image_path, sitk.sitkFloat32)
        mask = load_image(mask_path, sitk.sitkUInt8)

        # step 2: truncate image with fixed window
        image = truncation(image, lower=window_lower, upper=window_upper)

        # step 3: resize z-axis
        # set z-axis spacing to 1.0
        original_spacing = image.GetSpacing()

        if process_resize_spacing == 'standard':
            resize_spacing = [1.0, 1.0, 1.0]
        elif process_resize_spacing == 'origin':
            resize_spacing = original_spacing
        elif process_resize_spacing == 'z-axis':
            # keep the in-plane spacing, set only the z-axis spacing to 1.0
            # (SimpleITK spacing is ordered (x, y, z))
            resize_spacing = list(original_spacing[:2]) + [1.0]
        else:
            raise NotImplementedError(
                'Unsupported resize spacing option, please check again.')

        image = resize(image,
                       resize_spacing,
                       original_spacing,
                       sitk.sitkLinear,
                       dtype=sitk.sitkFloat32)
        mask = resize(mask,
                      resize_spacing,
                      original_spacing,
                      sitk.sitkUInt8,
                      dtype=sitk.sitkUInt8)

        image = sitk.GetArrayFromImage(image)
        mask = sitk.GetArrayFromImage(mask)

        # step 4: sample patch
        image_patchs, mask_patchs, locations = sample(image,
                                                      mask,
                                                      size=sample_size,
                                                      overlap=sample_overlap)
        logging.info('sampled from {} with {} patches'.format(
            image_path, str(len(image_patchs))))

        # step 5: filter patch
        image_patchs, mask_patchs, locations = patch_filter(
            image_patchs,
            mask_patchs,
            background=0,
            rate=process_filter_rate,
            locations=locations)
        logging.info('filtered {} down to {} patches'.format(
            image_path, str(len(image_patchs))))

        length = len(image_patchs)
        if length == 0:
            index += 1
            continue

        image_patch_container.extend(image_patchs)
        mask_patch_container.extend(mask_patchs)
        location_container.extend(locations)
        pid_container.extend(len(image_patchs) * [image_path])

        if len(image_patch_container) >= write_per_num:
            for _ in range(len(image_patch_container) // write_per_num):
                image_patch_to_save = np.stack(
                    image_patch_container[:write_per_num])
                mask_patch_to_save = np.stack(
                    mask_patch_container[:write_per_num])
                location_to_save = np.stack(
                    location_container[:write_per_num])
                pid_to_save = np.stack(pid_container[:write_per_num])

                # grow every dataset before writing past its current extent
                new_size = patch_writed + write_per_num
                data['image'].resize(size=new_size, axis=0)
                data['mask'].resize(size=new_size, axis=0)
                data['pid'].resize(size=new_size, axis=0)
                data['location'].resize(size=new_size, axis=0)

                data['image'][patch_writed:patch_writed +
                              write_per_num, :] = image_patch_to_save
                data['mask'][patch_writed:patch_writed +
                             write_per_num, :] = mask_patch_to_save
                data['location'][patch_writed:patch_writed +
                                 write_per_num, :] = location_to_save
                # 'pid' is a 1-D dataset, so no trailing slice is needed
                data['pid'][patch_writed:patch_writed +
                            write_per_num] = pid_to_save

                patch_writed += write_per_num

                image_patch_container = image_patch_container[write_per_num:]
                mask_patch_container = mask_patch_container[write_per_num:]
                location_container = location_container[write_per_num:]
                pid_container = pid_container[write_per_num:]

        patch_num += length
        index += 1

    if len(image_patch_container) != 0:
        length = len(image_patch_container)
        image_patch_to_save = np.stack(image_patch_container)
        mask_patch_to_save = np.stack(mask_patch_container)
        location_to_save = np.stack(location_container)
        pid_to_save = np.stack(pid_container)

        data['image'].resize(size=patch_writed + length, axis=0)
        data['mask'].resize(size=patch_writed + length, axis=0)
        data['pid'].resize(size=patch_writed + length, axis=0)
        data['location'].resize(size=patch_writed + length, axis=0)

        data['image'][patch_writed:patch_writed +
                      length, :] = image_patch_to_save
        data['mask'][patch_writed:patch_writed +
                     length, :] = mask_patch_to_save
        data['pid'][patch_writed:patch_writed + length] = pid_to_save
        data['location'][patch_writed:patch_writed +
                         length, :] = location_to_save
        patch_writed += length

    logging.info('-' * 100)
    logging.info('Preprocessing complete, %d patches processed in total' %
                 patch_num)
    h5file.close()
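Once written, the file can be read back with plain h5py; the file name below is a placeholder for whatever hdf5_output_file was used, and the patch shapes in the comments assume the default sample_size of (128, 128, 32):

import h5py
import numpy as np

# Read the datasets created by write_data_hdf5 (file name is a placeholder).
with h5py.File('train_patches.h5', 'r') as h5file:
    images = h5file['image']        # (N, 128, 128, 32) float32 patches
    masks = h5file['mask']          # (N, 128, 128, 32) uint8 label patches
    pids = h5file['patient_id']     # (N,) variable-length strings
    locations = h5file['location']  # (N, 3) patch locations
    print(len(images), pids[0], locations[0], np.asarray(images[0]).shape)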
Example no. 9
import numpy as np
import tensorflow as tf
import os, sys

PATH_DIRECTORY = os.path.abspath('../..')
PATH_TESTER_DIR = os.path.dirname(__file__)
PATH_SRC_DIR = '..'
sys.path.insert(0, os.path.abspath(os.path.join(PATH_TESTER_DIR,
                                                PATH_SRC_DIR)))

from tools.utils import load_image, show_image, print_prob_all
from nets.vgg19 import cnn_vgg19

img1 = load_image(PATH_DIRECTORY + '/data/test/tigerTrain1.jpeg')
img2 = load_image(PATH_DIRECTORY + '/data/test/tigerTrain2.jpeg')
img3 = load_image(PATH_DIRECTORY + '/data/test/tigerTrain3.jpg')
img4 = load_image(PATH_DIRECTORY + '/data/test/tigerTest1.jpg')
img5 = load_image(PATH_DIRECTORY + '/data/test/tigerTest2.jpg')

batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))
batch3 = img3.reshape((1, 224, 224, 3))
batch4 = img4.reshape((1, 224, 224, 3))
batch5 = img5.reshape((1, 224, 224, 3))

batch_train = np.concatenate((batch1, batch2, batch3), 0)
label = [292, 292, 292]
batch_test = np.concatenate((batch4, batch5), 0)

batch = img1.reshape((1, 224, 224, 3))
Example no. 10
import numpy as np
import tensorflow as tf
import os, sys

PATH_DIRECTORY = os.path.abspath('../..')
PATH_TESTER_DIR = os.path.dirname(__file__)
PATH_SRC_DIR = '..'
sys.path.insert(0, os.path.abspath(os.path.join(PATH_TESTER_DIR,
                                                PATH_SRC_DIR)))

from tools.utils import load_image, show_image, print_prob_all
from nets.vgg19 import cnn_vgg19

img1 = load_image(PATH_DIRECTORY + '/data/example/avion.jpeg')
img2 = load_image(PATH_DIRECTORY + '/data/example/tiger.jpeg')
batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))
batch = np.concatenate((batch1, batch2), 0)

npy_path = PATH_DIRECTORY + '/weights/vgg19/vgg19.npy'
data_label_path = PATH_DIRECTORY + '/data/synset.txt'

with tf.Session() as sess:
    # VARIABLES
    images = tf.placeholder(tf.float32, [2, 224, 224, 3])
    train_mode = tf.placeholder(tf.bool)

    # MODEL VGG19
    vgg19 = cnn_vgg19(npy_path, trainable=False)
    vgg19.build(images, train_mode=train_mode)
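    # Assumed continuation (not part of the original snippet): initialise the
    # variables created from the .npy weights and run one forward pass.
    # `vgg19.prob` and the print_prob_all(prob, data_label_path) call are
    # assumptions about this project's API, shown only as a sketch.
    sess.run(tf.global_variables_initializer())
    prob = sess.run(vgg19.prob, feed_dict={images: batch, train_mode: False})
    print_prob_all(prob, data_label_path)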