Example 1
    def __getitem__(self, idx):
        # The final batch may be smaller than batch_size.
        cur_batch_size = min((idx + 1) * self.batch_size, len(
            self.slab_seeds)) - idx * self.batch_size
        batch_x = np.zeros(tuple([cur_batch_size] + self.x_batch_shape),
                           dtype=K.floatx())
        batch_y = np.zeros(tuple([cur_batch_size] + self.y_batch_shape),
                           dtype=self.y_dtype)
        for batch_i in range(cur_batch_size):
            i = idx * self.batch_size + batch_i
            x = self.slab_seeds[i].x
            y = self.slab_seeds[i].y
            if isinstance(y, str):
                # y may be a filename; load the label volume on demand
                y = mhd.read(y)[0]
            index = self.slab_seeds[i].index
            xx = x[index - self.slab_thickness // 2:index +
                   math.ceil(self.slab_thickness / 2)]
            if y.ndim == 2:
                yy = y
            else:
                yy = y[index]
            if self.transform is not None:
                xx, yy = self.transform(xx, yy)

            xx = np.transpose(xx, (1, 2, 0))
            batch_x[batch_i] = xx
            batch_y[batch_i] = np.expand_dims(yy, -1)

        if self.transpose:
            # channels-last: batch_x has shape (batch, H, W, slab_thickness)
            return batch_x, batch_y
        else:
            # channels-first x: (batch, slab_thickness, H, W)
            return np.transpose(batch_x, (0, 3, 1, 2)), batch_y
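The slicing above always yields exactly slab_thickness slices around the seed index, whether the thickness is odd or even. A quick self-contained check on a toy array:

import math
import numpy as np

x = np.arange(40).reshape(10, 2, 2)  # toy volume: 10 slices of 2x2
index = 5
for t in (3, 4):  # odd and even slab thicknesses
    xx = x[index - t // 2:index + math.ceil(t / 2)]
    assert len(xx) == t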
Example 2
def cv(k_index, train_index, test_index):
    outdir = os.path.join(base_outdir, 'k{}'.format(k_index))
    os.makedirs(outdir, exist_ok=True)

    for cid in tqdm.tqdm(ids_test):
        x, h = mhd.read_memmap(os.path.join(mhd_dir, cid + '.mha'))
        y, _ = mhd.read(os.path.join(label_dir, cid + '.mha'))
        # `lung` is a precomputed lung mask assumed to be available here;
        # keep only slices that fit a full slab and contain lung voxels.
        valid_zs = [(slab_thickness // 2 <= z < len(x) - slab_thickness // 2)
                    and np.any(lung[z] > 0) for z in range(len(x))]
        zmin = np.min(np.where(valid_zs))
        zmax = np.max(np.where(valid_zs))
        seeds = [SlabSeed(x, y, i) for i in range(zmin, zmax + 1)]
        p = model.predict_generator(SlabGenerator(seeds,
                                                  slab_thickness,
                                                  batch_size,
                                                  False,
                                                  transform=utils.transform,
                                                  transpose=False),
                                    max_queue_size=workers + 1,
                                    workers=workers,
                                    use_multiprocessing=workers > 1)
        p = np.squeeze(p)
        label = np.argmax(p, axis=-1).astype(np.uint8)
        label = np.pad(label, ((zmin, len(x) - zmax - 1), (0, 0), (0, 0)),
                       mode='constant',
                       constant_values=0)
        h['CompressedData'] = True
        mhd.write(os.path.join(outdir, cid + '.mha'), label, h)
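SlabSeed is never defined in these excerpts; from its usage here and in Examples 1 and 6 (fields x, y and index) it is presumably a small record type along these lines:

import collections

# Hypothetical definition inferred from usage; the real project may differ.
SlabSeed = collections.namedtuple('SlabSeed', ['x', 'y', 'index'])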
Example 3
    def __init__(self, ImagePath, LabelPath, valid_size=.2, augment=None):
        # These arrays hold the file paths of the images and labels.
        self.image_paths = np.array([
            x.path for x in os.scandir(ImagePath)
            if x.name.endswith('image.mhd')
        ])
        self.label_paths = np.array([
            x.path for x in os.scandir(LabelPath)
            if x.name.endswith('label.mhd')
        ])
        self.augment = augment  # whether to augment the images

        self.images = []
        self.labels = []

        for i in tqdm.tqdm(range(len(self.image_paths)),
                           desc='Loading images'):
            image, image_header = mhd.read(self.image_paths[i])
            label, label_header = mhd.read(self.label_paths[i])
            image = downsample(image, 256)
            label = (downsample(label, 256)).astype(np.uint32)
            image = torch.Tensor(image)
            label = torch.Tensor(label)
            for j in range(image.shape[0]):
                self.images.append(image[j])
                self.labels.append(label[j])

        num_train = len(self.images)
        indices = list(range(num_train))
        split = int(np.floor(valid_size * num_train))
        np.random.shuffle(indices)
        from torch.utils.data.sampler import SubsetRandomSampler
        train_idx, test_idx = indices[split:], indices[:split]
        train_sampler = SubsetRandomSampler(train_idx)
        test_sampler = SubsetRandomSampler(test_idx)
        # Wrap the accumulated slices in a dataset; the loaders are stored on
        # the instance because __init__ cannot return values.
        pairs = torch.utils.data.TensorDataset(torch.stack(self.images),
                                               torch.stack(self.labels))
        self.trainloader = torch.utils.data.DataLoader(pairs,
                                                       sampler=train_sampler,
                                                       batch_size=64)
        self.testloader = torch.utils.data.DataLoader(pairs,
                                                      sampler=test_sampler,
                                                      batch_size=64)
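A hypothetical caller would then pull the loaders off the instance (the ShipDataset name and paths follow Example 8):

dataset = ShipDataset('../dataset/Cases/Images', '../dataset/Cases/Labels')
for batch_images, batch_labels in dataset.trainloader:
    pass  # feed each batch of slices to the network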
Example 4
import argparse

import mhd  # project-local MHD/MHA I/O helpers
import _label_filters  # project-local compiled label filters


def modefilt():
    parser = argparse.ArgumentParser(description='Mode filter.')
    parser.add_argument('input', help="Input mhd filename", metavar='<input>')
    parser.add_argument('-o',
                        '--output',
                        help="Output mhd filename",
                        metavar='<filename>',
                        default='filtered.mha')
    parser.add_argument('--size',
                        help="Filter size",
                        metavar='<N>',
                        default=3,
                        type=int)

    args = parser.parse_args()
    label, h = mhd.read(args.input)
    filtered = _label_filters.modefilt3(label, args.size, 0)
    mhd.write(args.output, filtered, h)
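_label_filters.modefilt3 is a project-local compiled routine whose exact semantics are not shown in these excerpts. As a rough pure-SciPy stand-in, assuming the third argument is the border padding value and that the filter takes the per-window mode of integer labels:

import numpy as np
from scipy import ndimage


def modefilt3_sketch(label, size, pad_value=0):
    def window_mode(values):
        # mode of the flattened window; labels are non-negative integers
        return np.bincount(values.astype(np.int64)).argmax()

    return ndimage.generic_filter(label, window_mode, size=size,
                                  mode='constant', cval=pad_value)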
Example 5
def median_filter():
    parser = argparse.ArgumentParser(
        description='Median filter for mhd image.')
    parser.add_argument('input', help="Input filename", metavar='<input>')
    parser.add_argument('-o',
                        '--output',
                        help="Output filename. Default: %(default)s",
                        metavar='<output>',
                        default='filtered.mha')
    parser.add_argument('-s',
                        '--size',
                        help="Filter size. Default: %(default)s",
                        metavar='<n>',
                        default=3,
                        type=int)

    args = parser.parse_args()
    import mhd
    from scipy.ndimage import median_filter  # scipy.ndimage.filters is deprecated
    image, h = mhd.read(args.input)
    filtered = median_filter(image, args.size)
    mhd.write(args.output, filtered, h)
Example 6
    def __init__(self,
                 slab_seeds,
                 slab_thickness,
                 batch_size,
                 shuffle=False,
                 transform=None,
                 transpose=True):
        self.slab_seeds = slab_seeds
        self.slab_thickness = slab_thickness
        # per-sample shapes: (H, W, slab_thickness) for x, (H, W, 1) for y
        self.x_batch_shape = [
            slab_seeds[0].x.shape[1], slab_seeds[0].x.shape[2], slab_thickness
        ]
        self.y_batch_shape = [
            slab_seeds[0].x.shape[1], slab_seeds[0].x.shape[2], 1
        ]
        if isinstance(slab_seeds[0].y, str):
            # y given as a filename: peek at the file to get the label dtype
            self.y_dtype = mhd.read(slab_seeds[0].y)[0].dtype
        else:
            self.y_dtype = slab_seeds[0].y.dtype
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.transform = transform
        self.transpose = transpose
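Only __init__ and __getitem__ of SlabGenerator appear in these excerpts. Assuming the class subclasses keras.utils.Sequence (consistent with its use in predict_generator in Example 2), it also needs __len__, and the shuffle flag would typically act in on_epoch_end. A minimal sketch:

    def __len__(self):
        # batches per epoch; the last batch may be smaller (see __getitem__)
        return int(math.ceil(len(self.slab_seeds) / self.batch_size))

    def on_epoch_end(self):
        # assumes `import random` at module level
        if self.shuffle:
            random.shuffle(self.slab_seeds)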
Example 7
import argparse
import glob
import os

import cv2
import numpy as np
import pandas as pd
import tqdm
from scipy import ndimage

import imgproc  # project-local image tiling helpers
import mhd  # project-local MHD/MHA I/O helpers


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', required=True)
    parser.add_argument('-s', '--shape', default=(128, 128), nargs=2, type=int)
    args = parser.parse_args()

    root_dir = args.input
    block_shape = tuple(args.shape)
    downsample_shape = np.array((1, ) + block_shape[::-1])
    frame_rate = 10
    n_column = 8

    volumes = []
    infos = {}

    print('Load images')
    glob_images = glob.glob(os.path.join(root_dir, '*.mhd'))
    for image_filename in tqdm.tqdm(list(glob_images)):
        image, header = mhd.read(image_filename)
        label, _ = os.path.splitext(os.path.basename(image_filename))

        # Set info
        infos.setdefault('Label', []).append(label)

        infos.setdefault('Dim X', []).append(image.shape[2])
        infos.setdefault('Dim Y', []).append(image.shape[1])
        infos.setdefault('Dim Z', []).append(image.shape[0])

        infos.setdefault('Spacing X', []).append(header['ElementSpacing'][0])
        infos.setdefault('Spacing Y', []).append(header['ElementSpacing'][1])
        infos.setdefault('Spacing Z', []).append(header['ElementSpacing'][2])

        infos.setdefault('Min intensity', []).append(np.amin(image))

        # Set volume
        # Zoom factor 1 keeps the z extent; y/x are scaled down to block_shape.
        scale = np.concatenate(
            ([1.0], downsample_shape[1:] / np.array(image.shape[1:])))
        volumes.append(ndimage.zoom(image, scale, mode='nearest'))

    pd.DataFrame(infos).to_csv('info.csv')

    min_intensity = min([np.amin(volume) for volume in volumes])
    max_intensity = max([np.amax(volume) for volume in volumes])
    max_slice = max([volume.shape[0] for volume in volumes])

    print('Preprocess images.')
    image_list = [[] for _ in range(max_slice)]
    for volume in tqdm.tqdm(volumes):
        for i in tqdm.trange(max_slice):
            if i >= volume.shape[0]:
                image_list[i].append(np.zeros(block_shape, dtype=np.uint8))
                continue
            # Scale to the full 0-255 range before the uint8 cast; without the
            # factor the cast would truncate everything to 0 or 1.
            normalized_image = (255 * (volume[i] - min_intensity) /
                                (max_intensity - min_intensity)).astype(
                                    np.uint8)
            image_list[i].append(normalized_image)

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    writer = None

    print('Write video.')
    for images in tqdm.tqdm(image_list):
        img = imgproc.make_tile_2d(images, (None, n_column))

        if writer is None:
            # cv2.VideoWriter expects (width, height); the tiles are grayscale
            writer = cv2.VideoWriter('video.mp4', fourcc, frame_rate,
                                     (img.shape[1], img.shape[0]),
                                     isColor=False)
        writer.write(img)

    writer.release()
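imgproc.make_tile_2d is project-local and not shown. A hypothetical stand-in inferred from the call site, tiling equally sized 2-D images into a grid with a fixed number of columns (grid = (rows, columns), rows auto when None):

import numpy as np


def make_tile_2d(images, grid):
    n_rows, n_cols = grid
    if n_rows is None:
        n_rows = -(-len(images) // n_cols)  # ceil(len(images) / n_cols)
    h, w = images[0].shape
    tile = np.zeros((n_rows * h, n_cols * w), dtype=images[0].dtype)
    for k, img in enumerate(images):
        r, c = divmod(k, n_cols)
        tile[r * h:(r + 1) * h, c * w:(c + 1) * w] = img
    return tile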
Example 8
    net = Unet()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0003, betas=(0.9, 0.999), weight_decay=0.004)
    # scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.997)
    datadir = '../dataset/Cases/Images'
    labeldir = '../dataset/Cases/Labels'
    datalist = load_json('phase_liverfibrosis.json')
    # groups = split_dataset(datalist, 2)
    dataset = {}
    # n_classes = 4
    for ID in tqdm.tqdm(os.listdir(datadir), desc='Loading images'):
        # Image filenames are assumed to end in 'image.mhd' (9 characters),
        # with a matching '...label.mhd' alongside in labeldir.
        if os.path.isfile(os.path.join(datadir, ID)) and ID.endswith('.mhd'):
            original = mhd.read(os.path.join(datadir, ID))[0]
            label = mhd.read(os.path.join(labeldir, ID[:-9] + 'label.mhd'))[0]
            data = {}
            data['x'] = np.expand_dims((original / 255.0).astype(np.float32), -1)
            data['y'] = np.expand_dims(label, -1)
            dataset[ID] = data
        # n_classes = max(n_classes, np.max(label) + 1)

    ship_train_dataset = Dataset.ShipDataset(datadir, labeldir, augment=None)
    ship_train_loader = DataLoader(ship_train_dataset, batch_size=1, num_workers=0, shuffle=True)
    imgsize = 256
    labelsize = 68

    if cuda.is_available():
        net.cuda()
        criterion.cuda()
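The excerpt stops before the training loop. A hedged sketch of what presumably follows, reusing the names above and assuming ship_train_loader yields (input, target) pairs; the epoch count is an arbitrary placeholder:

    for epoch in range(10):
        for inputs, targets in ship_train_loader:
            if cuda.is_available():
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            loss = criterion(net(inputs), targets)
            loss.backward()
            optimizer.step()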
Example 9
    return np.percentile(img[mask], [p, 100 - p])


from scipy import ndimage


def dilation(img, k=16):
    return ndimage.binary_dilation(img, structure=np.ones((k, k)))


m0 = 0.268
m1 = 0.161
debug = False
for i in range(285):
    print(i)
    img, h = mhd.read('output/{:03d}.mha'.format(i))
    # lung,_ = mhd.read('output/{:03d}_lung.mha'.format(i))

    # lung_bin = dilation(lung > 100)

    normed = normalize(img, percentile=3)
    Image.fromarray(normed).save('png/regular/{:03d}.png'.format(i))
    if debug:
        plt.imshow(normed, cmap='gray')
        plt.show()

    img_h, h = mhd.read('output/{:03d}_high.mha'.format(i))
    normed = normalize(img_h, percentile=3)
    if debug:
        plt.imshow(normed, cmap='gray')
        plt.show()
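normalize is not defined in the excerpt. The dangling percentile return at the top and the uint8 PNG save suggest something like the following, offered purely as a hypothetical reconstruction:

import numpy as np


def normalize(img, percentile=3):
    # clip to the [p, 100 - p] percentile window, then rescale to 0..255
    lo, hi = np.percentile(img, [percentile, 100 - percentile])
    img = np.clip(img, lo, hi)
    return (255 * (img - lo) / (hi - lo)).astype(np.uint8)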
Example 10
            pickle.dump(obj, f)


import mhd
import tqdm
import glob

datadir = r"/win/salmon/user/fatemeh/Unet_test1_masseter/Unet_masseter_dataset"

dataset = {}
spacings = {}
#IDs = [os.path.basename(r)[0:5] for r in glob.glob(os.path.join(datadir,'k*_image.mhd'))]
for ID in tqdm.tqdm(os.listdir(datadir)):
    print(ID)
    #    original,h = mhd.read(os.path.join(datadir,'{}_image.mhd'.format(ID)))
    original, h = mhd.read(os.path.join(datadir, ID, 'original.mhd'))
    label = mhd.read(os.path.join(datadir, ID, 'label.mha'))[0]
    #label = mhd.read(os.path.join(datadir,'{}_label_muscle.mhd'.format(ID)))[0]
    #    label = mhd.read(os.path.join(datadir,'{}_label_skin.mhd'.format(ID)))[0]
    # Reversing puts the spacing into [z, y, x] order.
    spacings[ID] = h['ElementSpacing'][::-1]
    data = {}
    data['x'] = np.expand_dims((original / 255.0).astype(np.float32), -1)
    data['y'] = np.expand_dims(label, -1)
    dataset[ID] = data
x_shape = next(iter(dataset.values()))['x'].shape[1:]
# Count classes across all loaded labels, not just the last volume read.
n_classes = max(int(np.max(d['y'])) for d in dataset.values()) + 1

from datetime import datetime
result_basedir = 'unet_train_' + datetime.today().strftime("%y%m%d_%H%M%S")
    optimizer = optim.Adam(net.parameters(), lr=0.01, weight_decay=0.001)
    datadir = '../dataset/Cases/Images'
    labeldir = '../dataset/Cases/Labels'
    datalist = load_json('phase_liverfibrosis.json')
    groups = split_dataset(datalist, 4)
    dataset = {}
    y_shape = [512, 512]
    Image_shape = [512, 512]
    # n_classes = 1

    for ID in tqdm.tqdm(os.listdir(datadir), desc='Loading images'):
        if os.path.isfile(os.path.join(datadir, ID)) and ID.endswith('.mhd'):
            image = mhd.read(os.path.join(datadir, ID))[0]
            vmask = mhd.read(
                os.path.join(labeldir, ID[:-9] + 'label.mhd'))[0]
            data = {}
            data['x'] = np.expand_dims(
                (image / 255.0).astype(np.float32), 0)
            data['y'] = np.expand_dims(vmask, 0)
            # accumulate every case in the dict, keyed by filename
            dataset[ID] = data
    for epoch in range(4):
        running_loss = 0.0
        for i, item in enumerate(dataset.values()):
            # add a leading batch dimension of 1 to each case
            inputs = torch.Tensor(item['x']).unsqueeze(0)
            labels = torch.Tensor(item['y']).unsqueeze(0)