Code example #1
    def rgb_to_rgb(self, mat):
        ''' Reference images are CT scans with highlighted OARs and PTVs. The
        targets are 3D voxel maps with colourized dose intensities.
        '''
        ct_img = mat['ct_imgs']
        dose_img = mat['dose_imgs']
        d, w, h, nc = ct_img.shape
        assert w == self.opt.loadSize, 'size mismatch in width'
        assert h == self.opt.loadSize, 'size mismatch in height'

        # Both A and B are 3-channel; normalize each to mean 0.5, std 0.5.
        A = normalize3d(vox2tensor(ct_img).float(), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        B = normalize3d(vox2tensor(dose_img).float(), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

        # Augmentation: independently flip along dim 3 (width) and dim 2
        # (height), each with probability 0.5, applied identically to A and B.
        # The no_flip check comes first so no randomness is consumed when
        # flipping is disabled.
        for dim in (3, 2):
            if (not self.opt.no_flip) and random.random() < 0.5:
                rev = torch.LongTensor(list(range(A.size(dim) - 1, -1, -1)))
                A = A.index_select(dim, rev)
                B = B.index_select(dim, rev)
        return A, B
Code example #2
    def rgb_to_gray(self, mat):
        ''' Reference images are CT scans with highlighted OARs and PTVs. The
        targets are dose intensity matrices.

        mat: dict-like container (e.g. a loaded .mat file) holding
        'ct_imgs' with shape (d, w, h, nc) and 'dose_vals' with shape
        (d, w, h) -- TODO confirm key names against the data loader.
        Returns (A, B): normalized CT input tensor and a single-channel
        dose target of shape (1, d, w, h).
        '''
        # FIX: read the 3D dose matrix under 'dose_vals'. The previous key
        # 'dose_imgs' refers to the colourized 4D volume (see rgb_to_rgb),
        # whose shape can never satisfy the (d, w, h) assert below.
        dose_val = mat['dose_vals']
        ct_img = mat['ct_imgs']
        d, w, h, nc = ct_img.shape
        assert (d, w, h) == dose_val.shape, 'size mismatch between dose and ct'

        # Add a little noise to ct_img because we train on a single patient,
        # so the input would otherwise be identical every iteration. Dividing
        # by 500 gives stdev ~= 0.002, which is sufficiently small. (An older
        # comment claimed /1000 -> 0.001, but the code divides by 500.)
        rnd_noise = randn(*ct_img.shape) / 500
        ct_img = ct_img + rnd_noise
        # CT intensities must stay non-negative after adding noise.
        ct_img[ct_img <= 0] = 0.0

        A = vox2tensor(ct_img).float()
        A = normalize3d(A, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

        # Target: add a channel axis, then shift/scale in place
        # (maps [0, 1] to [-1, 1], assuming dose values are in [0, 1]
        # -- TODO confirm the dose value range).
        B = torch.from_numpy(dose_val).float()
        B = B.unsqueeze(0)
        B.sub_(0.5).div_(0.5)

        # Augmentation: flip A and B together along the width axis half the
        # time; no randomness is consumed when flipping is disabled.
        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = torch.LongTensor(list(range(A.size(2) - 1, -1, -1)))
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)
        return A, B
Code example #3
    def rgb_to_gray(self, mat):
        ''' Reference images are CT scans with highlighted OARs and PTVs. The
        targets are dose intensity matrices.
        '''
        ct_img = mat['ct_imgs']
        dose_val = mat['dose_vals']
        d, w, h, nc = ct_img.shape
        assert (d, w, h) == dose_val.shape, 'size mismatch between dose and ct'

        # to handle aaron's weird uint format
        # NOTE(review): /256 on uint16 data leaves values far outside [0, 1]
        # unless the uint16 arrays actually hold 8-bit values -- confirm.
        unsigned_types = (np.uint8, np.uint16)
        if dose_val.dtype in unsigned_types:
            dose_val = dose_val / 256
        if ct_img.dtype in unsigned_types:
            ct_img = ct_img / 256

        A = normalize3d(vox2tensor(ct_img).float(), (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

        # Target gains a leading channel axis, then is shifted/scaled in place.
        B = torch.from_numpy(dose_val).float().unsqueeze(0)
        B.sub_(0.5).div_(0.5)

        # Augmentation: flip A and B together along the width axis half the
        # time. The `or` short-circuits, so no randomness is consumed when
        # flipping is disabled.
        if not (self.opt.no_flip or random.random() >= 0.5):
            rev_w = torch.LongTensor(list(range(A.size(2))[::-1]))
            A = A.index_select(2, rev_w)
            B = B.index_select(2, rev_w)
        return A, B