Example #1
def test_half(device, batch_size, image_size, angles, spacing, det_count,
              clip_to_circle):
    # det_count is given as a fraction of the image size
    det_count = int(det_count * image_size)
    mask_radius = det_count / 2.0 if clip_to_circle else -1

    # generate random test images, masked to a circle when clip_to_circle is set
    x = generate_random_images(batch_size, image_size, mask_radius)

    # our implementation
    radon = Radon(image_size,
                  angles,
                  det_spacing=spacing,
                  det_count=det_count,
                  clip_to_circle=clip_to_circle)
    x = torch.FloatTensor(x).to(device)

    sinogram = radon.forward(x)
    single_precision = radon.backprojection(sinogram)

    h_sino = radon.forward(x.half())
    half_precision = radon.backprojection(h_sino)

    forward_error = relative_error(sinogram.cpu().numpy(),
                                   h_sino.cpu().numpy())
    back_error = relative_error(single_precision.cpu().numpy(),
                                half_precision.cpu().numpy())

    print(
        f"batch: {batch_size}, size: {image_size}, angles: {len(angles)}, spacing: {spacing}, circle: {clip_to_circle}, forward: {forward_error}, back: {back_error}"
    )

    assert_less(forward_error, 1e-3)
    assert_less(back_error, 1e-3)
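
Note: generate_random_images, relative_error and assert_less are helpers from the surrounding test code and are not shown in this listing. A plausible sketch of the error metric, as an assumption for illustration only:

import numpy as np

def relative_error(ref, approx):
    # L2 norm of the difference, normalized by the norm of the reference
    return np.linalg.norm(ref - approx) / np.linalg.norm(ref)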
Example #2
class Operators():
    def __init__(self, image_size, n_angles, sample_ratio, device, circle=False):
        self.device = device
        self.image_size = image_size
        self.sample_ratio = sample_ratio
        self.n_angles = n_angles
        
        angles = np.linspace(0, np.pi, self.n_angles, endpoint=False)
        self.radon = Radon(self.image_size, angles, clip_to_circle=circle)
        self.radon_sparse = Radon(self.image_size, angles[::sample_ratio], clip_to_circle=circle)
        self.n_angles_sparse = len(angles[::sample_ratio])
        self.landweber = Landweber(self.radon)
        
        # binary angle-selection mask (note: the width is hard-coded to 180 angles)
        self.mask = torch.zeros((1, 1, 1, 180)).to(device)
        self.mask[:, :, :, ::sample_ratio].fill_(1)
        
        
    # $X^T(\cdot)$: adjoint of the Radon transform (backprojection)
    def forward_adjoint(self, input):
        # check dimension
        if input.size()[3] == self.n_angles:
            return self.radon.backprojection(input.permute(0,1,3,2))
        elif input.size()[3] == self.n_angles_sparse:
            # rescale the sparse-view adjoint to match the full-view normalization
            return (self.radon_sparse.backprojection(input.permute(0, 1, 3, 2))
                    / self.n_angles_sparse * self.n_angles)
        else:
            raise Exception(f'forward_adjoint input dimension wrong! received {input.size()}.')
            
        
    # $X^T X(\cdot)$: Gramian operator (forward projection followed by backprojection)
    def forward_gramian(self, input):
        # check dimension
        if input.size()[2] != self.image_size:
            raise Exception(f'forward_gramian input dimension wrong! received {input.size()}.')
        
        sinogram = self.radon.forward(input)
        return self.radon.backprojection(sinogram)
    

    # Corruption model: undersample the sinogram angles by sample_ratio
    def undersample_model(self, input):
        return input[:,:,:,::self.sample_ratio]
    
    
    # Filtered backprojection. Input sinogram range = (0, 1)
    def FBP(self, input):
        # check dimension
        if input.size()[2] != self.image_size or input.size()[3] != self.n_angles:
            raise Exception(f'FBP input dimension wrong! received {input.size()}.')
        filtered_sinogram = self.radon.filter_sinogram(input.permute(0,1,3,2))
        return self.radon.backprojection(filtered_sinogram)
    
    
    # estimate step size eta
    def estimate_eta(self):
        eta = self.landweber.estimate_alpha(self.image_size, self.device)
        return torch.tensor(eta, dtype=torch.float32, device=self.device)
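
A minimal usage sketch for this operator class (the sizes and the random sinogram below are illustrative; sinograms are laid out as batch x channel x detectors x angles, as the permutes above suggest):

ops = Operators(image_size=320, n_angles=180, sample_ratio=8, device='cuda')
sino = torch.rand(1, 1, 320, 180, device='cuda')       # batch x channel x detectors x angles
recon_fbp = ops.FBP(sino)                              # filtered backprojection, 320 x 320
adj_full = ops.forward_adjoint(sino)                   # X^T applied to the full-view sinogram
adj_sparse = ops.forward_adjoint(sino[:, :, :, ::8])   # sparse-view adjoint, rescaled internally
eta = ops.estimate_eta()                               # Landweber step size estimate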
Example #3
def test_error(device, batch_size, image_size, angles, spacing, clip_to_circle):
    # generate random images
    x = generate_random_images(batch_size, image_size, masked=clip_to_circle)

    # astra
    astra = AstraWrapper(angles)

    astra_fp_id, astra_fp = astra.forward(x, spacing)
    astra_bp = astra.backproject(astra_fp_id, image_size, batch_size)
    if clip_to_circle:
        astra_bp *= circle_mask(image_size)

    # our implementation
    radon = Radon(image_size, angles, det_spacing=spacing, clip_to_circle=clip_to_circle)
    x = torch.FloatTensor(x).to(device)

    our_fp = radon.forward(x)
    our_bp = radon.backprojection(our_fp)

    forward_error = relative_error(astra_fp, our_fp.cpu().numpy())
    back_error = relative_error(astra_bp, our_bp.cpu().numpy())

    # if forward_error > 10:
    #     plt.imshow(astra_fp[0])
    #     plt.figure()
    #     plt.imshow(our_fp[0].cpu().numpy())
    #     plt.show()

    print(
        f"batch: {batch_size}, size: {image_size}, angles: {len(angles)}, spacing: {spacing}, circle: {clip_to_circle}, forward: {forward_error}, back: {back_error}")
    # TODO better checks
    assert_less(forward_error, 1e-2)
    assert_less(back_error, 5e-3)
Example #4
class Predict():
    def __init__(self, args, dataloader):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.args = args
        self.dataloader = dataloader

        self.net = UNet(input_nc=1, output_nc=1).to(self.device)
        self.net = nn.DataParallel(self.net)

        pathG = os.path.join(args.ckpt)
        self.net.load_state_dict(torch.load(pathG, map_location=self.device))
        self.net.eval()

        self.gen_mask()

        angles = np.linspace(0, np.pi, 180, endpoint=False)
        self.radon = Radon(args.height, angles, clip_to_circle=True)

    def gen_mask(self):
        # binary mask over the 180 acquisition angles: keep every 8th angle
        self.mask = torch.zeros(180).to(self.device)
        self.mask[::8].fill_(1)

    def gen_x(self, y):
        # sparse-view input: zero out the angles that are not measured
        return self.mask * y

    def crop_sinogram(self, x):
        return x[:, :, :, 6:-6]

    def overlay(self, Gx, x):
        # keep the measured (masked) angles from x, fill the rest with the network output Gx
        result = self.mask * x + (1 - self.mask) * Gx
        return result

    def inpaint(self):
        for i, data in enumerate(self.dataloader):
            y = data[0].to(self.device)  # 320 x 180

            x = self.gen_x(y)  # masked input: 320 x 180 with 23 nonzero angle columns
            Gx = self.net(x)

            Gx = self.overlay(Gx, y)

            # FBP
            Gx = normalize(Gx)  # 0~1
            fbp_Gx = self.radon.backprojection(
                self.radon.filter_sinogram(Gx.permute(0, 1, 3, 2)))

            print(f'Saving images for batch {i}')

            for j in range(y.size()[0]):
                #                 vutils.save_image(Gx[j,0], f'{self.args.outdir}/{class_name}/{fnames[i*self.args.bs+j]}', normalize=True)  # to 0~255
                vutils.save_image(
                    fbp_Gx[j, 0],
                    f'{self.args.outdir}/{class_name}/{fnames[i*self.args.bs+j]}',
                    normalize=True)
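
normalize is not shown in this listing; given the "# 0~1" comments, a plausible per-image min-max rescaling (an assumption about that helper, not its actual definition):

def normalize(x):
    # rescale each image in the batch to the (0, 1) range
    x_min = x.amin(dim=(2, 3), keepdim=True)
    x_max = x.amax(dim=(2, 3), keepdim=True)
    return (x - x_min) / (x_max - x_min + 1e-8)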
Example #5
    def test_shapes(self):
        """
        Check using channels is ok
        """
        device = torch.device('cuda')
        angles = torch.FloatTensor(
            np.linspace(0, 2 * np.pi, 10).astype(np.float32)).to(device)
        radon = Radon(64, angles)

        # test with 2 batch dimensions
        x = torch.FloatTensor(2, 3, 64, 64).to(device)
        y = radon.forward(x)
        self.assertEqual(y.size(), (2, 3, 10, 64))
        z = radon.backprojection(y)
        self.assertEqual(z.size(), (2, 3, 64, 64))

        # no batch dimensions
        x = torch.FloatTensor(64, 64).to(device)
        y = radon.forward(x)
        self.assertEqual(y.size(), (10, 64))
        z = radon.backprojection(y)
        self.assertEqual(z.size(), (64, 64))
Example #6
    def test_differentiation(self):
        device = torch.device('cuda')
        x = torch.FloatTensor(1, 64, 64).to(device)
        x.requires_grad = True
        angles = torch.FloatTensor(
            np.linspace(0, 2 * np.pi, 10).astype(np.float32)).to(device)

        radon = Radon(64, angles)

        # check that backward is implemented for fp and bp
        y = radon.forward(x)
        z = torch.mean(radon.backprojection(y))
        z.backward()
        self.assertIsNotNone(x.grad)
Example #7
import numpy as np
import torch
import torch.nn as nn
from torch_radon import Radon

# sizes below are illustrative; the original snippet assumes they are defined elsewhere
batch_size = 8
n_angles = 64
image_size = 256

channels = 4

device = torch.device('cuda')
criterion = nn.L1Loss()

# Instantiate a model for the sinogram and one for the image
sino_model = nn.Conv2d(1, channels, 5, padding=2).to(device)
image_model = nn.Conv2d(channels, 1, 3, padding=1).to(device)

# create empty images
x = torch.FloatTensor(batch_size, 1, image_size, image_size).to(device)

# instantiate Radon transform
angles = np.linspace(0, np.pi, n_angles)
radon = Radon(image_size, angles)

# forward projection
sinogram = radon.forward(x)

# apply sino_model to sinograms
filtered_sinogram = sino_model(sinogram)

# backprojection
backprojected = radon.backprojection(filtered_sinogram)

# apply image_model to backprojected images
y = image_model(backprojected)

# backward works as usual
loss = criterion(y, x)
loss.backward()
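
Because gradients flow through both radon.forward and radon.backprojection, the two convolutional models can be trained end to end. A short follow-up sketch; the optimizer and learning rate are illustrative and not part of the original snippet:

optimizer = torch.optim.Adam(
    list(sino_model.parameters()) + list(image_model.parameters()), lr=1e-3)

optimizer.zero_grad()
y = image_model(radon.backprojection(sino_model(radon.forward(x))))
loss = criterion(y, x)
loss.backward()
optimizer.step()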
Example #8
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch_radon import Radon

from utils import show_images

batch_size = 1
n_angles = 512
image_size = 512

img = np.load("phantom.npy")
device = torch.device('cuda')

# instantiate Radon transform
angles = np.linspace(0, np.pi, n_angles, endpoint=False)
radon = Radon(image_size, angles)

with torch.no_grad():
    x = torch.FloatTensor(img).reshape(1, 1, image_size, image_size).to(device)

    sinogram = radon.forward(x)
    filtered_sinogram = radon.filter_sinogram(sinogram)
    fbp = radon.backprojection(filtered_sinogram,
                               extend=False) * np.pi / n_angles

print("FBP Error", torch.norm(x - fbp).item())

titles = [
    "Original Image", "Sinogram", "Filtered Sinogram",
    "Filtered Backprojection"
]
show_images([x, sinogram, filtered_sinogram, fbp], titles, keep_range=False)

plt.show()
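
A note on the scaling above: the * np.pi / n_angles factor is the angular step $\Delta\theta = \pi / N$ of the Riemann sum that discretizes filtered backprojection, $f(x, y) \approx \frac{\pi}{N} \sum_{k=0}^{N-1} q_{\theta_k}(x \cos\theta_k + y \sin\theta_k)$, where $q_\theta$ is the filtered projection at angle $\theta$ and $N$ is the number of equally spaced angles in $[0, \pi)$.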
Example #9
class Predict():
    def __init__(self, args, image):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        if args.twoends:
            factor = 192 / (args.angles + 2)  # 7.68
        else:
            factor = 180 / args.angles  # 7.826086956521739

        self.net = UNet(input_nc=1, output_nc=1,
                        scale_factor=factor).to(self.device)
        self.net = nn.DataParallel(self.net)
        pathG = os.path.join(args.ckpt)
        self.net.load_state_dict(torch.load(pathG, map_location=self.device))
        self.net.eval()

        self.image = image.to(self.device)
        self.twoends = args.twoends
        self.mask = self.gen_mask().to(self.device)

        # Radon Operator
        angles = np.linspace(0, np.pi, 180, endpoint=False)
        self.radon = Radon(args.height, angles, clip_to_circle=True)

    def gen_mask(self):
        mask = torch.zeros(180)
        mask[::8].fill_(1)  # 180
        if self.twoends:
            mask = torch.cat((mask[-6:], mask, mask[:6]), 0)  # 192
        return mask

    def append_twoends(self, y):
        front = torch.flip(y[:, :, :, :6], [2])
        back = torch.flip(y[:, :, :, -6:], [2])
        return torch.cat((back, y, front), 3)

    def gen_sparse(self, y):
        return y[:, :, :, self.mask == 1]

    def crop_sinogram(self, x):
        return x[:, :, :, 6:-6]

    def inpaint(self):
        y = self.image  # 320 x 180

        # Two-Ends Preprocessing
        if self.twoends:
            y = self.append_twoends(y)  # 320 x 192

        # Generate Sparse-view Image, forward model
        x = self.gen_sparse(y)
        Gx = self.net(x)

        # Crop Two-Ends
        if self.twoends:
            Gx = self.crop_sinogram(Gx)

        # FBP Reconstruction
        Gx = normalize(Gx)  # 0~1
        fbp_Gx = self.radon.backprojection(
            self.radon.filter_sinogram(Gx.permute(0, 1, 3, 2)))

        # Save Results
        vutils.save_image(fbp_Gx, 'result_reconstruction.png', normalize=True)
        vutils.save_image(Gx, 'result_sinogram.png', normalize=True)
Example #10
class Predict():
    def __init__(self, args, dataloader):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.args = args
        self.dataloader = dataloader

        if args.twoends:
            factor = 192 / (args.angles + 2)  # 7.68
        else:
            factor = 180 / args.angles  # 7.826086956521739

        self.net = UNet(input_nc=1, output_nc=1,
                        scale_factor=factor).to(self.device)
        self.net = nn.DataParallel(self.net)
        pathG = os.path.join(args.ckpt)
        self.net.load_state_dict(torch.load(pathG, map_location=self.device))
        self.net.eval()

        self.gen_mask()

        # Radon Operator for different downsampling factors
        angles = np.linspace(0, np.pi, 180, endpoint=False)
        self.radon = Radon(args.height, angles, clip_to_circle=True)
        self.radon23 = Radon(args.height, angles[::8], clip_to_circle=True)
        self.radon45 = Radon(args.height, angles[::4], clip_to_circle=True)
        self.radon90 = Radon(args.height, angles[::2], clip_to_circle=True)

    def gen_mask(self):
        mask = torch.zeros(180)
        mask[::8].fill_(1)  # 180
        if self.args.twoends:
            self.mask = torch.cat((mask[-6:], mask, mask[:6]),
                                  0).to(self.device)  # 192
        self.mask_sparse = mask

    def append_twoends(self, y):
        front = torch.flip(y[:, :, :, :6], [2])
        back = torch.flip(y[:, :, :, -6:], [2])
        return torch.cat((back, y, front), 3)

    def gen_input(self, y, mask):
        return y[:, :, :, mask == 1]

    def crop_sinogram(self, x):
        return x[:, :, :, 6:-6]

    def inpaint(self):
        for i, data in enumerate(self.dataloader):
            y = data[0].to(self.device)  # 320 x 180

            # Two-Ends Preprocessing
            if self.args.twoends:
                y_TE = self.append_twoends(y)  # 320 x 192

            # Forward Model (assumes args.twoends is set; y_TE and self.mask are only defined in that case)
            x = self.gen_input(y_TE, self.mask)  # input, 320 x 25
            Gx = self.net(x)  # 320 x 192

            # Crop Two-Ends
            if self.args.twoends:
                Gx = self.crop_sinogram(Gx)  # 320 x 180

            # FBP Reconstruction
            Gx = normalize(Gx)  # 0~1
            fbp_Gx = self.radon.backprojection(
                self.radon.filter_sinogram(Gx.permute(0, 1, 3,
                                                      2)))  # 320 x 320

            # FBP for downsampled sinograms
            Gx1 = Gx[:, :, :, ::2]  # 320 x 90
            Gx1 = normalize(Gx1)  # 0~1
            fbp_Gx1 = self.radon90.backprojection(
                self.radon90.filter_sinogram(Gx1.permute(0, 1, 3, 2)))

            Gx2 = Gx[:, :, :, ::4]  # 320 x 45
            Gx2 = normalize(Gx2)  # 0~1
            fbp_Gx2 = self.radon45.backprojection(
                self.radon45.filter_sinogram(Gx2.permute(0, 1, 3, 2)))

            sparse = y[:, :, :, ::8]  # 320 x 23, original sparse-view sinogram
            sparse = normalize(sparse)  # 0~1
            fbp_sparse = self.radon23.backprojection(
                self.radon23.filter_sinogram(sparse.permute(0, 1, 3, 2)))

            print(f'Saving images for batch {i}')

            for j in range(y.size()[0]):
                #                 vutils.save_image(Gx[j,0], f'{self.args.outdir}/{class_name}/{fnames[i*self.args.bs+j]}', normalize=True)
                vutils.save_image(
                    fbp_Gx[j, 0],
                    f'{self.args.outdir}/{class_name}/{fnames[i*self.args.bs+j]}',
                    normalize=True)
                vutils.save_image(
                    fbp_Gx1[j, 0],
                    f'{self.args.outdir}_90/{class_name}/{fnames[i*self.args.bs+j]}',
                    normalize=True)
                vutils.save_image(
                    fbp_Gx2[j, 0],
                    f'{self.args.outdir}_45/{class_name}/{fnames[i*self.args.bs+j]}',
                    normalize=True)
                vutils.save_image(
                    fbp_sparse[j, 0],
                    f'{self.args.outdir}_23/{class_name}/{fnames[i*self.args.bs+j]}',
                    normalize=True)
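
A minimal driver sketch for this class (all values are hypothetical; UNet, normalize, class_name, fnames and the dataloader must be defined elsewhere, as in the original module; height=320 and angles=23 simply match the shape comments above):

import argparse

args = argparse.Namespace(ckpt='checkpoints/netG.pth', height=320, angles=23,
                          twoends=True, outdir='results', bs=4)
predictor = Predict(args, dataloader)  # dataloader: a torch DataLoader yielding sinogram batches
predictor.inpaint()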