def read_image_pair(fixed_path, moving_path):
    # cv2.imread returns HxWx3 BGR arrays (or None if the path cannot be read)
    fixed_np = cv2.imread(fixed_path)
    moving_np = cv2.imread(moving_path)
    image_size = fixed_np.shape[0:2]  # (height, width)
    image_spacing = (1.0, 1.0)
    image_origin = (0.0, 0.0)
    # cv2.resize expects (width, height), so swap the axes of image_size
    moving_np_resized = cv2.resize(moving_np, (image_size[1], image_size[0]))

    # average the colour channels to obtain single-channel float32 images
    fixed_np_bw = np.average(fixed_np, axis=2).astype(np.float32)
    moving_np_resized_bw = np.average(moving_np_resized, axis=2).astype(np.float32)
    fixed_image = al.Image(fixed_np_bw, image_size, image_spacing,
                           image_origin)
    moving_image = al.Image(moving_np_resized_bw, image_size, image_spacing,
                            image_origin)
    return fixed_image, moving_image
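A minimal usage sketch for read_image_pair (the file names are hypothetical placeholders; the imports used by the example, cv2, numpy as np and airlab as al, are assumed):

if __name__ == "__main__":
    fixed_img, moving_img = read_image_pair("fixed.png", "moving.png")
    # both returned images share the fixed image's grid (size, spacing, origin)
    print(fixed_img.size, moving_img.size)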
Example #2
def read_image_array_to_tensor(scan_num, extent, device):
    '''
    read 3D rock images, convert them to a torch tensor, then wrap it in an airlab image object
    :param scan_num: scan number used to locate the image folder
    :param extent: (z_min, z_max), (y_min, y_max), (x_min, x_max) = extent, e.g., [(800,1200), (125,825), (125,825)]
    :param device: torch device on which the tensor is allocated
    :return: airlab image
    '''

    # pattern = "6.5_L5_%s*_3.41_/segment/*_sub_mask.tif"%str(scan_num).zfill(3)
    pattern = "6.5_L5_%s*_3.41_/names_for_DIC/*.tif" % str(scan_num).zfill(3)

    # get image paths
    img_paths = io_function.get_file_list_by_pattern(sync_dir, pattern)

    # put the images in a sequence (from top to bottom)
    img_paths = porosity_profile.sort_images(img_paths)

    (z_min, z_max), (y_min, y_max), (x_min, x_max) = extent

    voxels_3d = calc_cov.read_3D_image_voxels_disk_start_end(
        img_paths, z_min, z_max)

    # crop
    voxels_3d = voxels_3d[y_min:y_max, x_min:x_max, :]

    # debug output: report the cropped volume shape
    print(voxels_3d.shape)
    height, width, z_len = voxels_3d.shape

    # TODO: do we need to flip the axis order to follow the numpy/torch convention?

    voxels_3d = voxels_3d.astype(np.float32)
    # convert to a torch tensor on the requested device
    image = th.from_numpy(voxels_3d).to(device=device)

    # tensor_image, image_size, image_spacing, image_origin
    image_al = al.Image(image, [height, width, z_len], [1, 1, 1], [0, 0, 0])

    return image_al
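A hedged usage sketch: the scan number is a placeholder, the crop extent is the one given in the docstring, torch is assumed to be imported as th, and the helper modules (io_function, porosity_profile, calc_cov) and sync_dir used inside the function are assumed to be available:

if __name__ == "__main__":
    device = th.device("cuda:0" if th.cuda.is_available() else "cpu")
    extent = [(800, 1200), (125, 825), (125, 825)]   # (z, y, x) ranges
    rock_image = read_image_array_to_tensor(101, extent, device)
    print(rock_image.size, rock_image.spacing, rock_image.origin)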
Example #3
def main():
    start = time.time()

    # set the used data type
    dtype = th.float32
    # default the computation device to the CPU
    device = th.device("cpu")

    # query GPUtil for an available, lightly loaded GPU; if one is found it is used instead of the CPU
    deviceIDs = GPUtil.getAvailable(order='first',
                                    limit=100,
                                    maxLoad=0.5,
                                    maxMemory=0.5,
                                    includeNan=False,
                                    excludeID=[],
                                    excludeUUID=[])
    if len(deviceIDs) > 0:
        print("using the GPU (ID:%d) for computing" % deviceIDs[0])
        device = th.device("cuda:%d" % deviceIDs[0])

    # create two 3D image volumes, each containing a cube; the moving cube is shifted by object_shift voxels
    object_shift = 5

    fixed_image = th.zeros(64, 64, 64).to(device=device)
    fixed_image[16:32, 16:32, 16:32] = 1.0
    fixed_image = al.Image(fixed_image, [64, 64, 64], [1, 1, 1], [0, 0, 0])

    moving_image = th.zeros(64, 64, 64).to(device=device)
    moving_image[16 - object_shift:32 - object_shift,
                 16 - object_shift:32 - object_shift,
                 16 - object_shift:32 - object_shift] = 1.0
    moving_image = al.Image(moving_image, [64, 64, 64], [1, 1, 1], [0, 0, 0])

    # create pairwise registration object
    registration = al.PairwiseRegistration()

    # choose the rigid transformation model
    transformation = al.transformation.pairwise.RigidTransformation(
        moving_image, opt_cm=True)
    transformation.init_translation(fixed_image)

    registration.set_transformation(transformation)

    # choose the Mean Squared Error as image loss
    image_loss = al.loss.pairwise.MSE(fixed_image, moving_image)

    registration.set_image_loss([image_loss])

    # choose the Adam optimizer to minimize the objective
    optimizer = th.optim.Adam(transformation.parameters(), lr=0.1)

    registration.set_optimizer(optimizer)
    registration.set_number_of_iterations(500)

    # start the registration
    registration.start()

    # invert the intensities for the visualisation
    fixed_image.image = 1 - fixed_image.image
    moving_image.image = 1 - moving_image.image

    # warp the moving image with the final transformation result
    displacement = transformation.get_displacement()
    warped_image = al.transformation.utils.warp_image(moving_image,
                                                      displacement)

    end = time.time()

    print("=================================================================")

    print("Registration done in: ", end - start, " s")
    print("Result parameters:")
    transformation.print()

    sitk.WriteImage(warped_image.itk(), 'rigid_warped_image.vtk')
    sitk.WriteImage(moving_image.itk(), 'rigid_moving_image.vtk')
    sitk.WriteImage(fixed_image.itk(), 'rigid_fixed_image.vtk')

    displacement = al.transformation.utils.unit_displacement_to_displacement(
        displacement)  # unit measures to image domain measures
    displacement = al.create_displacement_image_from_image(
        displacement, moving_image)
    sitk.WriteImage(displacement.itk(), 'displacement' + '.vtk')

    # plot the results
    plt.subplot(131)
    plt.imshow(fixed_image.numpy()[16, :, :], cmap='gray')
    plt.title('Fixed Image Slice')

    plt.subplot(132)
    plt.imshow(moving_image.numpy()[16, :, :], cmap='gray')
    plt.title('Moving Image Slice')

    plt.subplot(133)
    plt.imshow(warped_image.numpy()[16, :, :], cmap='gray')
    plt.title('Warped Moving Image Slice')

    plt.show()
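The main() above assumes the following module-level imports, inferred from the identifiers it uses (time, GPUtil, th, al, sitk, plt); an entry-point guard is added as a sketch:

import time
import GPUtil
import torch as th
import airlab as al
import SimpleITK as sitk
import matplotlib.pyplot as plt


if __name__ == "__main__":
    main()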
def prep_al_img(img, size, thumb_size):
    # spacing combines the thumbnail scaling with the global downsample factor;
    # downsample_factor, dtype, device and origin come from the enclosing scope
    spacing = np.append(size / thumb_size, 1) * downsample_factor
    img = torch.tensor(img, dtype=dtype).to(device)
    img = al.Image(img, img.shape, spacing, origin)
    return img
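prep_al_img relies on several names from its enclosing scope. A minimal sketch binding them with illustrative values (the values themselves are assumptions, not taken from the original code):

downsample_factor = 4            # hypothetical downsampling of the source images
dtype = torch.float32
device = torch.device("cpu")
origin = [0, 0, 0]
# al_img = prep_al_img(thumbnail, np.array(full_size), np.array(thumb_size))  # hypothetical call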
    def three_dim_affine_reg(self):
        start = time.time()

        # set the used data type
        dtype = torch.float32
        # take the computation device from the object, e.g.
        #   device = torch.device("cpu")
        #   device = torch.device("cuda:0")
        device = self.device

        # create the airlab image objects for registration
        new_stationary_img_tnsr = self.preprocessed_stationary_img_tnsr.to(
            device=device)
        new_moving_img_tnsr = self.preprocessed_moving_img_tnsr.to(
            device=device)
        fixed_image = al.Image(new_stationary_img_tnsr, self.img_shape,
                               self.preprocessed_stationary_img_voxel_dim,
                               self.preprocessed_stationary_img_centre)
        moving_image = al.Image(new_moving_img_tnsr, self.img_shape,
                                self.preprocessed_moving_img_voxel_dim,
                                self.preprocessed_moving_img_centre)

        # printing image properties
        print(
            " ============= fixed image size, spacing, origin and datatype ==================="
        )
        print(fixed_image.size)
        print(fixed_image.spacing)
        print(fixed_image.origin)
        print(fixed_image.dtype)
        print(
            " ============= moving image size, spacing, origin and datatype ==================="
        )
        print(moving_image.size)
        print(moving_image.spacing)
        print(moving_image.origin)
        print(moving_image.dtype)
        print(" ============= ============== ===================")

        # create pairwise registration object
        registration = al.PairwiseRegistration()

        # choose the transformation model (affine or B-spline)
        if (self.reg_type == "affine"):
            print("Using Affine transformation")
            print(" ============= ============== ===================")
            transformation = al.transformation.pairwise.AffineTransformation(
                moving_image, opt_cm=True)
        else:
            print("Using B-spline transformation")
            print(" ============= ============== ===================")
            transformation = al.transformation.pairwise.BsplineTransformation(
                image_size=moving_image.size,
                sigma=self.sigma,
                diffeomorphic=True,
                order=3,
                dtype=torch.float32,
                device=device)  # keep the transformation on the same device as the images
        transformation.init_translation(fixed_image)
        registration.set_transformation(transformation)

        # choose the image similarity loss (MSE, MI or NCC)
        if (self.loss_fnc == "MSE"):
            print("Using Mean squared error loss")
            image_loss = al.loss.pairwise.MSE(fixed_image, moving_image)
        elif (self.loss_fnc == "MI"):
            print("Using Mutual information loss")
            image_loss = al.loss.pairwise.MI(fixed_image,
                                             moving_image,
                                             bins=20,
                                             sigma=3)
        elif (self.loss_fnc == "CC"):
            print("Using Cross corelation loss")
            image_loss = al.loss.pairwise.NCC(fixed_image, moving_image)
        else:
            print(
                "No valid option chosen among MSE/NCC/NMI, using MSE as default"
            )
            image_loss = al.loss.pairwise.MSE(fixed_image, moving_image)

        registration.set_image_loss([image_loss])

        # choose the Adam optimizer to minimize the objective
        optimizer = torch.optim.Adam(transformation.parameters(), lr=0.1)

        registration.set_optimizer(optimizer)
        registration.set_number_of_iterations(const.ITERATIONS)

        # start the registration
        registration.start()

        # invert the intensities for the visualisation
        fixed_image.image = 1 - fixed_image.image
        moving_image.image = 1 - moving_image.image

        # warp the moving image with the final transformation result
        displacement = transformation.get_displacement()
        warped_image = al.transformation.utils.warp_image(
            moving_image, displacement)

        end = time.time()

        print(" ============= ============== ===================")

        print("Registration done in: ", end - start, " s")
        print("Result parameters:")
        transformation.print()
        print(" ============= ============== ===================")
        print(transformation.transformation_matrix)
        print(" ============= ============== ===================")

        # plot the results (note: this opens a window)

        plt.subplot(131)
        plt.imshow(fixed_image.numpy()[90, :, :], cmap='gray')
        plt.title('Fixed Image Slice')

        plt.subplot(132)
        plt.imshow(moving_image.numpy()[90, :, :], cmap='gray')
        plt.title('Moving Image Slice')

        plt.subplot(133)
        plt.imshow(warped_image.numpy()[90, :, :], cmap='gray')  # same slice index as the fixed/moving plots
        plt.title('Warped Moving Image Slice')
        plt.show()

        self.affine_transformation_matrix = transformation.transformation_matrix
        self.affine_transformation_object = transformation
        self.displacement = displacement

        return warped_image, transformation, displacement
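A hedged sketch of the attributes three_dim_affine_reg reads from self before it is called. The attribute names come from the method body; the enclosing class name and the example values are assumptions, and const.ITERATIONS must also be defined at module level:

# reg = VolumeRegistration()                            # hypothetical enclosing class
# reg.device = torch.device("cpu")
# reg.img_shape = [128, 128, 128]                       # illustrative volume shape
# reg.preprocessed_stationary_img_tnsr = fixed_tensor   # torch tensors of that shape
# reg.preprocessed_moving_img_tnsr = moving_tensor
# reg.preprocessed_stationary_img_voxel_dim = [1, 1, 1]
# reg.preprocessed_moving_img_voxel_dim = [1, 1, 1]
# reg.preprocessed_stationary_img_centre = [0, 0, 0]
# reg.preprocessed_moving_img_centre = [0, 0, 0]
# reg.reg_type = "affine"                               # or "bspline" for the B-spline branch
# reg.sigma = [3, 3, 3]                                 # only used by the B-spline branch
# reg.loss_fnc = "MSE"                                  # "MSE", "MI" or "CC"
# warped, transform, displacement = reg.three_dim_affine_reg()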
def main():
    start = time.time()

    # set the used data type
    dtype = th.float32
    # set the device for the computation to CPU
    device = th.device("cpu")

    # In order to use a GPU uncomment the following line. The number is the device index of the used GPU
    # Here, the GPU with the index 0 is used.
    # device = th.device("cuda:0")

    # create two 3D image volumes, each containing a cube; the moving cube is shifted by object_shift voxels
    object_shift = 10

    fixed_image = th.zeros(64, 64, 64).to(device=device)
    fixed_image[16:32, 16:32, 16:32] = 1.0
    fixed_image = al.Image(fixed_image, [64, 64, 64], [1, 1, 1], [0, 0, 0])

    moving_image = th.zeros(64, 64, 64).to(device=device)
    moving_image[16 - object_shift:32 - object_shift,
                 16 - object_shift:32 - object_shift,
                 16 - object_shift:32 - object_shift] = 1.0
    moving_image = al.Image(moving_image, [64, 64, 64], [1, 1, 1], [0, 0, 0])

    # create pairwise registration object
    registration = al.PairwiseRegistration()

    # choose the rigid transformation model
    transformation = al.transformation.pairwise.RigidTransformation(
        moving_image, opt_cm=True)
    transformation.init_translation(fixed_image)

    registration.set_transformation(transformation)

    # choose the Mean Squared Error as image loss
    image_loss = al.loss.pairwise.MSE(fixed_image, moving_image)

    registration.set_image_loss([image_loss])

    # choose the Adam optimizer to minimize the objective
    optimizer = th.optim.Adam(transformation.parameters(), lr=0.1)

    registration.set_optimizer(optimizer)
    registration.set_number_of_iterations(500)

    # start the registration
    registration.start()

    # invert the intensities for the visualisation
    fixed_image.image = 1 - fixed_image.image
    moving_image.image = 1 - moving_image.image

    # warp the moving image with the final transformation result
    displacement = transformation.get_displacement()
    warped_image = al.transformation.utils.warp_image(moving_image,
                                                      displacement)

    end = time.time()

    print("=================================================================")

    print("Registration done in: ", end - start, " s")
    print("Result parameters:")
    transformation.print()

    # sitk.WriteImage(warped_image.itk(), '/tmp/rigid_warped_image.vtk')
    # sitk.WriteImage(moving_image.itk(), '/tmp/rigid_moving_image.vtk')
    # sitk.WriteImage(fixed_image.itk(), '/tmp/rigid_fixed_image.vtk')

    # plot the results
    plt.subplot(131)
    plt.imshow(fixed_image.numpy()[16, :, :], cmap='gray')
    plt.title('Fixed Image Slice')

    plt.subplot(132)
    plt.imshow(moving_image.numpy()[16, :, :], cmap='gray')
    plt.title('Moving Image Slice')

    plt.subplot(133)
    plt.imshow(warped_image.numpy()[16, :, :], cmap='gray')
    plt.title('Warped Moving Image Slice')

    plt.show()
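
    # A small sanity check (not in the original example, added as a sketch):
    # compare the warped image with the fixed image; the mean squared
    # difference should be lower after registration than before it.
    mse_before = th.mean((moving_image.image - fixed_image.image) ** 2)
    mse_after = th.mean((warped_image.image - fixed_image.image) ** 2)
    print("MSE before/after registration:", mse_before.item(), mse_after.item())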