def test_get_centre_slice(self):
        #returns the 2D version
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0, -500, 0],
                                               detector_position=[0, 1000, 0])
        AG2 = AcquisitionGeometry.create_Cone2D(source_position=[0, -500],
                                                detector_position=[0, 1000])
        cs = AG.config.system.get_centre_slice()
        self.assertEqual(cs, AG2.config.system)

        #returns the 2D version
        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[0, -500, 0],
            detector_position=[0, 1000, 0],
            rotation_axis_direction=[-1, 0, 1],
            detector_direction_x=[1, 0, 1],
            detector_direction_y=[-1, 0, 1])
        AG2 = AcquisitionGeometry.create_Cone2D(source_position=[0, -500],
                                                detector_position=[0, 1000])
        cs = AG.config.system.get_centre_slice()
        self.assertEqual(cs, AG2.config.system)

        #raise error if cannot extract a centre slice
        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[0, -500, 0],
            detector_position=[0, 1000, 0],
            rotation_axis_direction=[1, 0, 1])
        with self.assertRaises(ValueError):
            cs = AG.config.system.get_centre_slice()

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0, -500, 0],
                                               detector_position=[0, 1000, 0],
                                               detector_direction_x=[1, 0, 1],
                                               detector_direction_y=[-1, 0, 1])
        with self.assertRaises(ValueError):
            cs = AG.config.system.get_centre_slice()
    def test_calculate_magnification(self):
        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,1000.])
        out = AG.config.system.calculate_magnification()
        self.assertEqual(out, [500, 1000, 3]) 

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,1000.], rotation_axis_position=[0.,250.])
        out = AG.config.system.calculate_magnification()
        self.assertEqual(out, [750, 750, 2]) 

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,1000.], rotation_axis_position=[5.,0.])
        out = AG.config.system.calculate_magnification()
        source_to_object = numpy.sqrt(5.0**2 + 500.0**2)
        theta = math.atan2(5.0,500.0)
        source_to_detector = 1500.0/math.cos(theta)
        self.assertEqual(out, [source_to_object, source_to_detector - source_to_object, source_to_detector/source_to_object]) 

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,1000.], rotation_axis_position=[5.,0.],detector_direction_x=[math.sqrt(5),math.sqrt(5)])
        out = AG.config.system.calculate_magnification()
        source_to_object = numpy.sqrt(5.0**2 + 500.0**2)

        ab = (AG.config.system.rotation_axis.position - AG.config.system.source.position).astype(numpy.float64)/source_to_object

        #source_position + d * ab = detector_position + t * detector_direction_x
        #x: d *  ab[0] =  t * detector_direction_x[0]
        #y: -500 + d *  ab[1] = 1000 + t * detector_direction_x[1] 

        # t = (d *  ab[0]) / math.sqrt(5)
        # d = 1500 / (ab[1]  - ab[0])

        source_to_detector = 1500 / (ab[1]  - ab[0])

        self.assertEqual(out, [source_to_object, source_to_detector - source_to_object, source_to_detector/source_to_object]) 
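
# A standalone numeric check (not part of the original tests) of the derivation in
# the comments above: intersect the ray through the rotation axis with the detector
# line by solving a 2x2 linear system, then compare against the closed form
# source_to_detector = 1500 / (ab[1] - ab[0]).
import math
import numpy

source = numpy.array([0.0, -500.0])
detector = numpy.array([0.0, 1000.0])
rotation_axis = numpy.array([5.0, 0.0])
direction_x = numpy.array([math.sqrt(5), math.sqrt(5)])
direction_x /= numpy.sqrt((direction_x**2).sum())

source_to_object = numpy.linalg.norm(rotation_axis - source)
ab = (rotation_axis - source) / source_to_object

# solve source + d*ab = detector + t*direction_x for (d, t)
A = numpy.column_stack([ab, -direction_x])
d, t = numpy.linalg.solve(A, detector - source)

assert numpy.isclose(d, 1500 / (ab[1] - ab[0]))
print(d / source_to_object)   # magnification with the offset rotation axis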
Example #3
    def test_cone2D(self):

        ag = AcquisitionGeometry.create_Cone2D(source_position=[0,-2], detector_position=[0,1])\
                                     .set_angles(self.angles_rad, angle_unit='radian')\
                                     .set_labels(['angle','horizontal'])\
                                     .set_panel(self.num_pixels_x, self.pixel_size_x)

        ig = ag.get_ImageGeometry()
        ig.voxel_num_y = 50
        ig.voxel_size_y /= 2

        angles_rad = np.array([-np.pi / 2, -np.pi, -3 * np.pi / 2])

        #2D cone
        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(ig, ag)
        np.testing.assert_allclose(
            tg_geometry.DSD, ag.dist_center_detector + ag.dist_source_center)
        np.testing.assert_allclose(tg_geometry.DSO, ag.dist_source_center)
        np.testing.assert_allclose(tg_angles, angles_rad)
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)
        np.testing.assert_allclose(tg_geometry.rotDetector, 0)
        np.testing.assert_allclose(tg_geometry.offDetector, 0)
        np.testing.assert_allclose(tg_geometry.offOrigin, 0)

        mag = ag.magnification
        np.testing.assert_allclose(tg_geometry.nVoxel, [1, 50, 128])
        np.testing.assert_allclose(tg_geometry.dVoxel,
                                   [0.1 / mag, 0.05 / mag, 0.1 / mag])
Example #4
    def setUp(self):
        #%% Setup Geometry
        voxel_num_xy = 255
        voxel_num_z = 15

        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 1000
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
                                           .set_angles(angles)\
                                           .set_panel(det_pix_x, pix_size)\
                                           .set_labels(['angle','horizontal'])

        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
                                     .set_angles(angles)\
                                     .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
                                     .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()

        self.ad3D = self.ag3D.allocate('random')
        self.ig3D = self.ag3D.get_ImageGeometry()
    def test_get_ImageGeometry(self):

        AG = AcquisitionGeometry.create_Parallel2D()\
            .set_panel(num_pixels=[512,1],pixel_size=[0.1,0.1])      
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,0,0.1,0.1,1,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Parallel3D()\
            .set_panel(num_pixels=[512,3],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,3,0.1,0.1,0.2,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,500.])\
            .set_panel(num_pixels=[512,1],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,0,0.05,0.05,1,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,500.,0])\
            .set_panel(num_pixels=[512,3],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,3,0.05,0.05,0.1,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,500.,0])\
            .set_panel(num_pixels=[512,3],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry(resolution=0.5)
        IG_gold = ImageGeometry(256,256,2,0.025,0.025,0.05,0,0,0,1)
        self.assertEqual(IG, IG_gold)
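
# A small standalone sketch (not from the original tests) making the pattern in the
# gold values above explicit: the default ImageGeometry voxel size returned by
# get_ImageGeometry() is the detector pixel size divided by the magnification.
from cil.framework import AcquisitionGeometry

ag = AcquisitionGeometry.create_Cone2D(source_position=[0, -500], detector_position=[0., 500.])\
    .set_panel(num_pixels=[512, 1], pixel_size=[0.1, 0.2])
ig = ag.get_ImageGeometry()

print(ag.magnification)    # 2 for this source/detector placement
print(ig.voxel_size_x, ag.config.panel.pixel_size[0] / ag.magnification)    # both 0.05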
Example #6
    def __init__(self, volume_geometry, sinogram_geometry):

        super(FBP_Flexible, self).__init__(volume_geometry=volume_geometry,
                                           sinogram_geometry=sinogram_geometry)

        #convert the parallel geometry to a cone-beam geometry with a very large source-to-object distance
        sino_geom_cone = sinogram_geometry.copy()
        sino_geom_cone.config.system.update_reference_frame()

        #reverse the ray direction unit vector and push the source out towards infinity
        cone_source = -sino_geom_cone.config.system.ray.direction * sino_geom_cone.config.panel.pixel_size[
            1] * sino_geom_cone.config.panel.num_pixels[1] * 1e6
        detector_position = sino_geom_cone.config.system.detector.position
        detector_direction_x = sino_geom_cone.config.system.detector.direction_x

        if sinogram_geometry.dimension == '2D':
            tmp = AcquisitionGeometry.create_Cone2D(cone_source,
                                                    detector_position,
                                                    detector_direction_x)
        else:
            detector_direction_y = sino_geom_cone.config.system.detector.direction_y
            tmp = AcquisitionGeometry.create_Cone3D(cone_source,
                                                    detector_position,
                                                    detector_direction_x,
                                                    detector_direction_y)

        sino_geom_cone.config.system = tmp.config.system.copy()

        self.vol_geom_astra, self.proj_geom_astra = convert_geometry_to_astra_vec(
            volume_geometry, sino_geom_cone)
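
# A standalone back-of-envelope sketch (with assumed example numbers, not values
# taken from the class above) of why pushing the source ~1e6 panel-widths away makes
# the cone geometry behave like a parallel one: the half fan angle subtended by the
# panel becomes negligible.
import math

pixel_size = 0.1
num_pixels = 2048
panel_width = pixel_size * num_pixels
source_distance = panel_width * 1e6     # same scaling as used in __init__ above

half_fan_angle = math.atan(0.5 * panel_width / source_distance)
print(math.degrees(half_fan_angle))     # ~2.9e-5 degrees, effectively parallel rays
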
    def test_get_centre_slice(self):
        AG = AcquisitionGeometry.create_Cone2D(source_position=[0, -500],
                                               detector_position=[0., 1000.])
        AG2 = AG.copy()

        AG2.config.system.get_centre_slice()
        self.assertEqual(AG.config.system, AG2.config.system)
    def test_align_reference_frame_tigre(self):

        ag = AcquisitionGeometry.create_Cone2D(source_position=[0, 50],
                                               detector_position=[0., -100.],
                                               rotation_axis_position=[5., 2])
        ag.set_panel(100)

        ag_align = ag.copy()
        ag_align.config.system.align_reference_frame('tigre')

        numpy.testing.assert_allclose(ag_align.config.system.source.position,
                                      [0, -ag.dist_source_center],
                                      atol=1E-6)
        numpy.testing.assert_allclose(
            ag_align.config.system.rotation_axis.position, [0, 0], rtol=1E-6)

        cos_theta = abs(
            ag.config.system.source.position[1] -
            ag.config.system.rotation_axis.position[1]) / ag.dist_source_center
        sin_theta = math.sin(math.acos(cos_theta))

        vec = ag.config.system.detector.position - ag.config.system.source.position
        tmp = abs(vec[1]) * cos_theta
        det_y = tmp - ag.dist_source_center
        det_x = numpy.sqrt(vec[1]**2 - tmp**2)

        numpy.testing.assert_allclose(ag_align.config.system.detector.position,
                                      [det_x, det_y],
                                      rtol=1E-6)

        dir_x = -ag.config.system.detector.direction_x[0] * cos_theta
        dir_y = ag.config.system.detector.direction_x[0] * sin_theta
        numpy.testing.assert_allclose(
            ag_align.config.system.detector.direction_x, [dir_x, dir_y],
            rtol=1E-6)
Example #9
def has_gpu_tigre():

    if not has_tigre:
        return False

    has_gpu = True
    if has_nvidia_smi():
        from cil.plugins.tigre import ProjectionOperator
        from tigre.utilities.errors import TigreCudaCallError

        N = 3
        angles = np.linspace(0, np.pi, 2, dtype='float32')

        ag = AcquisitionGeometry.create_Cone2D([0,-100],[0,200])\
                                .set_angles(angles, angle_unit='radian')\
                                .set_panel(N, 0.1)\
                                .set_labels(['angle', 'horizontal'])

        ig = ag.get_ImageGeometry()

        data = ig.allocate(1)

        Op = ProjectionOperator(ig, ag)

        try:
            Op.direct(data)
            has_gpu = True
        except TigreCudaCallError:
            has_gpu = False
    else:
        has_gpu = False

    print("has_gpu_tigre\t{}".format(has_gpu))
    return has_gpu
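
# A minimal sketch (an assumption, not taken from the source) of how a helper like
# has_gpu_tigre() is typically consumed: skipping GPU-dependent tests when no
# working TIGRE/CUDA stack is present. Assumes this sketch lives in the same module
# as has_gpu_tigre above.
import unittest

class TestTigreGPU(unittest.TestCase):

    @unittest.skipUnless(has_gpu_tigre(), "TIGRE GPU support not available")
    def test_projection_runs_on_gpu(self):
        # placeholder body for the sketch
        self.assertTrue(True)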
Example #10
    def setUp(self):
        #%% Setup Geometry
        voxel_num_xy = 255
        voxel_num_z = 15
        cs_ind = (voxel_num_z - 1) // 2

        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 1000
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
                                           .set_angles(angles)\
                                           .set_panel(det_pix_x, pix_size)\
                                           .set_labels(['angle','horizontal'])

        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
                                     .set_angles(angles)\
                                     .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
                                     .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()

        #%% Create phantom
        kernel_size = voxel_num_xy
        kernel_radius = (kernel_size - 1) // 2
        y, x = np.ogrid[-kernel_radius:kernel_radius + 1,
                        -kernel_radius:kernel_radius + 1]

        circle1 = [5, 0, 0]  #r,x,y
        dist1 = ((x - circle1[1])**2 + (y - circle1[2])**2)**0.5

        circle2 = [5, 80, 0]  #r,x,y
        dist2 = ((x - circle2[1])**2 + (y - circle2[2])**2)**0.5

        circle3 = [25, 0, 80]  #r,x,y
        dist3 = ((x - circle3[1])**2 + (y - circle3[2])**2)**0.5

        mask1 = (dist1 - circle1[0]).clip(0, 1)
        mask2 = (dist2 - circle2[0]).clip(0, 1)
        mask3 = (dist3 - circle3[0]).clip(0, 1)
        phantom = 1 - np.logical_and(np.logical_and(mask1, mask2), mask3)

        self.golden_data = self.ig3D.allocate(0)
        for i in range(4):
            self.golden_data.fill(array=phantom, vertical=7 + i)

        self.golden_data_cs = self.golden_data.get_slice(vertical=cs_ind,
                                                         force=True)

        self.Op = ProjectionOperator(self.ig3D, self.ag3D)
        self.fp = self.Op.direct(self.golden_data)
    def test_update_reference_frame(self):
        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,1000.], rotation_axis_position=[5.,2.])
        AG.config.system.update_reference_frame()

        numpy.testing.assert_allclose(AG.config.system.source.position, [-5,-502], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position, [-5,998], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x, [1,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position, [0,0], rtol=1E-6)
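
# A tiny standalone arithmetic check (not from the tests) of the expected values
# above: update_reference_frame() translates the system so that the rotation axis
# sits at the origin, i.e. every position is shifted by -rotation_axis_position.
import numpy

rotation_axis_position = numpy.array([5., 2.])
print(numpy.array([0., -500.]) - rotation_axis_position)    # [-5., -502.]  source
print(numpy.array([0., 1000.]) - rotation_axis_position)    # [-5.,  998.]  detector
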
    def test_system_description(self):
        AG = AcquisitionGeometry.create_Cone2D(source_position=[0, -50],
                                               detector_position=[0, 100])
        self.assertTrue(AG.system_description == 'simple')

        AG = AcquisitionGeometry.create_Cone2D(source_position=[5, -50],
                                               detector_position=[5, 100],
                                               rotation_axis_position=[5, 0])
        self.assertTrue(AG.system_description == 'simple')

        AG = AcquisitionGeometry.create_Cone2D(source_position=[5, -50],
                                               detector_position=[0, 100])
        self.assertTrue(AG.system_description == 'advanced')

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0, -50],
                                               detector_position=[0, 100],
                                               rotation_axis_position=[5, 0])
        self.assertTrue(AG.system_description == 'offset')
    def test_create_Cone2D(self):
        #default
        source_position = [0.1, -500.0]
        detector_position = [-1.3, 1000.0]

        AG = AcquisitionGeometry.create_Cone2D(source_position,
                                               detector_position)
        numpy.testing.assert_allclose(AG.config.system.source.position,
                                      source_position,
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position,
                                      detector_position,
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x,
                                      [1, 0],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position,
                                      [0, 0],
                                      rtol=1E-6)

        #values
        detector_direction_x = [1, 0.2]
        rotation_axis_position = [0.1, 2]

        AG = AcquisitionGeometry.create_Cone2D(source_position,
                                               detector_position,
                                               detector_direction_x,
                                               rotation_axis_position)

        detector_direction_x = numpy.asarray(detector_direction_x)
        detector_direction_x /= numpy.sqrt((detector_direction_x**2).sum())

        numpy.testing.assert_allclose(AG.config.system.source.position,
                                      source_position,
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position,
                                      detector_position,
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x,
                                      detector_direction_x,
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position,
                                      rotation_axis_position,
                                      rtol=1E-6)
    def setUp(self):

        N = 128
        angles = np.linspace(0, 360, 50, True, dtype=np.float32)
        offset = 0.4
        ag = AcquisitionGeometry.create_Cone2D((offset, -100), (offset, 100))
        ag.set_panel(N)

        ag.set_angles(angles, angle_unit=AcquisitionGeometry.DEGREE)
        ig = ag.get_ImageGeometry()
        self.ag = ag
        self.ig = ig
        self.N = N
Example #15
    def setUp(self):

        self.acq_data = dataexample.SIMULATED_CONE_BEAM_DATA.get().get_slice(
            vertical='centre')
        self.img_data = dataexample.SIMULATED_SPHERE_VOLUME.get().get_slice(
            vertical='centre')

        self.acq_data = np.log(self.acq_data)
        self.acq_data *= -1.0

        self.ig = self.img_data.geometry
        self.ag = self.acq_data.geometry

        self.ag_small = AcquisitionGeometry.create_Cone2D([0, -1000], [0, 0])
        self.ag_small.set_panel((16))
        self.ag_small.set_angles([0])
Example #16
    def setUp(self):
        # Define image geometry.
        pixels_x = 128
        pixels_y = 3

        angles_deg = np.asarray([0, 90.0, 180.0], dtype='float32')
        angles_rad = angles_deg * np.pi / 180.0

        ag = AcquisitionGeometry.create_Parallel2D()\
                                .set_angles(angles_rad, angle_unit='radian')\
                                .set_labels(['angle','horizontal'])\
                                .set_panel(pixels_x, 0.1)

        ig = ag.get_ImageGeometry()


        ag_deg = AcquisitionGeometry.create_Parallel2D()\
                                    .set_angles(angles_deg, angle_unit='degree')\
                                    .set_labels(['angle','horizontal'])\
                                    .set_panel(pixels_x, 0.1)

        ag_cone = AcquisitionGeometry.create_Cone2D([0,-2], [0,1])\
                                     .set_angles(angles_rad, angle_unit='radian')\
                                     .set_labels(['angle','horizontal'])\
                                     .set_panel(pixels_x, 0.1)



        ag3 = AcquisitionGeometry.create_Parallel3D()\
                                 .set_angles(angles_rad, angle_unit='radian')\
                                 .set_labels(['vertical', 'angle','horizontal'])\
                                 .set_panel((pixels_x,pixels_y), (0.1,0.1))

        ig3 = ag3.get_ImageGeometry()

        ag3_cone = AcquisitionGeometry.create_Cone3D([0,-2,0], [0,1,0])\
                                      .set_angles(angles_rad, angle_unit='radian')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((pixels_x,pixels_y), (0.1,0.1))
        self.ig = ig
        self.ig3 = ig3
        self.ag = ag
        self.ag_deg = ag_deg
        self.ag_cone = ag_cone
        self.ag3 = ag3
        self.ag3_cone = ag3_cone
Example #17
    def setUp(self):

        N = 3
        angles = np.linspace(0, np.pi, 2, dtype='float32')

        self.ag = AcquisitionGeometry.create_Cone2D([0,-100],[0,200])\
                                .set_angles(angles, angle_unit='radian')\
                                .set_panel(N, 0.1)\
                                .set_labels(['angle', 'horizontal'])

        self.ig = self.ag.get_ImageGeometry()
        self.Op = ProjectionOperator(self.ig, self.ag)

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-100,0],[0,200,0])\
                                .set_angles(angles, angle_unit='radian')\
                                .set_panel((N,N), (0.1,0.1))\
                                .set_labels(['angle', 'vertical', 'horizontal'])

        self.ig3D = self.ag3D.get_ImageGeometry()
        self.Op3D = ProjectionOperator(self.ig3D, self.ag3D)
    def test_align_reference_frame_cil(self):

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0, 50],
                                               detector_position=[0., -100.],
                                               rotation_axis_position=[5., 2.])
        AG.set_panel(100)

        AG.config.system.align_reference_frame('cil')

        numpy.testing.assert_allclose(AG.config.system.source.position,
                                      [5, -48],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position,
                                      [5, 102],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x,
                                      [-1, 0],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position,
                                      [0, 0],
                                      rtol=1E-6)
Example #19
    def test_cone2D(self):

        ag = AcquisitionGeometry.create_Cone2D(source_position=[0,-6], detector_position=[0,16])\
                                     .set_angles(self.angles_rad, angle_unit='radian')\
                                     .set_labels(['angle','horizontal'])\
                                     .set_panel(self.num_pixels_x, self.pixel_size_x)

        #2D cone
        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(
            self.ig, ag)

        for i, ang in enumerate(tg_angles):
            ang2 = -(self.angles_rad[i] + np.pi / 2)
            self.compare_angles(ang, ang2, 1e-6)

        self.assertTrue(tg_geometry.mode == 'cone')
        np.testing.assert_allclose(
            tg_geometry.DSD, ag.dist_center_detector + ag.dist_source_center)
        np.testing.assert_allclose(tg_geometry.DSO, ag.dist_source_center)
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)
        np.testing.assert_allclose(tg_geometry.rotDetector, 0)
        np.testing.assert_allclose(tg_geometry.offDetector, 0)
        np.testing.assert_allclose(tg_geometry.offOrigin, 0)

        np.testing.assert_allclose(
            tg_geometry.nVoxel, [1, self.ig.voxel_num_y, self.ig.voxel_num_x])
        np.testing.assert_allclose(tg_geometry.dVoxel, [
            ag.config.panel.pixel_size[1] / ag.magnification,
            self.ig.voxel_size_y, self.ig.voxel_size_x
        ])
Example #20
    def get_geometry(self):
        '''
        Parse the NEXUS file and return either an ImageGeometry or an
        AcquisitionGeometry, depending on the file content
        '''

        with h5py.File(self.file_name, 'r') as dfile:

            if np.string_(dfile.attrs['creator']) != np.string_(
                    'NEXUSDataWriter.py'):
                raise Exception(
                    'We can parse only files created by NEXUSDataWriter.py')

            ds_data = dfile['entry1/tomo_entry/data/data']

            if ds_data.attrs['data_type'] == 'ImageData':

                self._geometry = ImageGeometry(
                    voxel_num_x=int(ds_data.attrs['voxel_num_x']),
                    voxel_num_y=int(ds_data.attrs['voxel_num_y']),
                    voxel_num_z=int(ds_data.attrs['voxel_num_z']),
                    voxel_size_x=ds_data.attrs['voxel_size_x'],
                    voxel_size_y=ds_data.attrs['voxel_size_y'],
                    voxel_size_z=ds_data.attrs['voxel_size_z'],
                    center_x=ds_data.attrs['center_x'],
                    center_y=ds_data.attrs['center_y'],
                    center_z=ds_data.attrs['center_z'],
                    channels=ds_data.attrs['channels'])

                if 'channel_spacing' in ds_data.attrs:
                    self._geometry.channel_spacing = ds_data.attrs[
                        'channel_spacing']

                # read the dimension_labels from dim{}
                dimension_labels = self.read_dimension_labels(ds_data.attrs)

            else:  # AcquisitionData
                if ds_data.attrs.__contains__('dist_source_center') or dfile[
                        'entry1/tomo_entry'].__contains__(
                            'config/source/position'):
                    geom_type = 'cone'
                else:
                    geom_type = 'parallel'

                if ds_data.attrs.__contains__('num_pixels_v'):
                    num_pixels_v = ds_data.attrs.get('num_pixels_v')
                elif ds_data.attrs.__contains__('pixel_num_v'):
                    num_pixels_v = ds_data.attrs.get('pixel_num_v')
                else:
                    num_pixels_v = 1

                if num_pixels_v > 1:
                    dim = 3
                else:
                    dim = 2

                if self.is_old_file_version():
                    num_pixels_h = ds_data.attrs.get('pixel_num_h', 1)
                    num_channels = ds_data.attrs['channels']
                    ds_angles = dfile['entry1/tomo_entry/data/rotation_angle']

                    if geom_type == 'cone' and dim == 3:
                        self._geometry = AcquisitionGeometry.create_Cone3D(
                            source_position=[
                                0, -ds_data.attrs['dist_source_center'], 0
                            ],
                            detector_position=[
                                0, ds_data.attrs['dist_center_detector'], 0
                            ])
                    elif geom_type == 'cone' and dim == 2:
                        self._geometry = AcquisitionGeometry.create_Cone2D(
                            source_position=[
                                0, -ds_data.attrs['dist_source_center']
                            ],
                            detector_position=[
                                0, ds_data.attrs['dist_center_detector']
                            ])
                    elif geom_type == 'parallel' and dim == 3:
                        self._geometry = AcquisitionGeometry.create_Parallel3D(
                        )
                    elif geom_type == 'parallel' and dim == 2:
                        self._geometry = AcquisitionGeometry.create_Parallel2D(
                        )

                else:
                    num_pixels_h = ds_data.attrs.get('num_pixels_h', 1)
                    num_channels = ds_data.attrs['num_channels']
                    ds_angles = dfile['entry1/tomo_entry/config/angles']

                    rotation_axis_position = list(dfile[
                        'entry1/tomo_entry/config/rotation_axis/position'])
                    detector_position = list(
                        dfile['entry1/tomo_entry/config/detector/position'])

                    ds_detector = dfile['entry1/tomo_entry/config/detector']
                    if ds_detector.__contains__('direction_x'):
                        detector_direction_x = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_x'])
                    else:
                        detector_direction_x = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_row'])

                    if ds_detector.__contains__('direction_y'):
                        detector_direction_y = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_y'])
                    elif ds_detector.__contains__('direction_col'):
                        detector_direction_y = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_col'])

                    ds_rotate = dfile['entry1/tomo_entry/config/rotation_axis']
                    if ds_rotate.__contains__('direction'):
                        rotation_axis_direction = list(dfile[
                            'entry1/tomo_entry/config/rotation_axis/direction']
                                                       )

                    if geom_type == 'cone':
                        source_position = list(
                            dfile['entry1/tomo_entry/config/source/position'])

                        if dim == 2:
                            self._geometry = AcquisitionGeometry.create_Cone2D(
                                source_position, detector_position,
                                detector_direction_x, rotation_axis_position)
                        else:
                            self._geometry = AcquisitionGeometry.create_Cone3D(source_position,\
                                                detector_position, detector_direction_x, detector_direction_y,\
                                                rotation_axis_position, rotation_axis_direction)
                    else:
                        ray_direction = list(
                            dfile['entry1/tomo_entry/config/ray/direction'])

                        if dim == 2:
                            self._geometry = AcquisitionGeometry.create_Parallel2D(
                                ray_direction, detector_position,
                                detector_direction_x, rotation_axis_position)
                        else:
                            self._geometry = AcquisitionGeometry.create_Parallel3D(ray_direction,\
                                                detector_position, detector_direction_x, detector_direction_y,\
                                                rotation_axis_position, rotation_axis_direction)

                # for all AcquisitionData
                #set angles
                angles = list(ds_angles)
                angle_unit = ds_angles.attrs.get('angle_unit', 'degree')
                initial_angle = ds_angles.attrs.get('initial_angle', 0)
                self._geometry.set_angles(angles,
                                          initial_angle=initial_angle,
                                          angle_unit=angle_unit)

                #set panel
                pixel_size_v = ds_data.attrs.get('pixel_size_v',
                                                 ds_data.attrs['pixel_size_h'])
                origin = ds_data.attrs.get('panel_origin', 'bottom-left')
                self._geometry.set_panel((num_pixels_h, num_pixels_v),\
                                        pixel_size=(ds_data.attrs['pixel_size_h'], pixel_size_v),\
                                        origin=origin)

                # set channels
                self._geometry.set_channels(num_channels)

                dimension_labels = []
                dimension_labels = self.read_dimension_labels(ds_data.attrs)

        #set labels
        self._geometry.set_labels(dimension_labels)

        return self._geometry
Example #21
    def test_Slicer(self):
        
        #test parallel 2D case

        ray_direction = [0.1, 3.0]
        detector_position = [-1.3, 1000.0]
        detector_direction_row = [1.0, 0.2]
        rotation_axis_position = [0.1, 2.0]
        
        AG = AcquisitionGeometry.create_Parallel2D(ray_direction=ray_direction, 
                                                    detector_position=detector_position, 
                                                    detector_direction_x=detector_direction_row, 
                                                    rotation_axis_position=rotation_axis_position)
        
        angles = numpy.linspace(0, 360, 10, dtype=numpy.float32)
        
        AG.set_channels(num_channels=10)
        AG.set_angles(angles, initial_angle=10, angle_unit='radian')
        AG.set_panel(100, pixel_size=0.1)
        
        data = AG.allocate('random')
        
        s = Slicer(roi={'channel': (1, -2, 3),
                        'angle': (2, 9, 2),
                        'horizontal': (10, -11, 7)})
        s.set_input(data)
        data_sliced = s.process()
        
        AG_sliced = AG.clone()
        AG_sliced.set_channels(num_channels=numpy.arange(1, 10-2, 3).shape[0])
        AG_sliced.set_panel([numpy.arange(10, 100-11, 7).shape[0], 1], pixel_size=0.1)
        AG_sliced.set_angles(angles[2:9:2], initial_angle=10, angle_unit='radian')
        
        self.assertTrue(data_sliced.geometry == AG_sliced)
        numpy.testing.assert_allclose(data_sliced.as_array(), numpy.squeeze(data.as_array()[1:-2:3, 2:9:2, 10:-11:7]), rtol=1E-6)
        
        #%%
        #test parallel 3D case
        
        ray_direction = [0.1, 3.0, 0.4]
        detector_position = [-1.3, 1000.0, 2]
        detector_direction_row = [1.0, 0.2, 0.0]
        detector_direction_col = [0.0 ,0.0, 1.0]
        rotation_axis_position = [0.1, 2.0, 0.5]
        rotation_axis_direction = [0.1, 2.0, 0.5]
        
        AG = AcquisitionGeometry.create_Parallel3D(ray_direction=ray_direction, 
                                                    detector_position=detector_position, 
                                                    detector_direction_x=detector_direction_row, 
                                                    detector_direction_y=detector_direction_col,
                                                    rotation_axis_position=rotation_axis_position,
                                                    rotation_axis_direction=rotation_axis_direction)
        
        angles = numpy.linspace(0, 360, 10, dtype=numpy.float32)
        
        AG.set_channels(num_channels=10)
        AG.set_angles(angles, initial_angle=10, angle_unit='radian')
        AG.set_panel((100, 50), pixel_size=(0.1, 0.2))
        AG.dimension_labels = ['vertical',\
                                'horizontal',\
                                'angle',\
                                'channel']
        
        data = AG.allocate('random')
        
        s = Slicer(roi={'channel': (None, 1),
                        'angle': -1,
                        'horizontal': (10, None, 2),
                        'vertical': (10, 12, 1)})
        s.set_input(data)
        data_sliced = s.process()
        
        dimension_labels_sliced = list(data.geometry.dimension_labels)
        dimension_labels_sliced.remove('channel')
        dimension_labels_sliced.remove('vertical')
        
        AG_sliced = AG.clone()
        AG_sliced.dimension_labels = dimension_labels_sliced
        AG_sliced.set_channels(num_channels=1)
        AG_sliced.set_panel([numpy.arange(10, 100, 2).shape[0], numpy.arange(10, 12, 1).shape[0]], pixel_size=(0.1, 0.2))
        
        self.assertTrue(data_sliced.geometry == AG_sliced)
        numpy.testing.assert_allclose(data_sliced.as_array(), numpy.squeeze(data.as_array()[10:12:1, 10::2, :, :1]), rtol=1E-6)
        
        #%%
        #test cone 2D case
        
        source_position = [0.1, 3.0]
        detector_position = [-1.3, 1000.0]
        detector_direction_row = [1.0, 0.2]
        rotation_axis_position = [0.1, 2.0]
        
        AG = AcquisitionGeometry.create_Cone2D(source_position=source_position, 
                                                detector_position=detector_position, 
                                                detector_direction_x=detector_direction_row, 
                                                rotation_axis_position=rotation_axis_position)
        
        angles = numpy.linspace(0, 360, 10, dtype=numpy.float32)
        
        AG.set_channels(num_channels=10)
        AG.set_angles(angles, initial_angle=10, angle_unit='degree')
        AG.set_panel(100, pixel_size=0.1)
        
        data = AG.allocate('random')
        
        s = Slicer(roi={'channel': (1, None, 4),
                        'angle': (2, 9, 2),
                        'horizontal': (10, -10, 5)})
        s.set_input(data)
        data_sliced = s.process()
        
        AG_sliced = AG.clone()
        AG_sliced.set_channels(num_channels=numpy.arange(1,10,4).shape[0])
        AG_sliced.set_angles(AG.config.angles.angle_data[2:9:2], angle_unit='degree', initial_angle=10)
        AG_sliced.set_panel(numpy.arange(10,90,5).shape[0], pixel_size=0.1)
        
        self.assertTrue(data_sliced.geometry == AG_sliced)
        numpy.testing.assert_allclose(data_sliced.as_array(), numpy.squeeze(data.as_array()[1::4, 2:9:2, 10:-10:5]), rtol=1E-6)
        
        #%%
        #test cone 3D case
        
        source_position = [0.1, 3.0, 0.4]
        detector_position = [-1.3, 1000.0, 2]
        rotation_axis_position = [0.1, 2.0, 0.5]
        
        AG = AcquisitionGeometry.create_Cone3D(source_position=source_position, 
                                                detector_position=detector_position,
                                                rotation_axis_position=rotation_axis_position)
        
        angles = numpy.linspace(0, 360, 10, dtype=numpy.float32)
        
        AG.set_channels(num_channels=10)
        AG.set_angles(angles, initial_angle=10, angle_unit='radian')
        AG.set_panel((100, 50), pixel_size=(0.1, 0.2))
        AG.dimension_labels = ['vertical',\
                                'horizontal',\
                                'angle',\
                                'channel']
        
        data = AG.allocate('random')
        
        s = Slicer(roi={'channel': (None, 1),
                        'angle': -1,
                        'horizontal': (10, None, 2),
                        'vertical': (10, -10, 2)})
        s.set_input(data)
        data_sliced = s.process()
        
        dimension_labels_sliced = list(data.geometry.dimension_labels)
        dimension_labels_sliced.remove('channel')
        
        AG_sliced = AG.clone()
        AG_sliced.dimension_labels = dimension_labels_sliced
        AG_sliced.set_channels(num_channels=1)
        AG_sliced.set_panel([numpy.arange(10, 100, 2).shape[0], numpy.arange(10, 50-10, 2).shape[0]], pixel_size=(0.1, 0.2))
        self.assertTrue(data_sliced.geometry == AG_sliced)
        
        numpy.testing.assert_allclose(data_sliced.as_array(), numpy.squeeze(data.as_array()[10:-10:2, 10::2, :, :1]), rtol=1E-6)
        
        #%% test cone 3D - central slice
        s = Slicer(roi={'channel': (None, 1),
                        'angle': -1,
                        'horizontal': (10, None, 2),
                        'vertical': (25, 26)})
        s.set_input(data)
        data_sliced = s.process()
        
        dimension_labels_sliced = list(data.geometry.dimension_labels)
        dimension_labels_sliced.remove('channel')
        dimension_labels_sliced.remove('vertical')
        
        AG_sliced = AG.subset(vertical='centre')
        AG_sliced = AG_sliced.subset(channel=1)
        AG_sliced.config.panel.num_pixels[0] = numpy.arange(10,100,2).shape[0]
        
        self.assertTrue(data_sliced.geometry == AG_sliced)
        numpy.testing.assert_allclose(data_sliced.as_array(), numpy.squeeze(data.as_array()[25:26, 10::2, :, :1]), rtol=1E-6)
        
        
        #%% test ImageData
        IG = ImageGeometry(voxel_num_x=20,
                            voxel_num_y=30,
                            voxel_num_z=12,
                            voxel_size_x=0.1,
                            voxel_size_y=0.2,
                            voxel_size_z=0.3,
                            channels=10,
                            center_x=0.2,
                            center_y=0.4,
                            center_z=0.6,
                            dimension_labels = ['vertical',\
                                                'channel',\
                                                'horizontal_y',\
                                                'horizontal_x'])
        
        data = IG.allocate('random')
        
        s = Slicer(roi={'channel': (None, None, 2),
                        'horizontal_x': -1,
                        'horizontal_y': (10, None, 2),
                        'vertical': (5, None, 3)})
        s.set_input(data)
        data_sliced = s.process()
        
        IG_sliced = IG.copy()
        IG_sliced.voxel_num_y = numpy.arange(10, 30, 2).shape[0]
        IG_sliced.voxel_num_z = numpy.arange(5, 12, 3).shape[0]
        IG_sliced.channels = numpy.arange(0, 10, 2).shape[0]
        
        self.assertTrue(data_sliced.geometry == IG_sliced)
        numpy.testing.assert_allclose(data_sliced.as_array(), numpy.squeeze(data.as_array()[5:12:3, ::2, 10:30:2, :]), rtol=1E-6)
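
# A minimal standalone sketch (not from the original tests) of the Slicer roi
# convention exercised above: tuples behave like Python slices (start, stop, step),
# None leaves that bound open, and -1 keeps the whole axis unchanged.
import numpy
from cil.framework import ImageGeometry
from cil.processors import Slicer

IG = ImageGeometry(voxel_num_x=20, voxel_num_y=30, channels=10,
                   dimension_labels=['channel', 'horizontal_y', 'horizontal_x'])
data = IG.allocate('random')

s = Slicer(roi={'channel': (None, None, 2),      # every other channel
                'horizontal_y': (10, None, 2),   # rows 10, 12, ..., 28
                'horizontal_x': -1})             # keep the whole x axis
s.set_input(data)
data_sliced = s.process()

numpy.testing.assert_allclose(data_sliced.as_array(),
                              data.as_array()[::2, 10:30:2, :])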
Example #22
    def set_up(self,
               file_name=None,
               roi={
                   'angle': -1,
                   'horizontal': -1,
                   'vertical': -1
               },
               normalise=True,
               mode='bin',
               fliplr=False,
               **kwargs):

        self.file_name = file_name
        self.roi = roi
        self.normalise = normalise
        self.mode = mode
        self.fliplr = fliplr

        if 'normalize' in kwargs.keys():
            self.normalise = kwargs.get('normalize', True)
            warnings.warn(
                "'normalize' has now been deprecated. Please use 'normalise' instead."
            )

        if self.file_name == None:
            raise Exception('Path to xtek file is required.')

        # check if xtek file exists
        if not (os.path.isfile(self.file_name)):
            raise Exception('File\n {}\n does not exist.'.format(
                self.file_name))

        # check labels
        for key in self.roi.keys():
            if key not in ['angle', 'horizontal', 'vertical']:
                raise Exception(
                    "Wrong label. One of ollowing is expected: angle, horizontal, vertical"
                )

        roi = self.roi.copy()

        if 'angle' not in roi.keys():
            roi['angle'] = -1

        if 'horizontal' not in roi.keys():
            roi['horizontal'] = -1

        if 'vertical' not in roi.keys():
            roi['vertical'] = -1

        # parse xtek file
        with open(self.file_name, 'r') as f:
            content = f.readlines()

        content = [x.strip() for x in content]

        #initialise parameters
        detector_offset_h = 0
        detector_offset_v = 0
        object_offset_x = 0
        object_roll_deg = 0

        for line in content:
            # filename of TIFF files
            if line.startswith("Name"):
                self._experiment_name = line.split('=')[1]
            # number of projections
            elif line.startswith("Projections"):
                num_projections = int(line.split('=')[1])
            # white level - used for normalization
            elif line.startswith("WhiteLevel"):
                self._white_level = float(line.split('=')[1])
            # number of pixels along Y axis
            elif line.startswith("DetectorPixelsY"):
                pixel_num_v_0 = int(line.split('=')[1])
            # number of pixels along X axis
            elif line.startswith("DetectorPixelsX"):
                pixel_num_h_0 = int(line.split('=')[1])
            # pixel size along X axis
            elif line.startswith("DetectorPixelSizeX"):
                pixel_size_h_0 = float(line.split('=')[1])
            # pixel size along Y axis
            elif line.startswith("DetectorPixelSizeY"):
                pixel_size_v_0 = float(line.split('=')[1])
            # source to center of rotation distance
            elif line.startswith("SrcToObject"):
                source_to_origin = float(line.split('=')[1])
            # source to detector distance
            elif line.startswith("SrcToDetector"):
                source_to_det = float(line.split('=')[1])
            # initial angular position of a rotation stage
            elif line.startswith("InitialAngle"):
                initial_angle = float(line.split('=')[1])
            # angular increment (in degrees)
            elif line.startswith("AngularStep"):
                angular_step = float(line.split('=')[1])
            # detector offset x in units
            elif line.startswith("DetectorOffsetX"):
                detector_offset_h = float(line.split('=')[1])
            # detector offset y in units
            elif line.startswith("DetectorOffsetY"):
                detector_offset_v = float(line.split('=')[1])
            # object offset x in units
            elif line.startswith("ObjectOffsetX"):
                object_offset_x = float(line.split('=')[1])
            # object roll in degrees
            elif line.startswith("ObjectRoll"):
                object_roll_deg = float(line.split('=')[1])

        self._roi_par = [[0, num_projections, 1], [0, pixel_num_v_0, 1],
                         [0, pixel_num_h_0, 1]]

        for key in roi.keys():
            if key == 'angle':
                idx = 0
            elif key == 'vertical':
                idx = 1
            elif key == 'horizontal':
                idx = 2
            if roi[key] != -1:
                for i in range(2):
                    if roi[key][i] != None:
                        if roi[key][i] >= 0:
                            self._roi_par[idx][i] = roi[key][i]
                        else:
                            self._roi_par[idx][
                                i] = self._roi_par[idx][1] + roi[key][i]
                if len(roi[key]) > 2:
                    if roi[key][2] != None:
                        if roi[key][2] > 0:
                            self._roi_par[idx][2] = roi[key][2]
                        else:
                            raise Exception("Negative step is not allowed")

        if self.mode == 'bin':
            # calculate number of pixels and pixel size
            pixel_num_v = (self._roi_par[1][1] -
                           self._roi_par[1][0]) // self._roi_par[1][2]
            pixel_num_h = (self._roi_par[2][1] -
                           self._roi_par[2][0]) // self._roi_par[2][2]
            pixel_size_v = pixel_size_v_0 * self._roi_par[1][2]
            pixel_size_h = pixel_size_h_0 * self._roi_par[2][2]
        else:  # slice
            pixel_num_v = int(
                numpy.ceil((self._roi_par[1][1] - self._roi_par[1][0]) /
                           self._roi_par[1][2]))
            pixel_num_h = int(
                numpy.ceil((self._roi_par[2][1] - self._roi_par[2][0]) /
                           self._roi_par[2][2]))
            pixel_size_v = pixel_size_v_0
            pixel_size_h = pixel_size_h_0

        det_start_0 = -(pixel_num_h_0 / 2)
        det_start = det_start_0 + self._roi_par[2][0]
        det_end = det_start + pixel_num_h * self._roi_par[2][2]
        det_pos_h = (det_start +
                     det_end) * 0.5 * pixel_size_h_0 + detector_offset_h

        det_start_0 = -(pixel_num_v_0 / 2)
        det_start = det_start_0 + self._roi_par[1][0]
        det_end = det_start + pixel_num_v * self._roi_par[1][2]
        det_pos_v = (det_start +
                     det_end) * 0.5 * pixel_size_v_0 + detector_offset_v

        #angles from the .xtekct file; ignore *.ang and _ctdata.txt as they are not correct
        angles = numpy.asarray(
            [angular_step * proj for proj in range(num_projections)],
            dtype=numpy.float32)

        if self.mode == 'bin':
            n_elem = (self._roi_par[0][1] -
                      self._roi_par[0][0]) // self._roi_par[0][2]
            shape = (n_elem, self._roi_par[0][2])
            angles = angles[self._roi_par[0][0]:(
                self._roi_par[0][0] +
                n_elem * self._roi_par[0][2])].reshape(shape).mean(1)
        else:
            angles = angles[slice(self._roi_par[0][0], self._roi_par[0][1],
                                  self._roi_par[0][2])]

        #convert NikonGeometry to CIL geometry
        angles = -angles - initial_angle + 180

        rotate_axis_x = numpy.tan(object_roll_deg * numpy.pi / 180.)

        if self.fliplr:
            origin = 'top-left'
        else:
            origin = 'top-right'

        if pixel_num_v == 1 and (self._roi_par[1][0] + self._roi_par[1][1]
                                 ) // 2 == pixel_num_v_0 // 2:
            self._ag = AcquisitionGeometry.create_Cone2D(
                source_position=[0, -source_to_origin],
                rotation_axis_position=[-object_offset_x, 0],
                detector_position=[
                    -det_pos_h, source_to_det - source_to_origin
                ])
            self._ag.set_angles(angles, angle_unit='degree')

            self._ag.set_panel(pixel_num_h,
                               pixel_size=pixel_size_h,
                               origin=origin)

            self._ag.set_labels(labels=['angle', 'horizontal'])
        else:
            self._ag = AcquisitionGeometry.create_Cone3D(
                source_position=[0, -source_to_origin, 0],
                rotation_axis_position=[-object_offset_x, 0, 0],
                rotation_axis_direction=[rotate_axis_x, 0, 1],
                detector_position=[
                    -det_pos_h, source_to_det - source_to_origin, det_pos_v
                ])
            self._ag.set_angles(angles, angle_unit='degree')

            self._ag.set_panel((pixel_num_h, pixel_num_v),
                               pixel_size=(pixel_size_h, pixel_size_v),
                               origin=origin)

            self._ag.set_labels(labels=['angle', 'vertical', 'horizontal'])
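
# A standalone numeric check (not taken from the reader) of the detector-position
# arithmetic above: with the full panel selected, no binning and no DetectorOffsetX,
# det_pos_h reduces to zero, i.e. the panel is centred on the source-to-origin axis.
pixel_num_h_0 = 2000       # assumed example panel width in pixels
pixel_size_h_0 = 0.2       # assumed example pixel size
roi_start, step = 0, 1     # full panel, no binning
detector_offset_h = 0.0

pixel_num_h = (pixel_num_h_0 - roi_start) // step
det_start = -(pixel_num_h_0 / 2) + roi_start
det_end = det_start + pixel_num_h * step
det_pos_h = (det_start + det_end) * 0.5 * pixel_size_h_0 + detector_offset_h
print(det_pos_h)           # 0.0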
Example #23
    #%% Read in data
    # path = "/media/scratch/Data/SophiaBeads/SophiaBeads_512_averaged/SophiaBeads_512_averaged.xtekct"


    path = "/mnt/data/CCPi/Dataset/SophiaBeads_64_averaged/CentreSlice"

    # Create a 2D fan beam Geometry

    source_position=(0, -80.6392412185669)
    detector_position=(0, 1007.006 - source_position[1])
    angles = np.asarray([- 5.71428571428571 * i for i in range(63)], dtype=np.float32) * np.pi / 180.
    panel = 2000
    panel_pixel_size = 0.2

    ag_cs =  AcquisitionGeometry.create_Cone2D(source_position, detector_position)\
                                .set_angles(angles, angle_unit='radian')\
                                .set_panel(panel, pixel_size=panel_pixel_size, origin='top-right')

    #%%
    reader = TIFFStackReader()
    reader.set_up(file_name=os.path.join(path, 'Sinograms', 'SophiaBeads_64_averaged_0001.tif'))
    data = reader.read_as_AcquisitionData(ag_cs)

    white_level = 60000.0

    data_raw = data.subset(dimensions=['angle','horizontal'])
    data_raw = data_raw / white_level

    # negative log
    ldata = data_raw.log()
    ldata *= -1
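
    # A short note (not part of the original script): the two lines above implement
    # the Beer-Lambert conversion, turning normalised transmission I/I0 into line
    # integrals of the attenuation coefficient, -log(I/I0), which is the quantity
    # reconstruction algorithms such as FBP expect.
    transmission = 0.5                 # assumed example value of I/I0
    print(-np.log(transmission))       # ~0.693, the corresponding line integral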