def test_get_centre_slice(self):
        #returns the 2D version
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0, -500, 0],
                                               detector_position=[0, 1000, 0])
        AG2 = AcquisitionGeometry.create_Cone2D(source_position=[0, -500],
                                                detector_position=[0, 1000])
        cs = AG.config.system.get_centre_slice()
        self.assertEqual(cs, AG2.config.system)

        #returns the 2D version
        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[0, -500, 0],
            detector_position=[0, 1000, 0],
            rotation_axis_direction=[-1, 0, 1],
            detector_direction_x=[1, 0, 1],
            detector_direction_y=[-1, 0, 1])
        AG2 = AcquisitionGeometry.create_Cone2D(source_position=[0, -500],
                                                detector_position=[0, 1000])
        cs = AG.config.system.get_centre_slice()
        self.assertEqual(cs, AG2.config.system)

        #raise an error if a centre slice cannot be extracted
        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[0, -500, 0],
            detector_position=[0, 1000, 0],
            rotation_axis_direction=[1, 0, 1])
        with self.assertRaises(ValueError):
            cs = AG.config.system.get_centre_slice()

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0, -500, 0],
                                               detector_position=[0, 1000, 0],
                                               detector_direction_x=[1, 0, 1],
                                               detector_direction_y=[-1, 0, 1])
        with self.assertRaises(ValueError):
            cs = AG.config.system.get_centre_slice()
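
        # Note on the cases above (behaviour inferred from these assertions): a
        # centre slice can be extracted only while the detector's direction_y
        # remains parallel to the rotation axis, as in the tilted-but-consistent
        # geometry earlier in this test; tilting only the rotation axis or only
        # the detector breaks that alignment, hence the ValueError in the last
        # two cases.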

    def test_system_description(self):
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0, -50, 0],
                                               detector_position=[0, 100, 0])
        self.assertTrue(AG.system_description == 'simple')

        AG = AcquisitionGeometry.create_Cone3D(source_position=[-50, 0, 0],
                                               detector_position=[100, 0, 0],
                                               detector_direction_x=[0, -1, 0])
        self.assertTrue(AG.system_description == 'simple')

        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[5, -50, 0],
            detector_position=[5, 100, 0],
            rotation_axis_position=[5, 0, 0])
        self.assertTrue(AG.system_description == 'simple')

        AG = AcquisitionGeometry.create_Cone3D(source_position=[5, -50, 0],
                                               detector_position=[0, 100, 0])
        self.assertTrue(AG.system_description == 'advanced')

        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[0, -50, 0],
            detector_position=[0, 100, 0],
            rotation_axis_position=[5, 0, 0])
        self.assertTrue(AG.system_description == 'offset')

    def test_get_ImageGeometry(self):

        AG = AcquisitionGeometry.create_Parallel2D()\
            .set_panel(num_pixels=[512,1],pixel_size=[0.1,0.1])      
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,0,0.1,0.1,1,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Parallel3D()\
            .set_panel(num_pixels=[512,3],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,3,0.1,0.1,0.2,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Cone2D(source_position=[0,-500], detector_position=[0.,500.])\
            .set_panel(num_pixels=[512,1],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,0,0.05,0.05,1,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,500.,0])\
            .set_panel(num_pixels=[512,3],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry()
        IG_gold = ImageGeometry(512,512,3,0.05,0.05,0.1,0,0,0,1)
        self.assertEqual(IG, IG_gold)

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,500.,0])\
            .set_panel(num_pixels=[512,3],pixel_size=[0.1,0.2])
        IG = AG.get_ImageGeometry(resolution=0.5)
        IG_gold = ImageGeometry(256,256,2,0.025,0.025,0.05,0,0,0,1)
        self.assertEqual(IG, IG_gold)
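
        # Worked arithmetic for the defaults exercised above (assumed conventions):
        # for cone-beam geometries the default voxel size is pixel_size divided by
        # the magnification, e.g. 0.1 / ((500 + 500) / 500) = 0.05 as in IG_gold,
        # and resolution=0.5 halves both the voxel count (512 -> 256) and the
        # voxel size (0.05 -> 0.025).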

# Example 4
    def test_cone3D_simple(self):
        ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-2,0], detector_position=[0,1,0])\
                                      .set_angles(self.angles_deg, angle_unit='degree')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((self.num_pixels_x,self.num_pixels_y), (self.pixel_size_x,self.pixel_size_y))

        ig = ag.get_ImageGeometry()
        ig.voxel_num_y = 50
        ig.voxel_size_y /= 2

        angles_rad = np.array([-np.pi / 2, -np.pi, -3 * np.pi / 2])

        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(ig, ag)

        np.testing.assert_allclose(
            tg_geometry.DSD, ag.dist_center_detector + ag.dist_source_center)
        np.testing.assert_allclose(tg_geometry.DSO, ag.dist_source_center)
        np.testing.assert_allclose(tg_angles, angles_rad)
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)
        np.testing.assert_allclose(tg_geometry.rotDetector, 0)
        np.testing.assert_allclose(tg_geometry.offDetector, 0)
        np.testing.assert_allclose(tg_geometry.offOrigin, 0)

        mag = ag.magnification
        np.testing.assert_allclose(tg_geometry.nVoxel, [3, 50, 128])
        np.testing.assert_allclose(tg_geometry.dVoxel,
                                   [0.2 / mag, 0.05 / mag, 0.1 / mag])

# Example 5
    def test_AcquisitionData(self):
        ag = AcquisitionGeometry.create_Parallel3D().set_panel(
            [5, 4]).set_angles([0, 1, 2]).set_channels(2).set_labels(
                ['channel', 'angle', 'vertical', 'horizontal'])
        data = ag.allocate(None)
        data_new = data.get_slice(angle=2)
        self.assertEqual(data_new.shape, (2, 4, 5))
        self.assertEqual(data_new.geometry.dimension_labels,
                         ('channel', 'vertical', 'horizontal'))

        #won't return a geometry for un-reconstructable slice
        ag = AcquisitionGeometry.create_Cone3D(
            [0, -200, 0], [0, 200, 0]).set_panel([5, 4]).set_angles(
                [0, 1, 2]).set_channels(2).set_labels(
                    ['channel', 'angle', 'vertical', 'horizontal'])
        data = ag.allocate('random')
        data_new = data.get_slice(vertical=1, force=True)
        self.assertEqual(data_new.shape, (2, 3, 5))
        self.assertTrue(isinstance(data_new, DataContainer))
        self.assertIsNone(data_new.geometry)
        self.assertEqual(data_new.dimension_labels,
                         ('channel', 'angle', 'horizontal'))

        #if 'centre' is between pixels interpolates
        data_new = data.get_slice(vertical='centre')
        self.assertEqual(data_new.shape, (2, 3, 5))
        self.assertEqual(data_new.geometry.dimension_labels,
                         ('channel', 'angle', 'horizontal'))
        numpy.testing.assert_allclose(
            data_new.array,
            (data.array[:, :, 1, :] + data.array[:, :, 2, :]) / 2)

# Example 6
    def test_filtering(self):
        ag = AcquisitionGeometry.create_Cone3D([0,-1,0],[0,2,0])\
            .set_panel([64,3],[0.1,0.1])\
            .set_angles([0,90])

        ad = ag.allocate('random', seed=0)

        reconstructor = FDK(ad)
        out1 = ad.copy()
        reconstructor._pre_filtering(out1)

        #by hand
        filter = reconstructor.get_filter_array()
        reconstructor._calculate_weights(ag)
        pad0 = (len(filter) - ag.pixel_num_h) // 2
        pad1 = len(filter) - ag.pixel_num_h - pad0

        out2 = ad.array.copy()
        out2 *= reconstructor._weights
        for i in range(2):
            proj_padded = np.zeros((ag.pixel_num_v, len(filter)))
            proj_padded[:, pad0:-pad1] = out2[i]
            filtered_proj = fft(proj_padded, axis=-1)
            filtered_proj *= filter
            filtered_proj = ifft(filtered_proj, axis=-1)
            out2[i] = np.real(filtered_proj)[:, pad0:-pad1]

        diff = (out1 - out2).abs().max()
        self.assertLess(diff, 1e-5)

# Example 7
    def test_weights(self):
        ag = AcquisitionGeometry.create_Cone3D([0,-1,0],[0,2,0])\
            .set_panel([3,4],[0.1,0.2])\
            .set_angles([0,90])
        ad = ag.allocate(0)

        reconstructor = FDK(ad)
        reconstructor._calculate_weights(ag)
        weights = reconstructor._weights

        scaling = 7.5 * np.pi
        weights_new = np.ones_like(weights)

        det_size_x = ag.pixel_size_h * ag.pixel_num_h
        det_size_y = ag.pixel_size_v * ag.pixel_num_v

        ray_length_z = 3
        for j in range(4):
            ray_length_y = -det_size_y / 2 + ag.pixel_size_v * (j + 0.5)
            for i in range(3):
                ray_length_x = -det_size_x / 2 + ag.pixel_size_h * (i + 0.5)
                ray_length = (ray_length_x**2 + ray_length_y**2 +
                              ray_length_z**2)**0.5
                weights_new[j, i] = scaling * ray_length_z / ray_length

        diff = np.max(np.abs(weights - weights_new))
        self.assertLess(diff, 1e-5)

    def __init__(self, volume_geometry, sinogram_geometry):

        super(FBP_Flexible, self).__init__(volume_geometry=volume_geometry,
                                           sinogram_geometry=sinogram_geometry)

        #convert parallel geometry to a cone geometry with a very large source-to-object distance
        sino_geom_cone = sinogram_geometry.copy()
        sino_geom_cone.config.system.update_reference_frame()

        #reverse the ray direction unit vector and push the source out towards infinity
        cone_source = -sino_geom_cone.config.system.ray.direction * sino_geom_cone.config.panel.pixel_size[
            1] * sino_geom_cone.config.panel.num_pixels[1] * 1e6
        detector_position = sino_geom_cone.config.system.detector.position
        detector_direction_x = sino_geom_cone.config.system.detector.direction_x

        if sinogram_geometry.dimension == '2D':
            tmp = AcquisitionGeometry.create_Cone2D(cone_source,
                                                    detector_position,
                                                    detector_direction_x)
        else:
            detector_direction_y = sino_geom_cone.config.system.detector.direction_y
            tmp = AcquisitionGeometry.create_Cone3D(cone_source,
                                                    detector_position,
                                                    detector_direction_x,
                                                    detector_direction_y)

        sino_geom_cone.config.system = tmp.config.system.copy()

        self.vol_geom_astra, self.proj_geom_astra = convert_geometry_to_astra_vec(
            volume_geometry, sino_geom_cone)
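
# A minimal, runnable sketch of the trick used in the constructor above
# (illustrative values; the import path and numbers are assumptions, not part
# of the original class): a parallel beam is emulated by a cone beam whose
# source is pushed ~1e6 panel-heights away, so the beam divergence over the
# panel becomes negligible and the magnification tends to 1.
from cil.framework import AcquisitionGeometry

panel_pixels, pixel_size = 16, 0.1
far = pixel_size * panel_pixels * 1e6   # same scale factor as used above

ag_parallel_like = AcquisitionGeometry.create_Cone3D(
    source_position=[0, -far, 0],
    detector_position=[0, 0, 0])\
    .set_panel((panel_pixels, panel_pixels), (pixel_size, pixel_size))\
    .set_angles([0])

print(ag_parallel_like.magnification)   # 1.0, i.e. effectively parallel rays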

# Example 9
    def setUp(self):
        #%% Setup Geometry
        voxel_num_xy = 255
        voxel_num_z = 15

        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 1000
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
                                           .set_angles(angles)\
                                           .set_panel(det_pix_x, pix_size)\
                                           .set_labels(['angle','horizontal'])

        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
                                     .set_angles(angles)\
                                     .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
                                     .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()

        self.ad3D = self.ag3D.allocate('random')
        self.ig3D = self.ag3D.get_ImageGeometry()

    def test_align_reference_frame_tigre(self):
        AG = AcquisitionGeometry.create_Cone3D(
            source_position=[5, 500, 0],
            detector_position=[5., -1000., 0],
            rotation_axis_position=[5, 0, 0],
            rotation_axis_direction=[0, 0, -1])
        AG.config.system.align_reference_frame('tigre')

        numpy.testing.assert_allclose(AG.config.system.source.position,
                                      [0, -500, 0],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position,
                                      [0, 1000, 0],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x,
                                      [1, 0, 0],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_y,
                                      [0, 0, -1],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position,
                                      [0, 0, 0],
                                      rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.direction,
                                      [0, 0, 1],
                                      rtol=1E-6)
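
        # Reading of the assertions above: align_reference_frame('tigre') moves
        # the rotation axis to the origin and rotates the system so the axis
        # points along +z with the source on the -y axis; the same rotation that
        # flips the original [0, 0, -1] axis onto +z carries the default detector
        # direction_y [0, 0, 1] to [0, 0, -1].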

# Example 11
    def setUp(self):
        #%% Setup Geometry
        voxel_num_xy = 255
        voxel_num_z = 15
        cs_ind = (voxel_num_z - 1) // 2

        mag = 2
        src_to_obj = 50
        src_to_det = src_to_obj * mag

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 1000
        angles = np.linspace(0, 360, num=num_projections, endpoint=False)

        self.ag = AcquisitionGeometry.create_Cone2D([0,-src_to_obj],[0,src_to_det-src_to_obj])\
                                           .set_angles(angles)\
                                           .set_panel(det_pix_x, pix_size)\
                                           .set_labels(['angle','horizontal'])

        self.ig = self.ag.get_ImageGeometry()

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
                                     .set_angles(angles)\
                                     .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
                                     .set_labels(['angle','vertical','horizontal'])
        self.ig3D = self.ag3D.get_ImageGeometry()

        #%% Create phantom
        kernel_size = voxel_num_xy
        kernel_radius = (kernel_size - 1) // 2
        y, x = np.ogrid[-kernel_radius:kernel_radius + 1,
                        -kernel_radius:kernel_radius + 1]

        circle1 = [5, 0, 0]  #r,x,y
        dist1 = ((x - circle1[1])**2 + (y - circle1[2])**2)**0.5

        circle2 = [5, 80, 0]  #r,x,y
        dist2 = ((x - circle2[1])**2 + (y - circle2[2])**2)**0.5

        circle3 = [25, 0, 80]  #r,x,y
        dist3 = ((x - circle3[1])**2 + (y - circle3[2])**2)**0.5

        mask1 = (dist1 - circle1[0]).clip(0, 1)
        mask2 = (dist2 - circle2[0]).clip(0, 1)
        mask3 = (dist3 - circle3[0]).clip(0, 1)
        phantom = 1 - np.logical_and(np.logical_and(mask1, mask2), mask3)

        self.golden_data = self.ig3D.allocate(0)
        for i in range(4):
            self.golden_data.fill(array=phantom, vertical=7 + i)

        self.golden_data_cs = self.golden_data.get_slice(vertical=cs_ind,
                                                         force=True)

        self.Op = ProjectionOperator(self.ig3D, self.ag3D)
        self.fp = self.Op.direct(self.golden_data)

    def test_update_reference_frame(self):
        #translate origin
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0],detector_position=[0.,1000.,0], rotation_axis_position=[5.,2.,4.])
        AG.config.system.update_reference_frame()
        numpy.testing.assert_allclose(AG.config.system.source.position, [-5,-502,-4], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position, [-5,998,-4], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x, [1,0,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position, [0,0,0], rtol=1E-6)

        #align the Z axis with the rotation axis
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0],detector_position=[0.,1000.,0], rotation_axis_position=[0.,0.,0.], rotation_axis_direction=[0,1,0])
        AG.config.system.update_reference_frame()
        numpy.testing.assert_allclose(AG.config.system.source.position, [0,0,-500], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position, [0,0,1000], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x, [1,0,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_y, [0,-1,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position, [0,0,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.direction, [0,0,1], rtol=1E-6)

# Example 13
    def read(self):
        '''
        Reads the projections and returns an AcquisitionData container
        '''
        # the import will raise an ImportError if dxchange is not installed
        import dxchange
        # Load projections and most metadata
        data, metadata = dxchange.read_txrm(self.txrm_file)
        number_of_images = data.shape[0]
        
        # Read source to center and detector to center distances
        with olefile.OleFileIO(self.txrm_file) as ole:
            StoRADistance = dxchange.reader._read_ole_arr(ole, \
                    'ImageInfo/StoRADistance', "<{0}f".format(number_of_images))
            DtoRADistance = dxchange.reader._read_ole_arr(ole, \
                    'ImageInfo/DtoRADistance', "<{0}f".format(number_of_images))
            
        dist_source_center   = np.abs( StoRADistance[0] )
        dist_center_detector = np.abs( DtoRADistance[0] )
        
        # normalise data by flatfield
        data = data / metadata['reference']
        
        # circularly shift data by rounded x and y shifts
        for k in range(number_of_images):
            data[k,:,:] = np.roll(data[k,:,:], \
                (int(metadata['x-shifts'][k]),int(metadata['y-shifts'][k])), \
                axis=(1,0))
        
        # Pixelsize loaded in metadata is really the voxel size in um.
        # We can compute the effective detector pixel size as the geometric
        # magnification times the voxel size.
        d_pixel_size = ((dist_source_center+dist_center_detector)/dist_source_center)*metadata['pixel_size']
        
        # convert angles to requested unit measure, Zeiss stores in radians
        if self.angle_unit == AcquisitionGeometry.DEGREE:
            angles = np.degrees(metadata['thetas'])
        else:
            angles = np.asarray(metadata['thetas'])

        self._ag = AcquisitionGeometry.create_Cone3D(
            [0,-dist_source_center, 0] , [ 0, dist_center_detector, 0] \
            ) \
                .set_panel([metadata['image_width'], metadata['image_height']],\
                    pixel_size=[d_pixel_size/1000,d_pixel_size/1000])\
                .set_angles(angles, angle_unit=self.angle_unit)
        self._ag.dimension_labels =  ['angle', 'vertical', 'horizontal']
                

        acq_data = AcquisitionData(array=data, deep_copy=False, geometry=self._ag.copy(),\
            suppress_warning=True)
        self._metadata = metadata
        return acq_data
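
# Worked example of the pixel-size conversion in read() above (illustrative
# numbers, not taken from a real file): with dist_source_center = 50,
# dist_center_detector = 50 and a 5 um voxel size in the metadata,
# d_pixel_size = ((50 + 50) / 50) * 5 = 10 um, which becomes 0.01 mm after the
# division by 1000 when the panel is set.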

# Example 14
    def setUp(self): 
        #%% Setup Geometry
        voxel_num_xy = 255
        voxel_num_z = 15
        self.cs_ind = (voxel_num_z-1)//2


        src_to_obj = 500
        src_to_det = src_to_obj

        pix_size = 0.2
        det_pix_x = voxel_num_xy
        det_pix_y = voxel_num_z

        num_projections = 360
        angles = np.linspace(0, 2*np.pi, num=num_projections, endpoint=False)

        self.ag_cone = AcquisitionGeometry.create_Cone3D([0,-src_to_obj,0],[0,src_to_det-src_to_obj,0])\
                                     .set_angles(angles, angle_unit='radian')\
                                     .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
                                     .set_labels(['vertical','angle','horizontal'])

        self.ag_parallel = AcquisitionGeometry.create_Parallel3D()\
                                     .set_angles(angles, angle_unit='radian')\
                                     .set_panel((det_pix_x,det_pix_y), (pix_size,pix_size))\
                                     .set_labels(['vertical','angle','horizontal'])

        self.ig_3D = self.ag_parallel.get_ImageGeometry()

        #%% Create phantom
        kernel_size = voxel_num_xy
        kernel_radius = (kernel_size - 1) // 2
        y, x = np.ogrid[-kernel_radius:kernel_radius+1, -kernel_radius:kernel_radius+1]

        circle1 = [5,0,0] #r,x,y
        dist1 = ((x - circle1[1])**2 + (y - circle1[2])**2)**0.5

        circle2 = [5,100,0] #r,x,y
        dist2 = ((x - circle2[1])**2 + (y - circle2[2])**2)**0.5

        circle3 = [25,0,100] #r,x,y
        dist3 = ((x - circle3[1])**2 + (y - circle3[2])**2)**0.5

        mask1 =(dist1 - circle1[0]).clip(0,1) 
        mask2 =(dist2 - circle2[0]).clip(0,1) 
        mask3 =(dist3 - circle3[0]).clip(0,1) 
        phantom = 1 - np.logical_and(np.logical_and(mask1, mask2),mask3)

        self.golden_data = self.ig_3D.allocate(0)
        for i in range(4):
            self.golden_data.fill(array=phantom, vertical=7+i)

        self.golden_data_cs = self.golden_data.subset(vertical=self.cs_ind)

# Example 15
    def setUp(self):
        self.ig = ImageGeometry(2, 3, 4, channels=5)
        angles = numpy.asarray([90., 0., -90.], dtype=numpy.float32)

        self.ag_cone = AcquisitionGeometry.create_Cone3D([0,-500,0],[0,500,0])\
                                    .set_panel((20,2))\
                                    .set_angles(angles)\
                                    .set_channels(4)

        self.ag = AcquisitionGeometry.create_Parallel3D()\
                                    .set_angles(angles)\
                                    .set_channels(4)\
                                    .set_panel((20,2))

# Example 16
    def test_cone3D_advanced(self):

        ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-10,0], detector_position=[0,10,0], rotation_axis_position=[0,0, 0],rotation_axis_direction=[0,-1,1])\
                                      .set_angles(self.angles_deg, angle_unit='degree')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((self.num_pixels_x,self.num_pixels_y), (self.pixel_size_x,self.pixel_size_y))

        self.assertTrue(ag.system_description == 'advanced')

        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(
            self.ig, ag)

        self.assertAlmostEqual(tg_geometry.DSO,
                               ag.dist_source_center * np.sin(np.pi / 4), 5)

        s2o = ag.dist_source_center * np.cos(np.pi / 4)
        np.testing.assert_allclose(tg_geometry.DSO, s2o)

        s2d = (ag.dist_center_detector + ag.dist_source_center) * np.cos(
            np.pi / 4)
        np.testing.assert_allclose(tg_geometry.DSD, s2d)

        det_rot = np.array([0, -np.pi / 4, 0])
        np.testing.assert_allclose(tg_geometry.rotDetector, det_rot)

        det_offset = np.array([-s2d, 0, 0])
        np.testing.assert_allclose(tg_geometry.offDetector, det_offset)

        for i, ang in enumerate(tg_angles):
            ang2 = -(self.angles_rad[i] + np.pi / 2)
            self.compare_angles(ang, ang2, 1e-6)

        self.assertTrue(tg_geometry.mode == 'cone')
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)

        height = 10 / np.sqrt(2)
        np.testing.assert_allclose(tg_geometry.offOrigin, [-height, 0, 0])

        np.testing.assert_allclose(
            tg_geometry.nVoxel,
            [self.ig.voxel_num_z, self.ig.voxel_num_y, self.ig.voxel_num_x])
        np.testing.assert_allclose(
            tg_geometry.dVoxel,
            [self.ig.voxel_size_z, self.ig.voxel_size_y, self.ig.voxel_size_x])

# Example 17
    def setUp(self):
        self.acq_data = dataexample.SIMULATED_CONE_BEAM_DATA.get()
        self.img_data = dataexample.SIMULATED_SPHERE_VOLUME.get()

        self.acq_data = np.log(self.acq_data)
        self.acq_data *= -1.0

        self.ig = self.img_data.geometry
        self.ag = self.acq_data.geometry

        self.ag_small = AcquisitionGeometry.create_Cone3D([0, -1000, 0],
                                                          [0, 0, 0])
        self.ag_small.set_panel((16, 16))
        self.ag_small.set_angles([0])

    def setUp(self):

        N = 128
        angles = np.linspace(0, 360, 50, True, dtype=np.float32)
        offset = 0.4
        ag = AcquisitionGeometry.create_Cone3D((offset, -100, 0),
                                               (offset, 100, 0))
        ag.set_panel((N, N / 2))

        ag.set_angles(angles, angle_unit=AcquisitionGeometry.DEGREE)
        ig = ag.get_ImageGeometry()
        self.ag = ag
        self.ig = ig
        self.N = N

    def test_create_Cone3D(self):

        #default
        source_position = [0.1, -500.0,-2.0]
        detector_position = [-1.3,1000.0, -1.0]

        AG = AcquisitionGeometry.create_Cone3D(source_position, detector_position)
        numpy.testing.assert_allclose(AG.config.system.source.position, source_position, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position, detector_position, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x, [1,0,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_y, [0,0,1], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position, [0,0,0], rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.direction, [0,0,1], rtol=1E-6)

        #values
        detector_direction_x = [1,0.2, 0]
        detector_direction_y = [0.0,0,1]
        rotation_axis_position=[0.1, 2,-0.4]
        rotation_axis_direction=[-0.1,-0.3,1]

        AG = AcquisitionGeometry.create_Cone3D(source_position, detector_position, detector_direction_x,detector_direction_y, rotation_axis_position,rotation_axis_direction)

        detector_direction_x = numpy.asarray(detector_direction_x)
        detector_direction_y = numpy.asarray(detector_direction_y)
        rotation_axis_direction = numpy.asarray(rotation_axis_direction)

        detector_direction_x /= numpy.sqrt((detector_direction_x**2).sum())
        detector_direction_y /= numpy.sqrt((detector_direction_y**2).sum())
        rotation_axis_direction /= numpy.sqrt((rotation_axis_direction**2).sum())

        numpy.testing.assert_allclose(AG.config.system.source.position, source_position, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.position, detector_position, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_x, detector_direction_x, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.detector.direction_y, detector_direction_y, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.position, rotation_axis_position, rtol=1E-6)
        numpy.testing.assert_allclose(AG.config.system.rotation_axis.direction, rotation_axis_direction, rtol=1E-6)

# Example 20
    def setUp(self):
        self.ig = ImageGeometry(2, 3, 4, channels=5)
        angles = numpy.asarray([90., 0., -90.], dtype=numpy.float32)

        self.ag = AcquisitionGeometry('parallel',
                                      'edo',
                                      pixel_num_h=20,
                                      pixel_num_v=2,
                                      angles=angles,
                                      dist_source_center=312.2,
                                      dist_center_detector=123.,
                                      channels=4)

        self.ag_cone = AcquisitionGeometry.create_Cone3D([0,-500,0],[0,500,0])\
                                     .set_panel((2,20))\
                                     .set_angles(self.ag.angles)\
                                     .set_channels(4)

# Example 21
    def test_cone3D_offset(self):

        #3, 4, 5 triangle for source + object
        ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-4,0], detector_position=[0,4,0], rotation_axis_position=[3,0, 0])\
                                      .set_angles(self.angles_deg, angle_unit='degree')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((self.num_pixels_x,self.num_pixels_y), (self.pixel_size_x,self.pixel_size_y))

        self.assertTrue(ag.system_description == 'offset')

        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(
            self.ig, ag)

        np.testing.assert_allclose(tg_geometry.DSO, ag.dist_source_center)

        yaw = np.arcsin(3. / 5.)
        det_rot = np.array([0, 0, yaw])
        np.testing.assert_allclose(tg_geometry.rotDetector, det_rot)

        offset = 4 * 6 / 5
        det_offset = np.array([0, -offset, 0])
        np.testing.assert_allclose(tg_geometry.offDetector, det_offset)

        s2d = ag.dist_center_detector + ag.dist_source_center - 6 * 3 / 5
        np.testing.assert_allclose(tg_geometry.DSD, s2d)

        for i, ang in enumerate(tg_angles):
            ang2 = -(self.angles_rad[i] + np.pi / 2 + yaw)
            self.compare_angles(ang, ang2, 1e-6)

        self.assertTrue(tg_geometry.mode == 'cone')
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)
        np.testing.assert_allclose(tg_geometry.offOrigin, 0)

        np.testing.assert_allclose(
            tg_geometry.nVoxel,
            [self.ig.voxel_num_z, self.ig.voxel_num_y, self.ig.voxel_num_x])
        np.testing.assert_allclose(
            tg_geometry.dVoxel,
            [self.ig.voxel_size_z, self.ig.voxel_size_y, self.ig.voxel_size_x])

# Example 22
    def setUp(self):
        # Define image geometry.
        pixels_x = 128
        pixels_y = 3

        angles_deg = np.asarray([0, 90.0, 180.0], dtype='float32')
        angles_rad = angles_deg * np.pi / 180.0

        ag = AcquisitionGeometry.create_Parallel2D()\
                                .set_angles(angles_rad, angle_unit='radian')\
                                .set_labels(['angle','horizontal'])\
                                .set_panel(pixels_x, 0.1)

        ig = ag.get_ImageGeometry()


        ag_deg = AcquisitionGeometry.create_Parallel2D()\
                                    .set_angles(angles_deg, angle_unit='degree')\
                                    .set_labels(['angle','horizontal'])\
                                    .set_panel(pixels_x, 0.1)

        ag_cone = AcquisitionGeometry.create_Cone2D([0,-2], [0,1])\
                                     .set_angles(angles_rad, angle_unit='radian')\
                                     .set_labels(['angle','horizontal'])\
                                     .set_panel(pixels_x, 0.1)



        ag3 = AcquisitionGeometry.create_Parallel3D()\
                                 .set_angles(angles_rad, angle_unit='radian')\
                                 .set_labels(['vertical', 'angle','horizontal'])\
                                 .set_panel((pixels_x,pixels_y), (0.1,0.1))

        ig3 = ag3.get_ImageGeometry()

        ag3_cone = AcquisitionGeometry.create_Cone3D([0,-2,0], [0,1,0])\
                                      .set_angles(angles_rad, angle_unit='radian')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((pixels_x,pixels_y), (0.1,0.1))
        self.ig = ig
        self.ig3 = ig3
        self.ag = ag
        self.ag_deg = ag_deg
        self.ag_cone = ag_cone
        self.ag3 = ag3
        self.ag3_cone = ag3_cone

# Example 23
    def __init__(self, acquisition_geometry, image_geometry=None):

        if acquisition_geometry.dimension == "2D":
            self.ndim = 2
            sys = acquisition_geometry.config.system
            if acquisition_geometry.geom_type == 'cone':
                ag_temp = AcquisitionGeometry.create_Cone3D(
                    [*sys.source.position, 0], [*sys.detector.position, 0],
                    [*sys.detector.direction_x, 0], [0, 0, 1],
                    [*sys.rotation_axis.position, 0], [0, 0, 1])
            else:
                ag_temp = AcquisitionGeometry.create_Parallel3D(
                    [*sys.ray.direction, 0], [*sys.detector.position, 0],
                    [*sys.detector.direction_x, 0], [0, 0, 1],
                    [*sys.rotation_axis.position, 0], [0, 0, 1])

            ag_temp.config.panel = acquisition_geometry.config.panel
            ag_temp.set_angles(acquisition_geometry.angles)
            ag_temp.set_labels(
                ['vertical', *acquisition_geometry.dimension_labels])

            self.acquisition_geometry = ag_temp

        elif acquisition_geometry.channels > 1:
            self.ndim = 3
            self.acquisition_geometry = acquisition_geometry.get_slice(
                channel=0)
        else:
            self.acquisition_geometry = acquisition_geometry
            self.ndim = 3

        if image_geometry is None:
            self.image_geometry = self.acquisition_geometry.get_ImageGeometry()
        else:
            self.image_geometry = image_geometry

        len1 = self.acquisition_geometry.config.panel.num_pixels[
            0] * self.acquisition_geometry.config.panel.pixel_size[0]
        len2 = self.acquisition_geometry.config.panel.num_pixels[
            1] * self.acquisition_geometry.config.panel.pixel_size[1]
        self.scale = max(len1, len2) / 5

        self.handles = []
        self.labels = []

    def test_calculate_magnification(self):
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0])
        out = AG.config.system.calculate_magnification()
        self.assertEqual(out, [500, 1000, 3]) 

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0], rotation_axis_position=[0.,250.,0])
        out = AG.config.system.calculate_magnification()
        self.assertEqual(out, [750, 750, 2]) 

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0], rotation_axis_position=[5.,0.,0])
        out = AG.config.system.calculate_magnification()
        source_to_object = numpy.sqrt(5.0**2 + 500.0**2)
        theta = math.atan2(5.0,500.0)
        source_to_detector = 1500.0/math.cos(theta)
        self.assertEqual(out, [source_to_object, source_to_detector - source_to_object, source_to_detector/source_to_object]) 

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0], rotation_axis_position=[0.,0.,5.])
        out = AG.config.system.calculate_magnification()
        source_to_object = numpy.sqrt(5.0**2 + 500.0**2)
        theta = math.atan2(5.0,500.0)
        source_to_detector = 1500.0/math.cos(theta)
        self.assertEqual(out, [source_to_object, source_to_detector - source_to_object, source_to_detector/source_to_object]) 

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0],detector_direction_y=[0,math.sqrt(5),math.sqrt(5)])
        out = AG.config.system.calculate_magnification()
        self.assertEqual(out, [500, 1000, 3]) 

        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0],detector_direction_x=[1,0.1,0.2],detector_direction_y=[-0.2,0,1])
        out = AG.config.system.calculate_magnification()
        self.assertEqual(out, [500, 1000, 3])         
        
        AG = AcquisitionGeometry.create_Cone3D(source_position=[0,-500,0], detector_position=[0.,1000.,0], rotation_axis_position=[5.,0.,0],detector_direction_x=[math.sqrt(5),math.sqrt(5),0])
        out = AG.config.system.calculate_magnification()
        source_to_object = numpy.sqrt(5.0**2 + 500.0**2)

        ab = (AG.config.system.rotation_axis.position - AG.config.system.source.position).astype(numpy.float64)/source_to_object

        #source_position + d * ab = detector_position + t * detector_direction_x
        #x: d *  ab[0] =  t * detector_direction_x[0]
        #y: -500 + d *  ab[1] = 1000 + t * detector_direction_x[1] 

        # t = (d * ab[0]) / detector_direction_x[0]
        # d = 1500 / (ab[1] - ab[0])   (since detector_direction_x[0] == detector_direction_x[1])

        source_to_detector = 1500 / (ab[1]  - ab[0])
        self.assertEqual(out, [source_to_object, source_to_detector - source_to_object, source_to_detector/source_to_object]) 
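
        # Numerically (for illustration): ab = [5, 500, 0] / sqrt(5**2 + 500**2)
        # is approximately [0.0100, 0.99995, 0], so source_to_detector =
        # 1500 / (ab[1] - ab[0]) is approximately 1515.2, slightly longer than
        # the ~1500.1 given by 1500 / cos(theta) when the detector is not tilted.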

# Example 25
    def test_cone3D_cofr(self):

        #3, 4, 5 triangle for source + object
        ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-4,0], detector_position=[0,4,0], rotation_axis_position=[3,0, 0])\
                                      .set_angles(self.angles_deg, angle_unit='degree')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((self.num_pixels_x,self.num_pixels_y), (self.pixel_size_x,self.pixel_size_y))

        ig = ag.get_ImageGeometry()
        ig.voxel_num_y = 50
        ig.voxel_size_y /= 2

        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(ig, ag)

        np.testing.assert_allclose(tg_geometry.DSO, ag.dist_source_center)

        yaw = np.arcsin(3. / 5.)
        det_rot = np.array([0, 0, yaw])
        np.testing.assert_allclose(tg_geometry.rotDetector, det_rot)

        offset = 4 * 6 / 5
        det_offset = np.array([0, -offset, 0])
        np.testing.assert_allclose(tg_geometry.offDetector, det_offset)

        s2d = ag.dist_center_detector + ag.dist_source_center - 6 * 3 / 5
        np.testing.assert_allclose(tg_geometry.DSD, s2d)

        angles_rad = np.array([-np.pi / 2, -np.pi, -3 * np.pi / 2]) - yaw

        np.testing.assert_allclose(tg_angles, angles_rad)
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)
        np.testing.assert_allclose(tg_geometry.offOrigin, 0)

        mag = ag.magnification
        np.testing.assert_allclose(tg_geometry.nVoxel, [3, 50, 128])
        np.testing.assert_allclose(tg_geometry.dVoxel,
                                   [0.2 / mag, 0.05 / mag, 0.1 / mag])

# Example 26
    def setUp(self):

        N = 3
        angles = np.linspace(0, np.pi, 2, dtype='float32')

        self.ag = AcquisitionGeometry.create_Cone2D([0,-100],[0,200])\
                                .set_angles(angles, angle_unit='radian')\
                                .set_panel(N, 0.1)\
                                .set_labels(['angle', 'horizontal'])

        self.ig = self.ag.get_ImageGeometry()
        self.Op = ProjectionOperator(self.ig, self.ag)

        self.ag3D = AcquisitionGeometry.create_Cone3D([0,-100,0],[0,200,0])\
                                .set_angles(angles, angle_unit='radian')\
                                .set_panel((N,N), (0.1,0.1))\
                                .set_labels(['angle', 'vertical', 'horizontal'])

        self.ig3D = self.ag3D.get_ImageGeometry()
        self.Op3D = ProjectionOperator(self.ig3D, self.ag3D)

# Example 27
    def setUp(self):

        self.data_dir = os.path.join(os.getcwd(), 'test_nxs')
        if not os.path.exists(self.data_dir):
            os.mkdir(self.data_dir)

        self.ag2d = AcquisitionGeometry.create_Parallel2D()\
                                    .set_angles([0, 90, 180],-3.0, 'radian')\
                                    .set_panel(5, 0.2, origin='top-right')\
                                    .set_channels(6)\
                                    .set_labels(['horizontal', 'angle'])

        self.ad2d = self.ag2d.allocate('random_int')


        self.ag3d = AcquisitionGeometry.create_Cone3D([0.1,-500,2], [3,600,-1], [0,1,0],[0,0,-1],[0.2,-0.1,0.5],[-0.1,0.2,0.9])\
                                    .set_angles([0, 90, 180])\
                                    .set_panel([5,10],[0.1,0.3])

        self.ad3d = self.ag3d.allocate('random_int')

# Example 28
    def test_cone3D_simple(self):
        ag = AcquisitionGeometry.create_Cone3D(source_position=[0,-6,0], detector_position=[0,16,0])\
                                      .set_angles(self.angles_deg, angle_unit='degree')\
                                      .set_labels(['vertical', 'angle','horizontal'])\
                                      .set_panel((self.num_pixels_x,self.num_pixels_y), (self.pixel_size_x,self.pixel_size_y))

        self.assertTrue(ag.system_description == 'simple')

        tg_geometry, tg_angles = CIL2TIGREGeometry.getTIGREGeometry(
            self.ig, ag)

        for i, ang in enumerate(tg_angles):
            ang2 = -(self.angles_rad[i] + np.pi / 2)
            self.compare_angles(ang, ang2, 1e-6)

        self.assertTrue(tg_geometry.mode == 'cone')
        np.testing.assert_allclose(
            tg_geometry.DSD, ag.dist_center_detector + ag.dist_source_center)
        np.testing.assert_allclose(tg_geometry.DSO, ag.dist_source_center)
        np.testing.assert_allclose(tg_geometry.dDetector,
                                   ag.config.panel.pixel_size[::-1])
        np.testing.assert_allclose(tg_geometry.nDetector,
                                   ag.config.panel.num_pixels[::-1])
        np.testing.assert_allclose(
            tg_geometry.sDetector,
            tg_geometry.dDetector * tg_geometry.nDetector)
        np.testing.assert_allclose(tg_geometry.rotDetector, 0)
        np.testing.assert_allclose(tg_geometry.offDetector, 0)
        np.testing.assert_allclose(tg_geometry.offOrigin, 0)

        np.testing.assert_allclose(
            tg_geometry.nVoxel,
            [self.ig.voxel_num_z, self.ig.voxel_num_y, self.ig.voxel_num_x])
        np.testing.assert_allclose(
            tg_geometry.dVoxel,
            [self.ig.voxel_size_z, self.ig.voxel_size_y, self.ig.voxel_size_x])

# Example 29
    def get_geometry(self):
        '''
        Parses the NEXUS file and returns either an ImageGeometry or an
        AcquisitionGeometry, depending on the file content
        '''

        with h5py.File(self.file_name, 'r') as dfile:

            if np.bytes_(dfile.attrs['creator']) != np.bytes_(
                    'NEXUSDataWriter.py'):
                raise Exception(
                    'We can parse only files created by NEXUSDataWriter.py')

            ds_data = dfile['entry1/tomo_entry/data/data']

            if ds_data.attrs['data_type'] == 'ImageData':

                self._geometry = ImageGeometry(
                    voxel_num_x=int(ds_data.attrs['voxel_num_x']),
                    voxel_num_y=int(ds_data.attrs['voxel_num_y']),
                    voxel_num_z=int(ds_data.attrs['voxel_num_z']),
                    voxel_size_x=ds_data.attrs['voxel_size_x'],
                    voxel_size_y=ds_data.attrs['voxel_size_y'],
                    voxel_size_z=ds_data.attrs['voxel_size_z'],
                    center_x=ds_data.attrs['center_x'],
                    center_y=ds_data.attrs['center_y'],
                    center_z=ds_data.attrs['center_z'],
                    channels=ds_data.attrs['channels'])

                if 'channel_spacing' in ds_data.attrs:
                    self._geometry.channel_spacing = ds_data.attrs[
                        'channel_spacing']

                # read the dimension_labels from dim{}
                dimension_labels = self.read_dimension_labels(ds_data.attrs)

            else:  # AcquisitionData
                if ds_data.attrs.__contains__('dist_source_center') or dfile[
                        'entry1/tomo_entry'].__contains__(
                            'config/source/position'):
                    geom_type = 'cone'
                else:
                    geom_type = 'parallel'

                if ds_data.attrs.__contains__('num_pixels_v'):
                    num_pixels_v = ds_data.attrs.get('num_pixels_v')
                elif ds_data.attrs.__contains__('pixel_num_v'):
                    num_pixels_v = ds_data.attrs.get('pixel_num_v')
                else:
                    num_pixels_v = 1

                if num_pixels_v > 1:
                    dim = 3
                else:
                    dim = 2

                if self.is_old_file_version():
                    num_pixels_h = ds_data.attrs.get('pixel_num_h', 1)
                    num_channels = ds_data.attrs['channels']
                    ds_angles = dfile['entry1/tomo_entry/data/rotation_angle']

                    if geom_type == 'cone' and dim == 3:
                        self._geometry = AcquisitionGeometry.create_Cone3D(
                            source_position=[
                                0, -ds_data.attrs['dist_source_center'], 0
                            ],
                            detector_position=[
                                0, ds_data.attrs['dist_center_detector'], 0
                            ])
                    elif geom_type == 'cone' and dim == 2:
                        self._geometry = AcquisitionGeometry.create_Cone2D(
                            source_position=[
                                0, -ds_data.attrs['dist_source_center']
                            ],
                            detector_position=[
                                0, ds_data.attrs['dist_center_detector']
                            ])
                    elif geom_type == 'parallel' and dim == 3:
                        self._geometry = AcquisitionGeometry.create_Parallel3D(
                        )
                    elif geom_type == 'parallel' and dim == 2:
                        self._geometry = AcquisitionGeometry.create_Parallel2D(
                        )

                else:
                    num_pixels_h = ds_data.attrs.get('num_pixels_h', 1)
                    num_channels = ds_data.attrs['num_channels']
                    ds_angles = dfile['entry1/tomo_entry/config/angles']

                    rotation_axis_position = list(dfile[
                        'entry1/tomo_entry/config/rotation_axis/position'])
                    detector_position = list(
                        dfile['entry1/tomo_entry/config/detector/position'])

                    ds_detector = dfile['entry1/tomo_entry/config/detector']
                    if ds_detector.__contains__('direction_x'):
                        detector_direction_x = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_x'])
                    else:
                        detector_direction_x = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_row'])

                    if ds_detector.__contains__('direction_y'):
                        detector_direction_y = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_y'])
                    elif ds_detector.__contains__('direction_col'):
                        detector_direction_y = list(dfile[
                            'entry1/tomo_entry/config/detector/direction_col'])

                    ds_rotate = dfile['entry1/tomo_entry/config/rotation_axis']
                    if ds_rotate.__contains__('direction'):
                        rotation_axis_direction = list(dfile[
                            'entry1/tomo_entry/config/rotation_axis/direction']
                                                       )

                    if geom_type == 'cone':
                        source_position = list(
                            dfile['entry1/tomo_entry/config/source/position'])

                        if dim == 2:
                            self._geometry = AcquisitionGeometry.create_Cone2D(
                                source_position, detector_position,
                                detector_direction_x, rotation_axis_position)
                        else:
                            self._geometry = AcquisitionGeometry.create_Cone3D(source_position,\
                                                detector_position, detector_direction_x, detector_direction_y,\
                                                rotation_axis_position, rotation_axis_direction)
                    else:
                        ray_direction = list(
                            dfile['entry1/tomo_entry/config/ray/direction'])

                        if dim == 2:
                            self._geometry = AcquisitionGeometry.create_Parallel2D(
                                ray_direction, detector_position,
                                detector_direction_x, rotation_axis_position)
                        else:
                            self._geometry = AcquisitionGeometry.create_Parallel3D(ray_direction,\
                                                detector_position, detector_direction_x, detector_direction_y,\
                                                rotation_axis_position, rotation_axis_direction)

                # for all Acquisition data
                #set angles
                angles = list(ds_angles)
                angle_unit = ds_angles.attrs.get('angle_unit', 'degree')
                initial_angle = ds_angles.attrs.get('initial_angle', 0)
                self._geometry.set_angles(angles,
                                          initial_angle=initial_angle,
                                          angle_unit=angle_unit)

                #set panel
                pixel_size_v = ds_data.attrs.get('pixel_size_v',
                                                 ds_data.attrs['pixel_size_h'])
                origin = ds_data.attrs.get('panel_origin', 'bottom-left')
                self._geometry.set_panel((num_pixels_h, num_pixels_v),\
                                        pixel_size=(ds_data.attrs['pixel_size_h'], pixel_size_v),\
                                        origin=origin)

                # set channels
                self._geometry.set_channels(num_channels)

                dimension_labels = self.read_dimension_labels(ds_data.attrs)

        #set labels
        self._geometry.set_labels(dimension_labels)

        return self._geometry

# Example 30
    def set_up(self,
               file_name=None,
               roi={
                   'angle': -1,
                   'horizontal': -1,
                   'vertical': -1
               },
               normalise=True,
               mode='bin',
               fliplr=False,
               **kwargs):

        self.file_name = file_name
        self.roi = roi
        self.normalise = normalise
        self.mode = mode
        self.fliplr = fliplr

        if 'normalize' in kwargs.keys():
            self.normalise = kwargs.get('normalize', True)
            warnings.warn(
                "'normalize' has now been deprecated. Please use 'normalise' instead."
            )

        if self.file_name is None:
            raise Exception('Path to xtek file is required.')

        # check if xtek file exists
        if not (os.path.isfile(self.file_name)):
            raise Exception('File\n {}\n does not exist.'.format(
                self.file_name))

        # check labels
        for key in self.roi.keys():
            if key not in ['angle', 'horizontal', 'vertical']:
                raise Exception(
                    "Wrong label. One of ollowing is expected: angle, horizontal, vertical"
                )

        roi = self.roi.copy()

        if 'angle' not in roi.keys():
            roi['angle'] = -1

        if 'horizontal' not in roi.keys():
            roi['horizontal'] = -1

        if 'vertical' not in roi.keys():
            roi['vertical'] = -1

        # parse xtek file
        with open(self.file_name, 'r') as f:
            content = f.readlines()

        content = [x.strip() for x in content]

        #initialise parameters
        detector_offset_h = 0
        detector_offset_v = 0
        object_offset_x = 0
        object_roll_deg = 0

        for line in content:
            # filename of TIFF files
            if line.startswith("Name"):
                self._experiment_name = line.split('=')[1]
            # number of projections
            elif line.startswith("Projections"):
                num_projections = int(line.split('=')[1])
            # white level - used for normalization
            elif line.startswith("WhiteLevel"):
                self._white_level = float(line.split('=')[1])
            # number of pixels along Y axis
            elif line.startswith("DetectorPixelsY"):
                pixel_num_v_0 = int(line.split('=')[1])
            # number of pixels along X axis
            elif line.startswith("DetectorPixelsX"):
                pixel_num_h_0 = int(line.split('=')[1])
            # pixel size along X axis
            elif line.startswith("DetectorPixelSizeX"):
                pixel_size_h_0 = float(line.split('=')[1])
            # pixel size along Y axis
            elif line.startswith("DetectorPixelSizeY"):
                pixel_size_v_0 = float(line.split('=')[1])
            # source to center of rotation distance
            elif line.startswith("SrcToObject"):
                source_to_origin = float(line.split('=')[1])
            # source to detector distance
            elif line.startswith("SrcToDetector"):
                source_to_det = float(line.split('=')[1])
            # initial angular position of a rotation stage
            elif line.startswith("InitialAngle"):
                initial_angle = float(line.split('=')[1])
            # angular increment (in degrees)
            elif line.startswith("AngularStep"):
                angular_step = float(line.split('=')[1])
            # detector offset x in units
            elif line.startswith("DetectorOffsetX"):
                detector_offset_h = float(line.split('=')[1])
            # detector offset y in units
            elif line.startswith("DetectorOffsetY"):
                detector_offset_v = float(line.split('=')[1])
            # object offset x in units
            elif line.startswith("ObjectOffsetX"):
                object_offset_x = float(line.split('=')[1])
            # object roll in degrees
            elif line.startswith("ObjectRoll"):
                object_roll_deg = float(line.split('=')[1])

        self._roi_par = [[0, num_projections, 1], [0, pixel_num_v_0, 1],
                         [0, pixel_num_h_0, 1]]

        for key in roi.keys():
            if key == 'angle':
                idx = 0
            elif key == 'vertical':
                idx = 1
            elif key == 'horizontal':
                idx = 2
            if roi[key] != -1:
                for i in range(2):
                    if roi[key][i] is not None:
                        if roi[key][i] >= 0:
                            self._roi_par[idx][i] = roi[key][i]
                        else:
                            self._roi_par[idx][
                                i] = self._roi_par[idx][1] + roi[key][i]
                if len(roi[key]) > 2:
                    if roi[key][2] is not None:
                        if roi[key][2] > 0:
                            self._roi_par[idx][2] = roi[key][2]
                        else:
                            raise Exception("Negative step is not allowed")

        if self.mode == 'bin':
            # calculate number of pixels and pixel size
            pixel_num_v = (self._roi_par[1][1] -
                           self._roi_par[1][0]) // self._roi_par[1][2]
            pixel_num_h = (self._roi_par[2][1] -
                           self._roi_par[2][0]) // self._roi_par[2][2]
            pixel_size_v = pixel_size_v_0 * self._roi_par[1][2]
            pixel_size_h = pixel_size_h_0 * self._roi_par[2][2]
        else:  # slice
            pixel_num_v = int(
                numpy.ceil((self._roi_par[1][1] - self._roi_par[1][0]) /
                           self._roi_par[1][2]))
            pixel_num_h = int(
                numpy.ceil((self._roi_par[2][1] - self._roi_par[2][0]) /
                           self._roi_par[2][2]))
            pixel_size_v = pixel_size_v_0
            pixel_size_h = pixel_size_h_0

        det_start_0 = -(pixel_num_h_0 / 2)
        det_start = det_start_0 + self._roi_par[2][0]
        det_end = det_start + pixel_num_h * self._roi_par[2][2]
        det_pos_h = (det_start +
                     det_end) * 0.5 * pixel_size_h_0 + detector_offset_h

        det_start_0 = -(pixel_num_v_0 / 2)
        det_start = det_start_0 + self._roi_par[1][0]
        det_end = det_start + pixel_num_v * self._roi_par[1][2]
        det_pos_v = (det_start +
                     det_end) * 0.5 * pixel_size_v_0 + detector_offset_v

        #angles are taken from the xtek file; *.ang and _ctdata.txt are ignored as they are not correct
        angles = numpy.asarray(
            [angular_step * proj for proj in range(num_projections)],
            dtype=numpy.float32)

        if self.mode == 'bin':
            n_elem = (self._roi_par[0][1] -
                      self._roi_par[0][0]) // self._roi_par[0][2]
            shape = (n_elem, self._roi_par[0][2])
            angles = angles[self._roi_par[0][0]:(
                self._roi_par[0][0] +
                n_elem * self._roi_par[0][2])].reshape(shape).mean(1)
        else:
            angles = angles[slice(self._roi_par[0][0], self._roi_par[0][1],
                                  self._roi_par[0][2])]

        #convert NikonGeometry to CIL geometry
        angles = -angles - initial_angle + 180

        rotate_axis_x = numpy.tan(object_roll_deg * numpy.pi / 180.)

        if self.fliplr:
            origin = 'top-left'
        else:
            origin = 'top-right'

        if pixel_num_v == 1 and (self._roi_par[1][0] + self._roi_par[1][1]
                                 ) // 2 == pixel_num_v_0 // 2:
            self._ag = AcquisitionGeometry.create_Cone2D(
                source_position=[0, -source_to_origin],
                rotation_axis_position=[-object_offset_x, 0],
                detector_position=[
                    -det_pos_h, source_to_det - source_to_origin
                ])
            self._ag.set_angles(angles, angle_unit='degree')

            self._ag.set_panel(pixel_num_h,
                               pixel_size=pixel_size_h,
                               origin=origin)

            self._ag.set_labels(labels=['angle', 'horizontal'])
        else:
            self._ag = AcquisitionGeometry.create_Cone3D(
                source_position=[0, -source_to_origin, 0],
                rotation_axis_position=[-object_offset_x, 0, 0],
                rotation_axis_direction=[rotate_axis_x, 0, 1],
                detector_position=[
                    -det_pos_h, source_to_det - source_to_origin, det_pos_v
                ])
            self._ag.set_angles(angles, angle_unit='degree')

            self._ag.set_panel((pixel_num_h, pixel_num_v),
                               pixel_size=(pixel_size_h, pixel_size_v),
                               origin=origin)

            self._ag.set_labels(labels=['angle', 'vertical', 'horizontal'])
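
        # Worked example of the detector-centring arithmetic above (illustrative
        # numbers only): with pixel_num_h_0 = 2000 and a horizontal ROI of
        # (500, 1500, 1), pixel_num_h = 1000, det_start_0 = -1000,
        # det_start = -500 and det_end = 500, so det_pos_h reduces to
        # 0 * pixel_size_h_0 + detector_offset_h: a centred ROI leaves only the
        # offset recorded in the xtek file.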