Example #1
File: boom.py Project: hamogu/ARCUS
    def one_window(self):
        '''Positions for rods on one side of one bay

        Returns
        -------
        pos4d : list
            List of pos4d matrices
        '''
        # longeron
        zoom = [self.l_longeron / 2, self.d_longeron / 2, self.d_longeron / 2]
        trans = [self.l_longeron / 2, self.l_batten / 3**0.5, 0.]
        rot = np.eye(3)
        pos4d = [compose(trans, rot, zoom)]

        # batten
        zoom = [self.l_batten / 2, self.d_batten / 2, self.d_batten / 2]
        trans = [0., self.l_batten / 4. / 3**0.5, self.l_batten / 4]
        rot = euler2mat(np.pi / 2, -np.pi / 6, 0, 'szxz')
        pos4d.append(compose(trans, rot, zoom))

        # diagonal1
        zoom = [self.l_diag / 2., self.d_diag / 2., self.d_diag / 2.]
        trans[0] = self.l_longeron / 2.
        rot = euler2mat(np.deg2rad(90. - 34.03), -np.pi / 6, 0, 'szxz')
        pos4d.append(compose(trans, rot, zoom))

        # diagonal2
        rot = euler2mat(np.deg2rad(90. + 34.03), -np.pi / 6, 0, 'szxz')
        pos4d.append(compose(trans, rot, zoom))
        return pos4d
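A minimal, self-contained sketch (made-up dimensions, not part of the original code) of what each compose(trans, rot, zoom) call above returns: a 4x4 homogeneous matrix whose last column holds the translation and whose upper-left 3x3 block is the rotation with per-axis scaling applied.

import numpy as np
from transforms3d.affines import compose
from transforms3d.euler import euler2mat

trans = [1.0, 0.5, 0.0]                            # rod center (hypothetical)
rot = euler2mat(np.pi / 2, -np.pi / 6, 0, 'szxz')  # rod orientation
zoom = [2.0, 0.1, 0.1]                             # half-length and radii
pos4d = compose(trans, rot, zoom)
assert pos4d.shape == (4, 4)
assert np.allclose(pos4d[:3, 3], trans)            # translation column
assert np.allclose(pos4d[:3, :3], rot * zoom)      # columns scaled by zoom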
Example #2
File: boom.py Project: hamogu/ARCUS
    def one_window(self):
        '''Calculate pos4d matrices for each element on the boom'''
        # longeron
        zoom = [self.l_longeron / 2, self.d_longeron / 2, self.d_longeron / 2]
        trans = [self.l_longeron / 2, self.l_batten / 2, self.l_batten / 2]
        rot = np.eye(3)
        pos4d = [compose(trans, rot, zoom)]

        # batten
        zoom = [self.l_batten / 2, self.d_batten / 2, self.d_batten / 2]
        trans = [0., self.l_batten / 2, 0]
        rot = euler2mat(np.pi / 2, 0, 0, 'syxz')
        pos4d.append(compose(trans, rot, zoom))

        # diagonal1
        zoom = [self.l_diag / 2., self.d_diag / 2., self.d_diag / 2.]
        trans = [self.l_longeron / 2., self.l_batten / 2, 0.]
        ang = np.arcsin(self.l_batten / self.l_diag)
        rot = euler2mat(ang, 0, 0, 'syxz')
        pos4d.append(compose(trans, rot, zoom))

        # diagonal2
        rot = euler2mat(-ang, 0, 0, 'syxz')
        pos4d.append(compose(trans, rot, zoom))

        return pos4d
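The arcsin above tilts the diagonal so that a rod of length l_diag exactly spans one bay. A quick check with hypothetical dimensions, assuming l_diag is the corner-to-corner distance of the bay:

import numpy as np

l_batten, l_longeron = 1.0, 2.0
l_diag = np.hypot(l_batten, l_longeron)   # corner-to-corner distance
ang = np.arcsin(l_batten / l_diag)
# the tilted rod projects onto the full longeron and batten lengths
assert np.isclose(l_diag * np.cos(ang), l_longeron)
assert np.isclose(l_diag * np.sin(ang), l_batten)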
Example #3
    def __init__(self, conf):
        # 50 nm directly deposited on CCD + 30 nm on contamination filter
        al = Table.read('../inputdata/aluminium_transmission_80nm.txt',
                        format='ascii.no_header',
                        data_start=2,
                        names=['energy', 'transmission'])
        # 100 nm contamination filter
        poly = Table.read('../inputdata/polyimide_transmission.txt',
                          format='ascii.no_header',
                          data_start=2,
                          names=['energy', 'transmission'])
        #qe = Table.read('../inputdata/qe.csv', format='ascii.ecsv')
        qe = Table.read('../inputdata/xgs_bi_ccdqe.dat',
                        format='ascii.no_header',
                        data_start=4,
                        names=['energy', 'qe', 'temp1', 'temp2'])
        qe['energy'] = 1e-3 * qe['energy']  # eV to keV

        al['energy'] = 1e-3 * al['energy']  # eV to keV
        poly['energy'] = 1e-3 * poly['energy']  # eV to keV

        detkwargs = {'pixsize': conf['pixsize']}
        detzoom = np.array([
            1, conf['pixsize'] * conf['detsize'][0] / 2,
            conf['pixsize'] * conf['detsize'][1] / 2
        ])

        detposlist = [
            affines.compose(conf['det0pos'], euler2mat(0, np.pi / 2, 0,
                                                       'sxyz'), detzoom)
        ]
        for chan in conf['channels']:
            detpos = affines.compose(conf['det1pos'],
                                     euler2mat(0, 0, 0, 'sxyz'), detzoom)
            detposlist.append(affpihalf @ conf['rotchan'][chan] @ detpos)
        ccd_args = {
            'elements': (optics.FlatDetector, optics.EnergyFilter,
                         optics.EnergyFilter, optics.EnergyFilter),
            'keywords': (
                detkwargs,
                {
                    'filterfunc': interp1d(al['energy'], al['transmission']),
                    'name': 'aluminium filter'
                },
                {
                    'filterfunc': interp1d(poly['energy'],
                                           poly['transmission']),
                    'name': 'polyimide filter'
                },
                {
                    'filterfunc': interp1d(qe['energy'], qe['qe']),
                    'name': 'CCD QE'
                },
            ),
        }
        super().__init__(elem_class=optics.FlatStack,
                         elem_pos=detposlist,
                         elem_args=ccd_args,
                         id_col='CCD_ID')
        self.set_display()
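The filterfunc keywords above all follow the same pattern: interp1d turns a tabulated (energy, transmission) curve into a callable over energy, which is what the EnergyFilter keyword expects here. A minimal sketch with made-up numbers:

import numpy as np
from scipy.interpolate import interp1d

energy = np.array([0.1, 1.0, 10.0])          # keV
transmission = np.array([0.2, 0.8, 0.95])
filterfunc = interp1d(energy, transmission)  # linear interpolation
assert np.isclose(filterfunc(1.0), 0.8)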
Example #4
    def get_inverse_transform(self, im_ref, mode='affine'):
        aff_im_self = self.im_file.affine
        aff_im_ref = im_ref.im_file.affine
        if mode == 'affine':
            transform = np.matmul(np.linalg.inv(aff_im_ref), aff_im_self)
        else:
            T_self, R_self, Sc_self, Sh_self = affines.decompose44(aff_im_self)
            T_ref, R_ref, Sc_ref, Sh_ref = affines.decompose44(aff_im_ref)
            if mode == 'translation':
                T_transform = T_self - T_ref
                R_transform = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform,
                                            Sc_transform)
            elif mode == 'rigid':
                T_transform = T_self - T_ref
                R_transform = np.matmul(np.linalg.inv(R_ref), R_self)
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform,
                                            Sc_transform)
            elif mode == 'rigid_scaling':
                T_transform = T_self - T_ref
                R_transform = np.matmul(np.linalg.inv(R_ref), R_self)
                Sc_transform = Sc_self / Sc_ref
                transform = affines.compose(T_transform, R_transform,
                                            Sc_transform)
            else:
                transform = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                                      [0, 0, 0, 1]])

        return transform
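A quick numerical check of the 'affine' branch above, with plain NumPy stand-ins for the image affines: inv(aff_ref) @ aff_self maps voxel coordinates of self into the voxel space of ref, so both routes reach the same world coordinate.

import numpy as np
from transforms3d import affines

aff_im_self = affines.compose([1., 2., 3.], np.eye(3), [2., 2., 2.])
aff_im_ref = affines.compose([0., 0., 1.], np.eye(3), [1., 1., 1.])
transform = np.matmul(np.linalg.inv(aff_im_ref), aff_im_self)
vox = np.array([4., 5., 6., 1.])
assert np.allclose(aff_im_ref @ (transform @ vox), aff_im_self @ vox)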
Example #8
File: hess.py Project: astrofrog/marxs
    def calculate_elempos(self):
        '''Read position of facets from file.

        Based on the positions, other grating parameters are chosen that differ between
        HEG and MEG, e.g. grating constant.
        '''
        # take x,y,z components of various vectors and put in numpy arrays
        # Note: This makes (3, N) arrays as opposed to (N, 3) arrays used in other parts of MARXS.
        # Note: Sometimes an angle has a minus. Beware of active / passive rotations.
        hess = self.hess

        cen = np.vstack([hess[s+'c'].data for s in 'xyz'])
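        # the factor 25.4 suggests these table values are inches converted to mm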
        cen[0, :] += (339.909 - 1.7) * 25.4

        # All those vectors are already normalized
        facnorm = np.vstack([hess[s + 'u'].data for s in 'xyz'])
        uxf = np.vstack([hess[s + 'uxf'].data for s in 'xyz'])
        uyf = np.vstack([hess[s + 'uyf'].data for s in 'xyz'])

        pos4ds = []
        for i in range(facnorm.shape[1]):
            if int(self.hess['hessloc'][i][0]) <= 3:
                # Size of MEG facets
                zoom = np.array([1, 1.035*25.4/2, 1.035*25.4/2])
            else:
                # Size of HEG facets
                zoom = np.array([1, 0.920*25.4/2, 0.920*25.4/2])

            rot = np.eye(3)
            rot[:, 0] = facnorm[:, i]
            rot[:, 1] = uxf[:, i]
            rot[:, 2] = uyf[:, i]
            pos4d = compose(cen[:, i], rot, zoom)
            pos4ds.append(pos4d)
        return pos4ds
Example #10
    def extract_global_position(self, cam_pos_x):
        # Convert camera position to relative polar coordinate of robot
        depth_index = self.MIN_VISUAL_DEPTH_INDEX + int(
            float(cam_pos_x) * self.VISUAL_DEPTH_RANGE / self.VISUAL_RGB_RANGE)
        depth = self.depth_data[depth_index]
        if depth_index > 10:
            depth = max(max(depth, self.depth_data[depth_index - 5]),
                        self.depth_data[depth_index - 10])
        if depth_index < 501:
            depth = max(max(depth, self.depth_data[depth_index + 5]),
                        self.depth_data[depth_index + 10])
        degree = self.MIN_DEGREE + (float(cam_pos_x * self.VISUAL_DEGREE_RANGE)
                                    / self.VISUAL_RGB_RANGE)

        # Convert relative polar transformation to relative cartesian transformation matrix
        rel_pos_x = depth * np.cos(np.deg2rad(degree))
        rel_pos_y = depth * np.sin(np.deg2rad(degree))
        obj_vec = np.array([rel_pos_x, rel_pos_y, 0, 1])

        # Convert cartesian coordinate to absolute cartesian coordinate
        rot_mat_robot = quat2mat([
            self.orientation.w, self.orientation.x, self.orientation.y,
            self.orientation.z
        ])
        trans_mat_robot = compose(
            np.array([self.position.x, self.position.y, self.position.z]),
            rot_mat_robot, np.ones(3))

        # Perform transformation
        pos_x, pos_y, pos_z = np.dot(trans_mat_robot, obj_vec)[:3]

        return pos_x, pos_y
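The core of the conversion above in isolation (made-up pose and measurement): the relative polar reading becomes a homogeneous vector, and the robot's pose matrix carries it into world coordinates. quat2mat expects the scalar-first (w, x, y, z) order used above.

import numpy as np
from transforms3d.affines import compose
from transforms3d.quaternions import quat2mat

depth, degree = 2.0, 30.0                       # hypothetical measurement
obj_vec = np.array([depth * np.cos(np.deg2rad(degree)),
                    depth * np.sin(np.deg2rad(degree)), 0, 1])
rot = quat2mat([1, 0, 0, 0])                    # identity rotation
trans_mat_robot = compose([1.0, 0.5, 0.0], rot, np.ones(3))
pos_x, pos_y, pos_z = np.dot(trans_mat_robot, obj_vec)[:3]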
Example #11
def img_warp(img, mask, theta_max=15, offset_max=0, scale_max=1.1, shear_max=0.1):
    """Training data augmentation with random affine transformation"""
    # Rotation
    vec = np.random.normal(0, 1, 3)
    vec /= np.sqrt(np.sum(vec ** 2))
    theta = np.random.uniform(-theta_max, theta_max, 1) * np.pi / 180
    R = euler.axangle2mat(vec, theta)
    
    # Scale/zoom
    sign = -1 if np.random.random() < 0.5 else 1
    Z = np.ones(3) * np.random.uniform(1, scale_max, 1) ** sign
    
    # Translation
    c_in = np.array(img.shape[1:]) // 2
    offset = np.random.uniform(- offset_max, offset_max, 3)
    T = - (c_in).dot((R * Z).T) + c_in + offset
    
    # Shear
    S = np.random.uniform(- shear_max, shear_max, 3)
    
    # Compose affine
    mat = affines.compose(T, R, Z, S)
    
    # Apply warp
    img_warped  = np.zeros_like(img)
    mask_warped = np.zeros_like(mask)
    for i in range(len(img)):
        img_warped[i,] = affine_transform(img[i,], mat, order=1) # Trilinear
    mask_warped[0,] = affine_transform(mask[0,], mat, order=0)   # Nearest neighbor
    
    return img_warped, mask_warped
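Hypothetical usage, assuming the imports the snippet relies on (numpy, transforms3d.euler and transforms3d.affines, and scipy.ndimage's affine_transform) are in scope, and that inputs are channel-first volumes:

import numpy as np

img = np.random.rand(1, 32, 32, 32).astype(np.float32)
mask = np.zeros((1, 32, 32, 32), dtype=np.int16)
img_aug, mask_aug = img_warp(img, mask)
assert img_aug.shape == img.shape and mask_aug.shape == mask.shape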
Example #12
    def frames(self,
               sample: GloveSample,
               fixed_frame: Tuple[str, Mat4f] = None) -> Dict[str, Mat4f]:
        """Convert sensor data to link frames.

        Arguments:
            sample {GloveSample} -- data from the glove

        Keyword Arguments:
            fixed_frame {Tuple[str, Mat4f]} -- known link frame transformation (default: {None})

        Returns:
            Dict[str, Mat4f] -- dict of link frame transformation matrix (4x4) by link name
        """
        joints = self.angles(sample)
        frames = {'root': np.eye(4)}
        for link in self._links:
            rotate = euler.euler2mat(
                *np.deg2rad(np.add(link.euler, joints[link.name])), 'sxyz')
            frame = affines.compose(np.dot(rotate, [link.length, 0, 0]),
                                    rotate, [1, 1, 1])
            frames[link.name] = np.dot(frames[link.parent], frame)

        if fixed_frame is not None:
            link_name, frame = fixed_frame
            shift = np.dot(frame, np.linalg.inv(frames[link_name]))
            for link_name in frames:
                frames[link_name] = np.dot(shift, frames[link_name])

        return frames
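The fixed_frame correction above re-anchors the whole chain: shift is constructed so that the known link lands exactly on its measured frame (identity stand-ins used here just to show the algebra):

import numpy as np

measured = np.eye(4)                  # known frame of one link
computed = np.eye(4)                  # same link as computed from the glove
shift = np.dot(measured, np.linalg.inv(computed))
assert np.allclose(np.dot(shift, computed), measured)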
Example #13
    def from_rowland(cls, rowland, width, rotation=0., kwargs={}):
        '''Generate a `Cylinder` from a `RowlandTorus`.

        According to the definition of the `marxs.design.rowland.RowlandTorus`
        the origin phi=0 is at the "top". When this class method is used to
        make a detector that catches all dispersed grating signal on the
        Rowland torus, a ``rotation=np.pi`` places the center of the Cylinder
        close to the center of the torus (the location of the focal point in
        the standard Rowland geometry).

        Parameters
        ----------
        rowland : `~marxs.design.RowlandTorus`
            The circular detector is constructed to fit exactly into the
            Rowland Circle defined by ``rowland``.
        width : float
            Half-width of the tube in the flat direction (z-axis) in mm
        rotation : float
            Rotation angle of the Cylinder around its z-axis compared to the
            phi=0 position of the Rowland torus.

        '''
        # Step 1: Rotate around z axis
        rot = axangle2mat(np.array([0, 0, 1.]), rotation)
        # Step 2: Get position and size from Rowland torus
        pos4d_circ = compose([rowland.R, 0, 0], rot,
                             [rowland.r, rowland.r, width])
        # Step 3: Transform to global coordinate system
        pos4d_circ = np.dot(rowland.pos4d, pos4d_circ)
        # Step 4: Make detector
        det_kwargs = {'pos4d': pos4d_circ}
        det_kwargs.update(kwargs)
        return cls(det_kwargs)
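A sketch of the transform chaining in steps 1-3 with made-up torus parameters: the local placement is composed first, and the torus' own pos4d, multiplied on the left, then carries it into global coordinates.

import numpy as np
from transforms3d.affines import compose
from transforms3d.axangles import axangle2mat

R_torus, r, width = 5900.0, 5900.0, 50.0   # hypothetical values in mm
rot = axangle2mat(np.array([0, 0, 1.]), np.pi)
pos4d_circ = compose([R_torus, 0, 0], rot, [r, r, width])
rowland_pos4d = np.eye(4)                  # torus at the origin in this sketch
pos4d_global = np.dot(rowland_pos4d, pos4d_circ)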
Example #14
    def apply_transformation(self, M):
        """Apply the transformation matrix to all position vectors."""

        super().apply_transformation(M)
        translation, rotation, zoom, shear = affines.decompose(M)
        transform = affines.compose(np.zeros(3), rotation, zoom, shear)[:3, :3]
        self._patch_orient = np.dot(transform, self._patch_orient)
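A check of the decompose/compose trick above: re-composing with zero translation reproduces exactly the linear (upper-left 3x3) block of the original matrix.

import numpy as np
from transforms3d import affines

M = affines.compose([5., 6., 7.], np.eye(3), [2., 1., 1.], [0.1, 0., 0.])
translation, rotation, zoom, shear = affines.decompose(M)
linear = affines.compose(np.zeros(3), rotation, zoom, shear)[:3, :3]
assert np.allclose(linear, M[:3, :3])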
Example #15
    def process_input(self):
        """
        Function to extract data from the input array.
        Input array is the output of the optimization step
        which holds the generated sparse model of the object
        and the relative scene transformations.
        """
        #get the relative scene transformations from input array
        self.scene_tfs = np.empty((0, 4, 4))
        for input_arr in self.input_arrays:
            num_scenes = input_arr['ref'].shape[0]
            out_ts = input_arr['scenes'][:num_scenes * 3].reshape((num_scenes, 3))
            out_qs = input_arr['scenes'][num_scenes * 3:num_scenes * 7].reshape((num_scenes, 4))
            out_tfs = np.asarray([
                tfa.compose(t, tfq.quat2mat(q), np.ones(3))
                for t, q in zip(out_ts, out_qs)
            ])
            self.scene_tfs = np.vstack((self.scene_tfs, out_tfs))

        self.object_model = []
        for obj in self.model_paths:
            #read sparse model from input array
            sparse_model = SparseModel().reader(obj)
            #read dense model from .PLY file
            dense_model = o3d.io.read_point_cloud(
                os.path.join(os.path.dirname(obj), "dense.ply"))
            self.object_model.append(
                [sparse_model, np.asarray(dense_model.points)])
        return
Example #16
def process(input_array, pp, off):
    ref_kpts   = input_array['ref']
    select_mat = input_array['sm']
    opt_output = input_array['res']
    num_scenes = ref_kpts.shape[0]
    num_keypts = ref_kpts.shape[2]
    print("Number of scenes: ", num_scenes)
    print("Number of keypts: ", num_keypts)

    sce_id  = 0
    vis_vec = evaluate3d.get_visibility(select_mat[:3*num_keypts,:3*num_keypts])
    obj_man = evaluate3d.get_object_manual(ref_kpts[sce_id], vis_vec)
    obj_def = evaluate3d.get_object_definition(pp, vis_vec)
    d, Z, tform = evaluate3d.procrustes(obj_def, obj_man, False)

    T = tfa.compose(tform['translation'], np.linalg.inv(tform['rotation']), np.ones(3))
    
    obj_all = evaluate3d.get_object_definition(pp, np.ones(num_keypts))
    obj_est = opt_output[(num_scenes-1)*7:].reshape((num_keypts, 3))
    err = []
    for (x, y) in zip(obj_all, obj_est):
        y = (T[:3,:3].dot(y) + T[:3,3])
        err.append(((x-y)**2).sum()**0.5)

    print("Mean error: ", sum(err)/len(err))
    if visualize:
        evaluate3d.visualize_keypoints(obj_est, obj_all, T, off, None)
    print("---")
Example #17
def generate_facet_uncertainty(n, xyz, angle_xyz):
    r'''Generate 4d matrices that represent facet misalignment.

    Positional and rotational uncertainties are input to this function. It
    then draws randomly from Gaussians centered on 0 (the correct position)
    for the displacement and rotation, where the :math:`\sigma` of the
    Gaussian is given by the numbers in the input. The linear displacements
    and angles are expressed as (4, 4) matrices suitable for use with
    homogeneous coordinates.

    Parameters
    ----------
    n : int
        Number of (4, 4) matrices to be calculated
    xyz : tuple of 3 floats
        accuracy of grating positioning in x, y, z (in mm) - Gaussian sigma, not FWHM!
    angle_xyz : tuple of 3 floats
        accuracy of grating positioning. Rotation around x, y, z (in rad) - Gaussian sigma, not FWHM!

    Returns
    -------
    pos_uncert : list of n (4,4) np.arrays
        Random realizations of the uncertainty
    '''
    translation = np.random.normal(
        size=(n, 3)) * np.asanyarray(xyz)[np.newaxis, :]
    rotation = np.random.normal(
        size=(n, 3)) * np.asanyarray(angle_xyz)[np.newaxis, :]
    return [
        affines.compose(t, euler.euler2mat(a[0], a[1], a[2], 'sxyz'),
                        np.ones(3)) for t, a in zip(translation, rotation)
    ]
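An example call with assumed tolerances, relying on the function defined above: 0.5 mm positional scatter per axis and 0.1 degree rotational scatter per axis.

import numpy as np

mats = generate_facet_uncertainty(100, (0.5, 0.5, 0.5),
                                  np.deg2rad([0.1, 0.1, 0.1]))
assert len(mats) == 100 and mats[0].shape == (4, 4)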
Example #18
def test_change_position_after_init():
    '''Regression: In MARXS 0.1 the geometry could not be changed after init
    because a lot of stuff was pre-calculated in __init__. That should no
    longer be the case. This test is here to check that for gratings.'''
    photons = generate_test_photons(5)

    g1 = FlatGrating(d=1. / 500,
                     order_selector=OrderSelector([1]),
                     groove_angle=.3)
    g1.order_name = 'one'
    p1 = g1(photons.copy())

    pos3d = axangles.axangle2mat([1, 0, 0], .3)
    trans = np.array([0., -.3, .5])
    # Make grating at some point
    g2 = FlatGrating(d=1. / 500,
                     order_selector=OrderSelector([1]),
                     position=trans)
    # then move it so that pos and groove direction match g1
    g2.geometry.pos4d = affines.compose(np.zeros(3), pos3d, 25. * np.ones(3))
    g2.blaze_name = 'myblaze'
    p2 = g2(photons.copy())

    assert np.allclose(p1['dir'], p2['dir'])
    assert np.allclose(p1['pos'], p2['pos'])

    # Check naming of the grating output columns
    assert np.all(p1['one'] == 1)
    assert 'myblaze' in p2.colnames
Example #19
def moveelem(e, dx=0, dy=0, dz=0, rx=0., ry=0., rz=0.):
    '''Move and rotate a marxs element around principal axes.

    Unlike `~marxs.design.tolerancing.moveglobal` this does not work on a
    `~marxs.simulator.Parallel` object, which is a container holding other
    elements, but it moves just one specific element itself (e.g. a single
    detector).

    To make it easier to return the element to its original position,
    the original ``pos4d`` matrix of the element is stored as ``pos4d_orig``.

    Parameters
    ----------
    e :`marxs.optics.base.OpticalElement` or list of those elements
        Elements where uncertainties will be set
    dx, dy, dz : float
        translation in x, y, z (in mm)
    rx, ry, rz : float
        Rotation around x, y, z (in rad)
    '''
    if not hasattr(e.geometry, 'pos4d_orig'):
        e.geometry.pos4d_orig = e.geometry.pos4d.copy()
    move = affines.compose([dx, dy, dz],
                           euler.euler2mat(rx, ry, rz, 'sxyz'),
                           np.ones(3))
    e.geometry.pos4d = move @ e.geometry.pos4d_orig
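With all arguments left at zero, the composed move is the identity, so a plain moveelem(e) restores the element to its stored pos4d_orig:

import numpy as np
from transforms3d import affines, euler

move = affines.compose([0, 0, 0], euler.euler2mat(0., 0., 0., 'sxyz'),
                       np.ones(3))
assert np.allclose(move, np.eye(4))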
Example #23
    def __call__(self, element, dx=0, dy=0, dz=0, rx=0., ry=0., rz=0.):
        pos = affines.compose(
            [dx, dy, dz + self.conf['f']],
            euler.euler2mat(rx, ry, rz, 'sxyz') @ xyz2zxy[:3, :3],
            [self.conf['mirr_length'], self.conf['mirr_rout'],
             self.conf['mirr_rout']])
        element.geometry.pos4d[:] = pos
Example #24
    def offset_angle(self, angle):
        self.angleOffset = angle

        Rotation = euler2mat(0, angle, 0, "syxz")

        matrix = compose([0, 0, 0], Rotation, [1, 1, 1], [0, 0, 0])

        self.first.offset_apparatus(matrix)
Example #25
    def get_projection(self, inputs, tar_cam_pose):
        """
        Function to get corresponding pixel location in an image given pixel location in another image.
        Uses camera_intrinsics and depth images to first get 3D positions of pixels in target frame,
        then projects to another image using cv2.projectPoints().
        Returns: (Nx2) NumPy array of N keypoints' 2D pixel coordinates.
        Input arguments:
        inputs - list of tuples of pixel coordinates, associated depth images and associated camera poses
        """
        if len(inputs) == 0:
            return []
        target_frame = tfa.compose(
            np.array(tar_cam_pose[:3]),
            tfq.quat2mat(np.array([tar_cam_pose[-1]] + tar_cam_pose[3:-1])),
            np.ones(3))
        point_positions = []
        for (pt, depth, pose) in inputs:
            if not isinstance(depth, np.ndarray) or not isinstance(pose, list):
                continue
            pt3d_z = (depth[pt[1], pt[0]]) * (1.0 / self.scale)
            if pt == [-1, -1] or pt3d_z == 0:
                continue
            pt3d_x = (pt[0] - self.camera_intrinsics[2]) * (
                pt3d_z / self.camera_intrinsics[0])
            pt3d_y = (pt[1] - self.camera_intrinsics[3]) * (
                pt3d_z / self.camera_intrinsics[1])
            position = [pt3d_x, pt3d_y, pt3d_z]
            source_frame = tfa.compose(
                np.array(pose[:3]),
                tfq.quat2mat(np.array([pose[-1]] + pose[3:-1])), np.ones(3))
            relative_tf = np.linalg.inv(target_frame).dot(source_frame)
            position_tf = relative_tf[:3, :3].dot(position) + relative_tf[:3, 3]
            point_positions.append(position_tf)

        #project 3D points to 2D image plane
        rvec = cv2.Rodrigues(np.eye(3))[0]
        tvec = np.zeros(3)
        try:
            keypoint_pixels = cv2.projectPoints(np.array(point_positions),
                                                rvec, tvec, self.camera_matrix,
                                                None)[0]
            return keypoint_pixels.transpose(1, 0, 2)[0]
        except Exception as e:
            return np.array([])
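The back-projection above is the standard pinhole model with camera_intrinsics laid out as [fx, fy, cx, cy]; with made-up intrinsics, a pixel and its depth give a 3D point in the camera frame:

import numpy as np

fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5   # hypothetical intrinsics
(u, v), z = (400, 300), 1.5                   # pixel and depth in meters
point3d = np.array([(u - cx) * z / fx, (v - cy) * z / fy, z])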
Example #26
def test_rand_de_compose():
    # random matrices
    for i in range(50):
        M = np.random.normal(size=(4,4))
        M[-1] = [0, 0, 0, 1]
        for func in decompose, decompose44:
            T, R, Z, S = func(M)
            M2 = compose(T, R, Z, S)
            assert_array_almost_equal(M, M2)
Example #27
    def __call__(self, sample):
        # image: numpy array, (D,H,W)
        # label: integer, 0,1,..
        image = sample['image']
        label = sample['label']
        # get transform coordinate
        img_size = image.shape
        coords0, coords1, coords2 = np.mgrid[:img_size[0], :img_size[1],
                                             :img_size[2]]
        coords = np.array([
            coords0 - img_size[0] / 2, coords1 - img_size[1] / 2,
            coords2 - img_size[2] / 2
        ])
        tform_coords = np.append(coords.reshape(3, -1),
                                 np.ones((1, np.prod(img_size))),
                                 axis=0)
        # transform configuration
        # translation
        if 't' in self.mode:
            translation = [
                0, np.random.uniform(-5, 5),
                np.random.uniform(-5, 5)
            ]
        else:
            translation = [0, 0, 0]

        # rotation
        if 'r' in self.mode:
            rotation = euler2mat(
                np.random.uniform(-5, 5) / 180.0 * np.pi, 0, 0, 'sxyz')
        else:
            rotation = euler2mat(0, 0, 0, 'sxyz')

        # zoom
        if 'z' in self.mode:
            zoom = [
                1, np.random.uniform(0.9, 1.1),
                np.random.uniform(0.9, 1.1)
            ]
        else:
            zoom = [1, 1, 1]

        # compose
        warp_mat = compose(translation, rotation, zoom)

        # transform
        w = np.dot(warp_mat, tform_coords)
        w[0] = w[0] + img_size[0] / 2
        w[1] = w[1] + img_size[1] / 2
        w[2] = w[2] + img_size[2] / 2
        warp_coords = w[0:3].reshape(3, img_size[0], img_size[1], img_size[2])

        image = warp(image, warp_coords)

        new_sample = {'image': image, 'label': label}

        return new_sample
Example #28
	def move_first(self, displacement=[0, 0, 0]):

		currentPosition, currentRotation, currentZoom, currentShear = decompose(self.first.pos4d)

		position = np.array(currentPosition) + np.array(displacement)

		matrix = compose(position, currentRotation, [1, 1, 1], [0, 0, 0])

		self.first.offset_apparatus(matrix)
Example #29
	def defaultDetector(self, detectorDistance):
		self.defaultDetectorOrientation = euler2mat(0, 0, -np.pi / 2, 'syxz')
		self.defaultDetectorOrientation = np.dot(euler2mat(np.pi / 2, 0, 0, 'syxz'), self.defaultDetectorOrientation)
		self.defaultDetectorPosition = np.array([0, detectorDistance, 0])
		self.defaultDetectorPos4d = compose(self.defaultDetectorPosition, self.defaultDetectorOrientation, [1, 12.288 * 3, 12.288 * 3], np.zeros(3))
		# True zoom is [1, 12.288, 12.288]
		detector = FlatDetector(pixsize=24.576e-3, pos4d=self.defaultDetectorPos4d)

		return detector
Example #30
def test_random_mat():
    '''Compare multiplication of trans and zoom matrices with transforms3d.

    This test ensures that the same conventions are used for ordering.
    '''
    rot = np.eye(3)
    trans = np.random.rand(3)
    zoom = np.random.rand(3)
    assert np.allclose(np.dot(utils.translation2aff(trans), np.dot(utils.mat2aff(rot), utils.zoom2aff(zoom))),
                       compose(trans, rot, zoom))
Example #31
    def defaultDetector(self, detectorDistance):
        self.defaultDetectorOrientation = euler2mat(0, 0, -np.pi / 2, 'syxz')
        self.defaultDetectorPosition = np.array([0, detectorDistance, 0])
        self.defaultDetectorPos4d = compose(self.defaultDetectorPosition,
                                            self.defaultDetectorOrientation,
                                            [1, 12.288, 12.288], np.zeros(3))
        detector = FlatDetector(pixsize=24.576e-3,
                                pos4d=self.defaultDetectorPos4d)

        return detector
Example #32
    def generate_labels(self):
        """
        Main function to generate labels for RGB images according to provided input array.
        Returns a list of samples where each sample is tuple of the RGB image and the
        associated label, where each label is a tuple of the keypoints, center and scale.
        """
        samples = []
        idx = 2

        #read the names of image frames in this scene
        with open(os.path.join(self.dataset_path, 'associations.txt'),
                  'r') as file:
            img_name_list = file.readlines()

        #read the camera pose corresponding to each frame
        with open(os.path.join(self.dataset_path, 'camera.poses'),
                  'r') as file:
            cam_pose_list = [
                list(map(float,
                         line.split()[1:])) for line in file.readlines()
            ]

        #generate labels only for a fraction of total images in scene
        zipped_list = list(zip(img_name_list, cam_pose_list))[::self.ratio]
        for im_path, cam_pose in zipped_list:
            label = []
            for idx in range(len(self.scene_tfs)):
                scene_tf = self.scene_tfs[idx]
                obj_model = self.object_model[idx]
                #read the RGB images using opencv
                img_name = im_path.split()
                rgb_im_path = os.path.join(self.dataset_path, img_name[3])
                input_rgb_image = cv2.resize(cv2.imread(rgb_im_path),
                                             (self.width, self.height))
                #compose 4x4 camera pose matrix
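                #pose stores the quaternion scalar last; quat2mat expects (w, x, y, z)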
                cam_t = tfa.compose(
                    np.asarray(cam_pose[:3]),
                    tfq.quat2mat(np.asarray([cam_pose[-1]] + cam_pose[3:-1])),
                    np.ones(3))
                #get 2D positions of keypoints, centers and scale of bounding box
                label.append(
                    self.project_points(obj_model,
                                        np.dot(np.linalg.inv(cam_t),
                                               scene_tf)))
                #samples.append((input_rgb_image, label))

            #visualize if required
            colors = [(100, 0, 0), (0, 150, 0), (0, 0, 150)]
            if self.visualize:
                for l, color in zip(label, colors):
                    img = self.visualize_sample((input_rgb_image, l), color)
                cv2.imshow('window', img)
                cv2.waitKey(10)

        return samples
Example #33
    def generate_labels(self):
        """
        Main function to generate labels for RGB images according to provided input array.
        Returns a list of samples where each sample is tuple of the RGB image and the
        associated label, where each label is a tuple of the keypoints, center and scale.
        """
        samples = []
        #iterate through a zip of list of scene dirs and the relative scene tfs
        for data_dir_idx, (cur_scene_dir, sce_t) in enumerate(
                zip(self.list_of_scene_dirs, self.scene_tfs)):

            #read the names of image frames in this scene
            with open(
                    os.path.join(self.dataset_path, cur_scene_dir,
                                 'associations.txt'), 'r') as file:
                img_name_list = file.readlines()

            #read the camera pose corresponding to each frame
            with open(
                    os.path.join(self.dataset_path, cur_scene_dir,
                                 'camera.poses'), 'r') as file:
                cam_pose_list = [
                    list(map(float,
                             line.split()[1:])) for line in file.readlines()
                ]

            #generate labels only for a fraction of total images in scene
            zipped_list = list(zip(img_name_list, cam_pose_list))[::self.ratio]
            for img_name, cam_pose in zipped_list:
                #read the RGB images using opencv
                img_name = img_name.split()
                rgb_im_path = os.path.join(self.dataset_path, cur_scene_dir,
                                           img_name[3])
                input_rgb_image = cv2.resize(cv2.imread(rgb_im_path),
                                             (self.width, self.height))
                #compose 4x4 camera pose matrix
                cam_t = tfa.compose(
                    np.asarray(cam_pose[:3]),
                    tfq.quat2mat(np.asarray([cam_pose[-1]] + cam_pose[3:-1])),
                    np.ones(3))
                #get 2D positions of keypoints, centers and scale of bounding box
                label = self.project_points(
                    self.object_model, np.dot(np.linalg.inv(cam_t), sce_t))
                #label = self.project_points(self.object_model, np.dot(np.linalg.inv(sce_t), cam_t))
                samples.append((input_rgb_image, label))

                #visualize if required
                if self.visualize:
                    self.visualize_sample((input_rgb_image, label))

            print(
                "Created {} labeled samples from dataset {} (with {} raw samples)."
                .format(len(zipped_list), data_dir_idx, len(img_name_list)))
        return samples
Example #34
    def init_aper(self, conf):
        x = conf['aper_rin'] - conf['mirr_rout']
        apers = []
        for chan in conf['channels']:
            for rot in [0, np.pi]:
                rot = conf['rotchan'][chan] @ euler2aff(rot, 0, 0, 'szyz')
                pos = affines.compose([0, x, conf['aper_z']],
                                      euler2mat(0, -np.pi / 2, 0, 'sxyz'),
                                      conf['aper_zoom'])
                apers.append(RectangleAperture(pos4d=rot @ pos))
        return optics.MultiAperture(elements=apers)
Example #35
	def offset_angle(self, angle, axis=[1, 0, 0]):
		self.angleOffset = angle

		axis = np.array(axis)

		Rotation = axangle2mat(axis, angle)

		currentPosition, currentRotation, currentZoom, currentShear = decompose(self.first.pos4d)

		matrix = compose(currentPosition, Rotation, [1, 1, 1], [0, 0, 0])

		self.first.offset_apparatus(matrix)
Example #36
    def get_pixel_errors(self):
        """
        Function to compute 2D distance error between the estimated 2D keypoints and
        projections of ground-truth 3D keypoints.
        Returns a list of scene-wise errors.
        """
        scene_err_list = []
        true_poses = self.get_true_poses()
        for idx, (cur_scene_dir, sce_t) in enumerate(
                zip(self.list_of_scene_dirs, self.scene_tfs)):
            err = []
            #read the names of image frames in this scene
            with open(
                    os.path.join(self.dataset_path, cur_scene_dir,
                                 'associations.txt'), 'r') as file:
                img_name_list = file.readlines()

            #read the camera pose corresponding to each frame
            with open(
                    os.path.join(self.dataset_path, cur_scene_dir,
                                 'camera.poses'), 'r') as file:
                cam_pose_list = [
                    list(map(float,
                             line.split()[1:])) for line in file.readlines()
                ]

            for img_name, cam_pose in zip(img_name_list, cam_pose_list):
                #read the RGB images using opencv
                img_name = img_name.split()
                rgb_im_path = os.path.join(self.dataset_path, cur_scene_dir,
                                           img_name[3])
                input_rgb_image = cv2.resize(cv2.imread(rgb_im_path),
                                             (self.width, self.height))
                #compose 4x4 camera pose matrix
                cam_t = tfa.compose(
                    np.asarray(cam_pose[:3]),
                    tfq.quat2mat(np.asarray([cam_pose[-1]] + cam_pose[3:-1])),
                    np.ones(3))
                #get estimated 2D positions of keypoints
                estpts, _, _ = self.project_points(
                    self.object_model, np.dot(np.linalg.inv(cam_t), sce_t))
                #get the ground truth 2D positions of keypoints
                trupts, _, _ = self.project_points(true_poses[idx],
                                                   np.linalg.inv(cam_t))

                #calculate the error distance
                err.extend([((x - y)**2).sum()**0.5
                            for (x, y) in zip(trupts, estpts)])
                #visualize if required
                if self.visualize:
                    self.visualize_joint(input_rgb_image, estpts, trupts)
            scene_err_list.append(err)
        return scene_err_list
Example #37
    def define_grasp_point(self, ply_path):
        """
        Function to define grasp pose.
        """
        # create output dir if not exists
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)

        if (self.scene_kpts.shape) != (1, 3, 2):
            raise Exception("2 3D points must exist to define a grasp pose.")
        if not os.path.exists(ply_path):
            raise Exception("%s path does not exist" % ply_path)
        #get 3 user-defined 3D points
        point_1 = self.scene_kpts.transpose(0, 2, 1)[0, 0]
        point_2 = self.scene_kpts.transpose(0, 2, 1)[0, 1]
        #read point cloud and get normals
        scene_cloud = o3d.io.read_point_cloud(ply_path)
        scene_cloud.estimate_normals()
        scene_cloud.normalize_normals()
        scene_points = scene_cloud.points
        scene_normals = scene_cloud.normals
        scene_tree = o3d.geometry.KDTreeFlann(scene_cloud)
        #get neighbors and find normal direction
        [_, idx, _] = scene_tree.search_knn_vector_3d(point_1, 200)
        normals = np.asarray(scene_normals)[list(idx)]
        normal_dir = normals.mean(0)
        normal_dir = normal_dir / np.linalg.norm(normal_dir)
        #get intersection of point and plane
        # d = (a.x_0 + b.y_0 + c.z_0)/(a.x_u + b.y_u + c.z_u)
        lamda = np.dot(normal_dir, point_1) / np.dot(normal_dir, point_2)
        point_i = lamda * point_2
        vector_x = (point_i - point_1)
        vector_y = np.cross(normal_dir, vector_x)
        #normalize
        vector_x = vector_x / np.linalg.norm(vector_x)
        vector_y = vector_y / np.linalg.norm(vector_y)
        #create rotation matrix
        rot_mat = np.array([vector_x, vector_y, normal_dir])
        tf_mat = tfa.compose(point_1, rot_mat, np.ones(3))

        #save the grasp point
        SparseModel().grasp_writer(
            tf_mat, os.path.join(self.output_dir, "sparse_model.txt"))

        #visualize in open3d
        vis_mesh_list = []
        vis_mesh_list.append(scene_cloud)
        coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
            0.2)
        coordinate_frame.transform(tf_mat)
        vis_mesh_list.append(coordinate_frame)
        o3d.visualization.draw_geometries(vis_mesh_list)
        return True, tf_mat
Example #39
def _parse_position_keywords(kwargs):
    '''Parse keywords to define position.

    If ``pos4d`` is given, use that, otherwise look for ``position``, ``zoom``
    and ``orientation``.

    Parameters
    ----------
    pos4d : 4x4 array
        Transformation to bring an element from the default position
        (see description of individual elements) to a certain point in
        the space of homogeneous coordinates.
    position : 3-d vector in real space
        Measured from the origin of the spacecraft coordinate system.
    orientation : Rotation matrix or ``None``
        Relative orientation of the base vectors of this optical
        element relative to the orientation of the coordinate system
        of the spacecraft. The default is no rotation (i.e. the axes of both
        coordinate systems are parallel).
    zoom : float or 3-d vector
        Scale the size of an optical element in each dimension by
        ``zoom``. If ``zoom`` is a scalar, the same scale is applied
        in all dimensions. This only affects the outer dimensions of the
        optical element, not internal scales like the pixel size or
        grating constant (if defined for the optical element in
        question).
    '''
    pos4d = kwargs.pop('pos4d', None)
    if pos4d is None:
        position = kwargs.pop('position', np.zeros(3))
        orientation = kwargs.pop('orientation', np.eye(3))
        zoom = kwargs.pop('zoom', 1.)
        if np.isscalar(zoom):
            zoom = np.ones(3) * zoom
        if not len(zoom) == 3:
            raise ValueError(
                'zoom must have three elements for x,y,z or be a scalar (global zoom).'
            )
        if np.any(np.array(zoom) <= 0):
            raise ValueError(
                'All values in zoom must be positive-definite to keep pos4d matrix valid. Specify zero-thickness elements with display properties.'
            )
        pos4d = affines.compose(position, orientation, zoom)
    else:
        if ('position' in kwargs) or ('orientation' in kwargs) or ('zoom'
                                                                   in kwargs):
            raise ValueError(
                'If pos4d is specified, the following keywords cannot be given at the same time: position, orientation, zoom.'
            )

    if np.linalg.det(pos4d) == 0:
        raise ValueError('pos4d matrix is invalid (determinant is 0).')
    return pos4d
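A hypothetical call of the parser above; note that the keyword dict is consumed via pop, so pass a copy if it is needed again.

import numpy as np

kwargs = {'position': np.array([0., 1., 0.]), 'zoom': 2.}
pos4d = _parse_position_keywords(kwargs)
assert np.allclose(pos4d[:3, 3], [0., 1., 0.])
assert kwargs == {}          # position and zoom were popped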
Example #40
File: boom.py Project: hamogu/ARCUS
    def pos_spec(self):
        '''Calculate pos4d matrices for each element on the boom'''

        fullboompos4d = []
        for h in range(self.start_bay, self.n_bays):
            trans = [h * self.l_longeron, 0, 0]
            for i in range(self.n_sides):
                rot = euler2mat((i * 2) * np.pi / self.n_sides, 0, 0, 'sxyz')
                affmat = compose(trans, rot, np.ones(3))
                for p in self.one_window():
                    fullboompos4d.append(np.dot(affmat, p))

        return fullboompos4d
Example #42
def pose2mat(pos, rot_euler, axes="sxyz"):
    """
    Convert vector of position and euler angle to numpy matrix
    :param pos: np.array [x, y, z]
    :param rot_euler: np.array [rot_x, rot_y, rot_z]
    :param axes: corresponding relation between rotation angles and axis
    :return: 4x4 transformation matrix in np.mat
    """
    t = np.array(pos)
    r = euler2mat(rot_euler[0], rot_euler[1], rot_euler[2], axes=axes)
    z = np.ones(3)
    pose_mat = compose(t, r, z)
    return np.mat(pose_mat)
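Usage sketch: an all-zero pose yields the identity, and because the result is an np.mat, the * operator on it means matrix multiplication.

import numpy as np

m = pose2mat(np.zeros(3), np.zeros(3))
assert np.allclose(m, np.eye(4))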
Example #43
	def __init__(self, firstMirrorREFL='./mirror_files/A12113.txt',
	             firstMirrorPOL='./mirror_files/ALSPolarization.txt',
	             secondMirrorREFL='./mirror_files/A12113.txt',
	             secondMirrorPOL='./mirror_files/ALSPolarization.txt'):

		self.first = SourceMLMirror(firstMirrorREFL, firstMirrorPOL)
		self.second = MLMirrorDetector(secondMirrorREFL, secondMirrorPOL)


		self.distanceBetweenHalves = 500
		self.angleOffset = 0

		Rotation = euler2mat(0,0,0,'syxz')
		FirstHalfPlacement = compose([-self.distanceBetweenHalves, 0, 0], Rotation, [1, 1, 1], [0, 0, 0])  # moves the first half 500 mm away from the second

		self.first.offset_apparatus(FirstHalfPlacement)
Example #44
def test_de_compose():
    # Make a series of translations etc, compose and decompose
    for trans in permutations([10,20,30]):
        for rots in permutations([0.2,0.3,0.4]):
            for zooms in permutations([1.1,1.9,2.3]):
                for shears in permutations([0.01, 0.04, 0.09]):
                    Rmat = euler2mat(*rots)
                    M = compose(trans, Rmat, zooms, shears)
                    for func in decompose, decompose44:
                        T, R, Z, S = func(M)
                        assert (
                            np.allclose(trans, T) and
                            np.allclose(Rmat, R) and
                            np.allclose(zooms, Z) and
                            np.allclose(shears, S))
Example #45
def moveglobal(e, dx=0, dy=0, dz=0, rx=0., ry=0., rz=0.):
    '''Move and rotate origin of the whole `marxs.simulator.Parallel` object.

    Parameters
    ----------
    e :`marxs.simulator.Parallel` or list of those elements
        Elements where uncertainties will be set
    dx, dy, dz : float
        translation in x, y, z (in mm)
    rx, ry, rz : float
        Rotation around x, y, z (in rad)
    '''
    e.uncertainty = affines.compose([dx, dy, dz],
                                    euler.euler2mat(rx, ry, rz, 'sxyz'),
                                    np.ones(3))
    e.generate_elements()
Example #46
	def defaultMirror(self, reflFile, testedPolarization):
		#mirrorData defaults - reference files
		self.reflFile = reflFile
		self.testedPolarization = testedPolarization

		#mirror defaults
		self.defaultMirrorOrientation = euler2mat(-np.pi / 4, 0, 0, 'syxz')
		self.defaultMirrorOrientation = np.dot(euler2mat(0, -np.pi / 2, 0, 'syxz'), self.defaultMirrorOrientation)

		self.defaultMirrorPosition = np.array([0, 0, 0])

		self.defaultMirrorPos4d = compose(self.defaultMirrorPosition, self.defaultMirrorOrientation, np.array([1, 24.5, 12]), np.zeros(3))

		mirror = MultiLayerMirror(self.reflFile, self.testedPolarization,
		                          pos4d=self.defaultMirrorPos4d)
		return mirror
Example #47
File: detector.py Project: hamogu/marxs
    def from_rowland(cls, rowland, width):
        '''Generate a `CircularDetector` from a `RowlandTorus`.

        Parameters
        ----------
        rowland : `~marxs.design.RowlandTorus`
            The circular detector is constructed to fit exactly into the
            Rowland Circle defined by ``rowland``.
        width : float
            Half-width of the tube in the flat direction (z-axis) in mm
        '''
        # Step 1: Get position and size from Rowland torus
        pos4d_circ = compose([rowland.R, 0, 0], np.eye(3), [rowland.r, rowland.r, width])
        # Step 2: Transform to global coordinate system
        pos4d_circ = np.dot(rowland.pos4d, pos4d_circ)
        # Step 3: Make detector
        return cls(pos4d=pos4d_circ, phi_offset=-np.pi)
Example #48
File: base.py Project: jhfrost314/marxs
def _parse_position_keywords(kwargs):
    '''Parse keywords to define position.

    If pos4d is given, use that, otherwise look for ``position``, ``zoom``
    and ``orientation``.
    '''
    pos4d = kwargs.pop('pos4d', None)
    if pos4d is None:
        position = kwargs.pop('position', np.zeros(3))
        orientation = kwargs.pop('orientation', np.eye(3))
        zoom = kwargs.pop('zoom', 1.)
        if np.isscalar(zoom):
            zoom = np.ones(3) * zoom
        if not len(zoom) == 3:
            raise ValueError('zoom must have three elements for x,y,z or be a scalar (global zoom).')
        pos4d = affines.compose(position, orientation, zoom)
    else:
        if ('position' in kwargs) or ('orientation' in kwargs) or ('zoom' in kwargs):
            raise ValueError('If pos4d is specified, the following keywords cannot be given at the same time: position, orientation, zoom.')
    return pos4d
Example #49
def moveindividual(e, dx=0, dy=0, dz=0, rx=0, ry=0, rz=0):
    '''Move and rotate all elements of `marxs.simulator.Parallel` object.

    Unlike `~marxs.design.tolerancing.moveglobal` this does not move the
    center of the `~marxs.simulator.Parallel` object; instead it moves
    all elements individually. Unlike `~marxs.design.tolerancing.wiggle`,
    each element is moved by the same amount, not by a number drawn from a
    distribution.

    Parameters
    ----------
    e :`marxs.simulator.Parallel` or list of those elements
        Elements where uncertainties will be set
    dx, dy, dz : float
        translation in x, y, z (in mm)
    rx, ry, rz : float
        Rotation around x, y, z (in rad)
    '''
    e.elem_uncertainty = [affines.compose((dx, dy, dz),
                                          euler.euler2mat(rx, ry, rz, 'sxyz'),
                                          np.ones(3))] * len(e.elements)
    e.generate_elements()
Example #50
	def run(self, exposureTime=1000, IntermediateDetector=False):

		# Generate photons that travel down the beamline
		print("Generating Cross Photons...")
		cross = self.first.generate_photons(exposureTime)

		if IntermediateDetector:
			# place a large detector midway between the mirrors

			# correct location
			mirrorPosition = np.dot(self.first.pos4d, self.first.mirror.geometry['center'])
			detectorLocation = mirrorPosition / 2
			detectorLocation = detectorLocation[0:3]

			# correct orientation
			detectorRotation = euler2mat(0, -np.pi / 2, 0, 'syxz')

			# combine
			detectorPos4d = compose(detectorLocation, detectorRotation, [1, 3 * 12.288, 3 * 12.288], np.zeros(3))

			detector = FlatDetector(pixsize=24.576e-3, pos4d=detectorPos4d)

			self.intermediateResults = detector.process_photons(cross)
			self.intermediateResults = self.intermediateResults[self.intermediateResults['probability'] > 0]

		# Receive photons on the other side
		print("Receiving Photons...")
		self.results = self.second.detect_photons(cross)

		if IntermediateDetector:
			return self.results, self.intermediateResults
		else:
			return self.results
Example #51
File: base.py Project: bsipocz/marxs
def _parse_position_keywords(kwargs):
    '''Parse keywords to define position.

    If ``pos4d`` is given, use that, otherwise look for ``position``, ``zoom``
    and ``orientation``.

    Parameters
    ----------
    pos4d : 4x4 array
        Transformation to bring an element from the default position (see description of
        individual elements) to a certain point in the space of homogeneous coordinates.
    position : 3-d vector in real space
        Measured from the origin of the spacecraft coordinate system.
    orientation : Rotation matrix or ``None``
        Relative orientation of the base vectors of this optical
        element relative to the orientation of the coordinate system
        of the spacecraft. The default is no rotation (i.e. the axes of both
        coordinate systems are parallel).
    zoom : float or 3-d vector
        Scale the size of an optical element in each dimension by ``zoom``. If ``zoom`` is a scalar,
        the same scale is applied in all dimensions. This only affects the outer dimensions of
        the optical element, not internal scales like the pixel size or grating constant (if
        defined for the optical element in question).
    '''
    pos4d = kwargs.pop('pos4d', None)
    if pos4d is None:
        position = kwargs.pop('position', np.zeros(3))
        orientation = kwargs.pop('orientation', np.eye(3))
        zoom = kwargs.pop('zoom', 1.)
        if np.isscalar(zoom):
            zoom = np.ones(3) * zoom
        if not len(zoom) == 3:
            raise ValueError('zoom must have three elements for x,y,z or be a scalar (global zoom).')
        pos4d = affines.compose(position, orientation, zoom)
    else:
        if ('position' in kwargs) or ('orientation' in kwargs) or ('zoom' in kwargs):
        raise ValueError('If pos4d is specified, the following keywords cannot be given at the same time: position, orientation, zoom.')
    return pos4d
Example #52
File: spo.py Project: hamogu/ARCUS
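# FWHM in arcsec converted to a Gaussian sigma in rad (divide by ~2.355, then arcsec -> deg -> rad)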
inplanescatter = 10. / 2.3545 / 3600 / 180. * np.pi
perpplanescatter = 1.5 / 2.345 / 3600. / 180. * np.pi

focallength = 12000.


spogeom = load_table('spos', 'petallayout')
spogeom['r_mid'] = (spogeom['outer_radius'] + spogeom['inner_radius']) / 2
spo_pos4d = []
# Convert angle to quantity here to make sure that unit is taken into account
for row, ang in zip(spogeom, u.Quantity(spogeom['clocking_angle']).to(u.rad).value):
    spo_pos4d.append(compose([0,  # focallength,  # - spogeom[i]['d_from_12m']
                              row['r_mid'] * np.sin(ang),
                              row['r_mid'] * np.cos(ang)],
                             euler2mat(-ang, 0., 0.),
                             # In detail this should be (primary_length + gap + secondary_length) / 2
                             # but the gap is somewhat complicated and this is only used
                             # for display, we'll ignore that for now.
                             [row['primary_length'],
                              row['azwidth'] / 2.,
                              (row['outer_radius'] - row['inner_radius']) / 2.]))

spo_pos4d = [np.dot(xyz2zxy, s) for s in spo_pos4d]

reflectivity = load_table2d('spos', 'reflectivity')
reflectivity_interpolator = RectBivariateSpline(reflectivity[1].to(u.keV),
                                                reflectivity[2].to(u.rad),
                                                reflectivity[3][0])


class PerfectLensSegment(PerfectLens):
    def __init__(self, **kwargs):