Example #1
File: fitting.py  Project: ddale/hexrd
def objFuncSX(pFit, pFull, pFlag, dFunc, dFlag,
              xyo_det, hkls_idx, bMat, vInv, wavelength,
              bVec, eVec, omePeriod,
              simOnly=False, returnScalarValue=returnScalarValue):
    """
    """
    npts   = len(xyo_det)

    refineFlag = np.hstack([pFlag, dFlag])

    # pFull[refineFlag] = pFit/scl[refineFlag]
    pFull[refineFlag] = pFit

    dParams = pFull[-len(dFlag):]
    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    # detector quantities
    rMat_d = xf.makeDetectorRotMat(pFull[:3])
    tVec_d = pFull[3:6].reshape(3, 1)

    # sample quantities
    chi    = pFull[6]
    tVec_s = pFull[7:10].reshape(3, 1)

    # crystal quantities
    rMat_c = xf.makeRotMatOfExpMap(pFull[10:13])
    tVec_c = pFull[13:16].reshape(3, 1)

    gVec_c = np.dot(bMat, hkls_idx)
    vMat_s = mutil.vecMVToSymm(vInv)                # stretch tensor comp matrix from MV notation in SAMPLE frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) # reciprocal lattice vectors in SAMPLE frame
    gHat_s = mutil.unitVector(gVec_s)               # unit reciprocal lattice vectors in SAMPLE frame
    gHat_c = np.dot(rMat_c.T, gHat_s)               # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
                                        vInv=vInv, beamVec=bVec, etaVec=eVec, omePeriod=omePeriod)

    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d, rMat_s, rMat_c,
                                                tVec_d, tVec_s, tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        print "infeasible pFull: may want to scale back finite difference step size"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome     = xf.angularDifference( calc_omes, xyo_det[:, 2] )
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)
                            ]).flatten()
        if returnScalarValue:
            retval = sum( retval )
    return retval
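The refined subset pFit is scattered back into pFull through refineFlag, so the usual driving pattern is to seed pFit from the flagged entries and hand this residual function to a least-squares solver. A minimal sketch of that pattern, assuming pFull, pFlag, dFunc, dFlag and the measured data arrays (xyo_det, hkls_idx, bMat, vInv, wavelength, bVec, eVec, omePeriod) have already been built; this is not the project's actual calibration driver:

import numpy as np
from scipy.optimize import leastsq

# force the vector-residual branch regardless of the module-level default
resid = lambda p, *args: objFuncSX(p, *args, returnScalarValue=False)

p_fit0 = pFull[np.hstack([pFlag, dFlag])]    # seed with the current flagged values
p_fit, ier = leastsq(resid, p_fit0,
                     args=(pFull, pFlag, dFunc, dFlag,
                           xyo_det, hkls_idx, bMat, vInv, wavelength,
                           bVec, eVec, omePeriod))
# note: objFuncSX also writes the refined values back into pFull in place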
Example #2
File: fitting.py  Project: lind9/hexrd
def objFuncSX(pFit, pFull, pFlag, dFunc, dFlag,
              xyo_det, hkls_idx, bMat, vInv, wavelength,
              bVec, eVec, omePeriod,
              simOnly=False, returnScalarValue=returnScalarValue):
    """
    """
    npts   = len(xyo_det)

    refineFlag = np.hstack([pFlag, dFlag])

    # pFull[refineFlag] = pFit/scl[refineFlag]
    pFull[refineFlag] = pFit

    dParams = pFull[-len(dFlag):]
    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    # detector quantities
    rMat_d = xf.makeDetectorRotMat(pFull[:3])
    tVec_d = pFull[3:6].reshape(3, 1)

    # sample quantities
    chi    = pFull[6]
    tVec_s = pFull[7:10].reshape(3, 1)

    # crystal quantities
    rMat_c = xf.makeRotMatOfExpMap(pFull[10:13])
    tVec_c = pFull[13:16].reshape(3, 1)

    gVec_c = np.dot(bMat, hkls_idx)
    vMat_s = mutil.vecMVToSymm(vInv)                # stretch tensor comp matrix from MV notation in SAMPLE frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) # reciprocal lattice vectors in SAMPLE frame
    gHat_s = mutil.unitVector(gVec_s)               # unit reciprocal lattice vectors in SAMPLE frame
    gHat_c = np.dot(rMat_c.T, gHat_s)               # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
                                        vInv=vInv, beamVec=bVec, etaVec=eVec, omePeriod=omePeriod)

    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d, rMat_s, rMat_c,
                                                tVec_d, tVec_s, tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        print "infeasible pFull: may want to scale back finite difference step size"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome     = xf.angularDifference( calc_omes, xyo_det[:, 2] )
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)
                            ]).flatten()
        if returnScalarValue:
            retval = sum( retval )
    return retval
Example #3
 def xy_of_angs(self, angs):
     """
     Cartesian coordinates of vstacked (tth, eta) pairs
     *) wrapper for transforms.anglesToGVec
     """
     gVec_l = xf.anglesToGVec(angs, self.bVec, self.eVec,
                              rMat_s=None, rMat_c=None)
     xy = xf.gvecToDetectorXY(gVec_l,
                              self.rMat, I3, I3, 
                              self.tVec, self.tVec_s, self.tVec_c,
                              distortion=self.distortion,
                              beamVec=self.bVec, 
                              etaVec=self.eVec)
     return xy
Example #4
    def angles_to_cart(self, tth_eta):
        """
        TODO: distortion
        """
        rmat_s = rmat_c = ct.identity_3x3
        tvec_s = tvec_c = ct.zeros_3

        angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))])

        xy_det = gvecToDetectorXY(
            anglesToGVec(angs, bHat_l=self.bvec, eHat_l=self.evec),
            self.rmat, rmat_s, rmat_c,
            self.tvec, tvec_s, tvec_c,
            beamVec=self.bvec)
        return xy_det
Example #5
    def angles_to_cart(self, tth_eta):
        """
        TODO: distortion
        """
        rmat_s = rmat_c = ct.identity_3x3
        tvec_s = tvec_c = ct.zeros_3

        angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))])

        xy_det = gvecToDetectorXY(anglesToGVec(angs,
                                               bHat_l=self.bvec,
                                               eHat_l=self.evec),
                                  self.rmat,
                                  rmat_s,
                                  rmat_c,
                                  self.tvec,
                                  tvec_s,
                                  tvec_c,
                                  beamVec=self.bvec)
        return xy_det
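Both angles_to_cart variants do the same thing: pad the (tth, eta) pairs with a zero omega column, convert to lab-frame G-vectors, and project onto the panel. A short usage sketch, assuming panel is a detector object exposing this method; the angle values are made up for illustration:

import numpy as np

tth_eta = np.radians([[10.0,   0.0],
                      [10.0,  90.0],
                      [15.0, -45.0]])          # (n, 2) array of (tth, eta) in radians
xy = panel.angles_to_cart(tth_eta)             # (n, 2) cartesian coordinates on the detector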
Example #6
 def xy_of_angs(self, angs):
     """
     Cartesian coordinates of vstacked (tth, eta) pairs
     *) wrapper for transforms.anglesToGVec
     """
     gVec_l = xf.anglesToGVec(angs,
                              self.bVec,
                              self.eVec,
                              rMat_s=None,
                              rMat_c=None)
     xy = xf.gvecToDetectorXY(gVec_l,
                              self.rMat,
                              I3,
                              I3,
                              self.tVec,
                              self.tVec_s,
                              self.tVec_c,
                              distortion=self.distortion,
                              beamVec=self.bVec,
                              etaVec=self.eVec)
     return xy
Example #7
def make_reflection_patches(
    instr_cfg,
    tth_eta,
    ang_pixel_size,
    omega=None,
    tth_tol=0.2,
    eta_tol=1.0,
    rMat_c=np.eye(3),
    tVec_c=np.c_[0.0, 0.0, 0.0].T,
    distortion=distortion,
    npdiv=1,
    quiet=False,
    compute_areas_func=compute_areas,
):
    """
    prototype function for making angular patches on a detector

    panel_dims are [(xmin, ymin), (xmax, ymax)] in mm

    pixel_pitch is [row_size, column_size] in mm

    DISTORTION HANDLING IS STILL A KLUDGE

    patches are:

                 delta tth
   d  ------------- ... -------------
   e  | x | x | x | ... | x | x | x |
   l  ------------- ... -------------
   t                 .
   a                 .
                     .
   e  ------------- ... -------------
   t  | x | x | x | ... | x | x | x |
   a  ------------- ... -------------

    """
    npts = len(tth_eta)

    # detector frame
    rMat_d = xfcapi.makeDetectorRotMat(instr_cfg["detector"]["transform"]["tilt_angles"])
    tVec_d = np.r_[instr_cfg["detector"]["transform"]["t_vec_d"]]
    pixel_size = instr_cfg["detector"]["pixels"]["size"]

    frame_nrows = instr_cfg["detector"]["pixels"]["rows"]
    frame_ncols = instr_cfg["detector"]["pixels"]["columns"]

    panel_dims = (
        -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]],
        0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]],
    )
    row_edges = np.arange(frame_nrows + 1)[::-1] * pixel_size[1] + panel_dims[0][1]
    col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0]

    # sample frame
    chi = instr_cfg["oscillation_stage"]["chi"]
    tVec_s = np.r_[instr_cfg["oscillation_stage"]["t_vec_s"]]

    # data to loop
    # ...WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL?
    if omega is None:
        full_angs = np.hstack([tth_eta, np.zeros((npts, 1))])
    else:
        full_angs = np.hstack([tth_eta, omega.reshape(npts, 1)])
    patches = []
    for angs, pix in zip(full_angs, ang_pixel_size):
        # need to get angular pixel size
        rMat_s = xfcapi.makeOscillRotMat([chi, angs[2]])

        ndiv_tth = npdiv * np.ceil(tth_tol / np.degrees(pix[0]))
        ndiv_eta = npdiv * np.ceil(eta_tol / np.degrees(pix[1]))

        tth_del = np.arange(0, ndiv_tth + 1) * tth_tol / float(ndiv_tth) - 0.5 * tth_tol
        eta_del = np.arange(0, ndiv_eta + 1) * eta_tol / float(ndiv_eta) - 0.5 * eta_tol

        # store dimensions for convenience
        #   * etas and tths are bin vertices, ome is already centers
        sdims = [len(eta_del) - 1, len(tth_del) - 1]

        # meshgrid args are (cols, rows), a.k.a (fast, slow)
        m_tth, m_eta = np.meshgrid(tth_del, eta_del)
        npts_patch = m_tth.size

        # calculate the patch XY coords from the (tth, eta) angles
        # * will CHEAT and ignore the small perturbation that the different
        #   omega angle values cause and simply use the central value
        gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) + np.radians(
            np.vstack([m_tth.flatten(), m_eta.flatten(), np.zeros(npts_patch)]).T
        )

        # will need this later
        rMat_s = xfcapi.makeOscillRotMat([chi, angs[2]])

        # FOR ANGULAR MESH
        conn = gutil.cellConnectivity(sdims[0], sdims[1], origin="ll")
        gVec_c = xf.anglesToGVec(gVec_angs_vtx, xf.bVec_ref, xf.eta_ref, rMat_s=rMat_s, rMat_c=rMat_c)

        xy_eval_vtx = xfcapi.gvecToDetectorXY(gVec_c.T, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c)
        if distortion is not None and len(distortion) == 2:
            xy_eval_vtx = distortion[0](xy_eval_vtx, distortion[1], invert=True)
            pass

        areas = compute_areas_func(xy_eval_vtx, conn)

        # EVALUATION POINTS
        #   * for lack of a better option will use centroids
        tth_eta_cen = gutil.cellCentroids(np.atleast_2d(gVec_angs_vtx[:, :2]), conn)
        gVec_angs = np.hstack([tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))])
        gVec_c = xf.anglesToGVec(gVec_angs, xf.bVec_ref, xf.eta_ref, rMat_s=rMat_s, rMat_c=rMat_c)

        xy_eval = xfcapi.gvecToDetectorXY(gVec_c.T, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c)
        if distortion is not None and len(distortion) == 2:
            xy_eval = distortion[0](xy_eval, distortion[1], invert=True)
            pass
        row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1])
        col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0])

        patches.append(
            (
                (gVec_angs_vtx[:, 0].reshape(m_tth.shape), gVec_angs_vtx[:, 1].reshape(m_tth.shape)),
                (xy_eval_vtx[:, 0].reshape(m_tth.shape), xy_eval_vtx[:, 1].reshape(m_tth.shape)),
                conn,
                areas.reshape(sdims[0], sdims[1]),
                (row_indices.reshape(sdims[0], sdims[1]), col_indices.reshape(sdims[0], sdims[1])),
            )
        )
        pass
    return patches
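Each entry of the returned patches list bundles, per reflection: the (tth, eta) vertex grids, the matching detector XY vertex grids, the cell connectivity, the per-cell areas, and the (row, col) pixel indices of the evaluation points. A small sketch of unpacking one patch from the version above:

for vtx_angs, vtx_xy, conn, areas, ij_pix in patches:
    tth_grid, eta_grid = vtx_angs    # bin-vertex angles on the (eta, tth) grid
    x_grid, y_grid = vtx_xy          # same vertices projected to detector XY (mm)
    rows, cols = ij_pix              # pixel indices of the cell centroids
    # areas has one value per cell and can normalize interpolated intensities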
Example #8
File: fitting.py  Project: mwiebe/hexrd
def objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, returnScalarValue=returnScalarValue):
    """
    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    detectorParams[0]  = tiltAngles[0]
    detectorParams[1]  = tiltAngles[1]
    detectorParams[2]  = tiltAngles[2]
    detectorParams[3]  = tVec_d[0]
    detectorParams[4]  = tVec_d[1]
    detectorParams[5]  = tVec_d[2]
    detectorParams[6]  = chi
    detectorParams[7]  = tVec_s[0]
    detectorParams[8]  = tVec_s[1]
    detectorParams[9]  = tVec_s[2]
    """
    npts   = len(xyo_det)

    gFull[gFlag] = gFit

    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3])
    tVec_d = detectorParams[3:6].reshape(3, 1)
    chi    = detectorParams[6]
    tVec_s = detectorParams[7:10].reshape(3, 1)

    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)              # NOTE: Inverse of V from F = V * R

    gVec_c = np.dot(bMat, hkls_idx)                 # gVecs with magnitudes in CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) # stretched gVecs in SAMPLE frame
    gHat_c = mutil.unitVector(
        np.dot(rMat_c.T, gVec_s)) # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
                                        vInv=vInv_s, beamVec=bVec, etaVec=eVec,
                                        omePeriod=omePeriod)

    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d, rMat_s, rMat_c,
                                                tVec_d, tVec_s, tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        print "infeasible pFull"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome     = xf.angularDifference( calc_omes, xyo_det[:, 2] )
        retval = mutil.rowNorm(
            np.hstack([diff_vecs_xy,
                       diff_ome.reshape(npts, 1)
                       ]) ).flatten()
        if returnScalarValue:
            retval = sum( retval )
    return retval
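The docstring above fixes the layout of the two packed vectors. A purely illustrative sketch of assembling them from named quantities (all numeric values are placeholders):

import numpy as np

expMap_c = np.zeros(3)                     # crystal orientation, exponential-map parameters
tVec_c   = np.zeros(3)                     # crystal centroid translation
vInv_MV  = np.r_[1., 1., 1., 0., 0., 0.]   # inverse left stretch, Mandel-Voigt notation
gFull = np.hstack([expMap_c, tVec_c, vInv_MV])              # 12 grain parameters

tiltAngles = np.zeros(3)                   # detector tilt angles
tVec_d     = np.r_[0., 0., -1000.]         # detector translation (mm, placeholder)
chi        = 0.0                           # oscillation-stage tilt
tVec_s     = np.zeros(3)                   # sample translation
detectorParams = np.hstack([tiltAngles, tVec_d, chi, tVec_s])   # 10 entries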
Example #9
    def pull_spots(self, plane_data, grain_params,
                   imgser_dict,
                   tth_tol=0.25, eta_tol=1., ome_tol=1.,
                   npdiv=2, threshold=10,
                   eta_ranges=[(-np.pi, np.pi), ],
                   ome_period=(-np.pi, np.pi),
                   dirname='results', filename=None, output_format='text',
                   save_spot_list=False,
                   quiet=True, check_only=False,
                   interp='nearest'):
        """
        Extract reflection info from a rotation series encoded as an
        OmegaImageseries object
        """

        # grain parameters
        rMat_c = makeRotMatOfExpMap(grain_params[:3])
        tVec_c = grain_params[3:6]

        # grab omega ranges from first imageseries
        #
        # WARNING: all imageseries AND all wedges within are assumed to have
        # the same omega values; put in a check that they are all the same???
        oims0 = imgser_dict[next(iter(imgser_dict))]
        ome_ranges = [np.radians([i['ostart'], i['ostop']])
                      for i in oims0.omegawedges.wedges]

        # delta omega in DEGREES grabbed from first imageseries in the dict
        delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]

        # make omega grid for frame expansion around reference frame
        # in DEGREES
        ndiv_ome, ome_del = make_tolerance_grid(
            delta_ome, ome_tol, 1, adjust_window=True,
        )

        # generate structuring element for connected component labeling
        if ndiv_ome == 1:
            label_struct = ndimage.generate_binary_structure(2, 2)
        else:
            label_struct = ndimage.generate_binary_structure(3, 3)

        # simulate rotation series
        sim_results = self.simulate_rotation_series(
            plane_data, [grain_params, ],
            eta_ranges=eta_ranges,
            ome_ranges=ome_ranges,
            ome_period=ome_period)

        # patch vertex generator (global for instrument)
        tol_vec = 0.5*np.radians(
            [-tth_tol, -eta_tol,
             -tth_tol,  eta_tol,
             tth_tol,  eta_tol,
             tth_tol, -eta_tol])

        # prepare output if requested
        if filename is not None and output_format.lower() == 'hdf5':
            this_filename = os.path.join(dirname, filename)
            writer = io.GrainDataWriter_h5(
                os.path.join(dirname, filename),
                self.write_config(), grain_params)

        # =====================================================================
        # LOOP OVER PANELS
        # =====================================================================
        iRefl = 0
        compl = []
        output = dict.fromkeys(self.detectors)
        for detector_id in self.detectors:
            # initialize text-based output writer
            if filename is not None and output_format.lower() == 'text':
                output_dir = os.path.join(
                    dirname, detector_id
                    )
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                this_filename = os.path.join(
                    output_dir, filename
                )
                writer = io.PatchDataWriter(this_filename)

            # grab panel
            panel = self.detectors[detector_id]
            instr_cfg = panel.config_dict(self.chi, self.tvec)
            native_area = panel.pixel_area  # pixel ref area

            # pull out the OmegaImageSeries for this panel from input dict
            ome_imgser = imgser_dict[detector_id]

            # extract simulation results
            sim_results_p = sim_results[detector_id]
            hkl_ids = sim_results_p[0][0]
            hkls_p = sim_results_p[1][0]
            ang_centers = sim_results_p[2][0]
            xy_centers = sim_results_p[3][0]
            ang_pixel_size = sim_results_p[4][0]

            # now verify that full patch falls on detector...
            # ???: strictly necessary?
            #
            # patch vertex array from sim
            nangs = len(ang_centers)
            patch_vertices = (
                np.tile(ang_centers[:, :2], (1, 4)) +
                np.tile(tol_vec, (nangs, 1))
            ).reshape(4*nangs, 2)
            ome_dupl = np.tile(
                ang_centers[:, 2], (4, 1)
            ).T.reshape(len(patch_vertices), 1)

            # find vertices that all fall on the panel
            det_xy, _ = xrdutil._project_on_detector_plane(
                np.hstack([patch_vertices, ome_dupl]),
                panel.rmat, rMat_c, self.chi,
                panel.tvec, tVec_c, self.tvec,
                panel.distortion)
            _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)

            # all vertices must be on...
            patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
            patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]

            # re-filter...
            hkl_ids = hkl_ids[patch_is_on]
            hkls_p = hkls_p[patch_is_on, :]
            ang_centers = ang_centers[patch_is_on, :]
            xy_centers = xy_centers[patch_is_on, :]
            ang_pixel_size = ang_pixel_size[patch_is_on, :]

            # TODO: add polygon testing right here!
            # done <JVB 06/21/16>
            if check_only:
                patch_output = []
                for i_pt, angs in enumerate(ang_centers):
                    # the evaluation omegas;
                    # expand about the central value using tol vector
                    ome_eval = np.degrees(angs[2]) + ome_del

                    # ...vectorize the omega_to_frame function to avoid loop?
                    frame_indices = [
                        ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                    ]
                    if -1 in frame_indices:
                        if not quiet:
                            msg = """
                            window for (%d%d%d) falls outside omega range
                            """ % tuple(hkls_p[i_pt, :])
                            print(msg)
                        continue
                    else:
                        these_vertices = patch_xys[i_pt]
                        ijs = panel.cartToPixel(these_vertices)
                        ii, jj = polygon(ijs[:, 0], ijs[:, 1])
                        contains_signal = False
                        for i_frame in frame_indices:
                            contains_signal = contains_signal or np.any(
                                ome_imgser[i_frame][ii, jj] > threshold
                            )
                        compl.append(contains_signal)
                        patch_output.append((ii, jj, frame_indices))
            else:
                # make the tth,eta patches for interpolation
                patches = xrdutil.make_reflection_patches(
                    instr_cfg, ang_centers[:, :2], ang_pixel_size,
                    omega=ang_centers[:, 2],
                    tth_tol=tth_tol, eta_tol=eta_tol,
                    rMat_c=rMat_c, tVec_c=tVec_c,
                    distortion=panel.distortion,
                    npdiv=npdiv, quiet=True,
                    beamVec=self.beam_vector)

                # GRAND LOOP over reflections for this panel
                patch_output = []
                for i_pt, patch in enumerate(patches):

                    # strip relevant objects out of current patch
                    vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch

                    prows, pcols = areas.shape
                    nrm_fac = areas/float(native_area)
                    nrm_fac = nrm_fac / np.min(nrm_fac)

                    # grab hkl info
                    hkl = hkls_p[i_pt, :]
                    hkl_id = hkl_ids[i_pt]

                    # edge arrays
                    tth_edges = vtx_angs[0][0, :]
                    delta_tth = tth_edges[1] - tth_edges[0]
                    eta_edges = vtx_angs[1][:, 0]
                    delta_eta = eta_edges[1] - eta_edges[0]

                    # need to reshape eval pts for interpolation
                    xy_eval = np.vstack([xy_eval[0].flatten(),
                                         xy_eval[1].flatten()]).T

                    # the evaluation omegas;
                    # expand about the central value using tol vector
                    ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del

                    # ???: vectorize the omega_to_frame function to avoid loop?
                    frame_indices = [
                        ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                    ]

                    if -1 in frame_indices:
                        if not quiet:
                            msg = """
                            window for (%d%d%d) falls outside omega range
                            """ % tuple(hkl)
                            print(msg)
                        continue
                    else:
                        # initialize spot data parameters
                        # !!! maybe change these to nan to not f**k up writer
                        peak_id = -999
                        sum_int = None
                        max_int = None
                        meas_angs = None
                        meas_xy = None

                        # quick check for intensity
                        contains_signal = False
                        patch_data_raw = []
                        for i_frame in frame_indices:
                            tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
                            contains_signal = contains_signal or np.any(
                                tmp > threshold
                            )
                            patch_data_raw.append(tmp)
                            pass
                        patch_data_raw = np.stack(patch_data_raw, axis=0)
                        compl.append(contains_signal)

                        if contains_signal:
                            # initialize patch data array for intensities
                            if interp.lower() == 'bilinear':
                                patch_data = np.zeros(
                                    (len(frame_indices), prows, pcols))
                                for i, i_frame in enumerate(frame_indices):
                                    patch_data[i] = \
                                        panel.interpolate_bilinear(
                                            xy_eval,
                                            ome_imgser[i_frame],
                                            pad_with_nans=False
                                        ).reshape(prows, pcols)  # * nrm_fac
                            elif interp.lower() == 'nearest':
                                patch_data = patch_data_raw  # * nrm_fac
                            else:
                                msg = "interpolation option " + \
                                    "'%s' not understood"
                                raise RuntimeError(msg % interp)

                            # now have interpolated patch data...
                            labels, num_peaks = ndimage.label(
                                patch_data > threshold, structure=label_struct
                            )
                            slabels = np.arange(1, num_peaks + 1)

                            if num_peaks > 0:
                                peak_id = iRefl
                                coms = np.array(
                                    ndimage.center_of_mass(
                                        patch_data,
                                        labels=labels,
                                        index=slabels
                                    )
                                )
                                if num_peaks > 1:
                                    center = np.r_[patch_data.shape]*0.5
                                    center_t = np.tile(center, (num_peaks, 1))
                                    com_diff = coms - center_t
                                    closest_peak_idx = np.argmin(
                                        np.sum(com_diff**2, axis=1)
                                    )
                                else:
                                    closest_peak_idx = 0
                                    pass  # end multipeak conditional
                                coms = coms[closest_peak_idx]
                                # meas_omes = \
                                #     ome_edges[0] + (0.5 + coms[0])*delta_ome
                                meas_omes = \
                                    ome_eval[0] + coms[0]*delta_ome
                                meas_angs = np.hstack(
                                    [tth_edges[0] + (0.5 + coms[2])*delta_tth,
                                     eta_edges[0] + (0.5 + coms[1])*delta_eta,
                                     mapAngle(
                                         np.radians(meas_omes), ome_period
                                         )
                                     ]
                                )

                                # intensities
                                #   - summed is 'integrated' over interpolated
                                #     data
                                #   - max is max of raw input data
                                sum_int = np.sum(
                                    patch_data[
                                        labels == slabels[closest_peak_idx]
                                    ]
                                )
                                max_int = np.max(
                                    patch_data_raw[
                                        labels == slabels[closest_peak_idx]
                                    ]
                                )
                                # ???: Should this only use labeled pixels?
                                # Those are segmented from interpolated data,
                                # not raw; likely ok in most cases.

                                # need MEASURED xy coords
                                gvec_c = anglesToGVec(
                                    meas_angs,
                                    chi=self.chi,
                                    rMat_c=rMat_c,
                                    bHat_l=self.beam_vector)
                                rMat_s = makeOscillRotMat(
                                    [self.chi, meas_angs[2]]
                                )
                                meas_xy = gvecToDetectorXY(
                                    gvec_c,
                                    panel.rmat, rMat_s, rMat_c,
                                    panel.tvec, self.tvec, tVec_c,
                                    beamVec=self.beam_vector)
                                if panel.distortion is not None:
                                    # FIXME: distortion handling
                                    meas_xy = panel.distortion[0](
                                        np.atleast_2d(meas_xy),
                                        panel.distortion[1],
                                        invert=True).flatten()
                                    pass
                                # FIXME: why is this suddenly necessary???
                                meas_xy = meas_xy.squeeze()
                                pass  # end num_peaks > 0
                        else:
                            patch_data = patch_data_raw
                            pass  # end contains_signal
                        # write output
                        if filename is not None:
                            if output_format.lower() == 'text':
                                writer.dump_patch(
                                    peak_id, hkl_id, hkl, sum_int, max_int,
                                    ang_centers[i_pt], meas_angs,
                                    xy_centers[i_pt], meas_xy)
                            elif output_format.lower() == 'hdf5':
                                xyc_arr = xy_eval.reshape(
                                    prows, pcols, 2
                                ).transpose(2, 0, 1)
                                writer.dump_patch(
                                    detector_id, iRefl, peak_id, hkl_id, hkl,
                                    tth_edges, eta_edges, np.radians(ome_eval),
                                    xyc_arr, ijs, frame_indices, patch_data,
                                    ang_centers[i_pt], xy_centers[i_pt],
                                    meas_angs, meas_xy)
                            pass  # end conditional on write output
                        pass  # end conditional on check only
                        patch_output.append([
                                peak_id, hkl_id, hkl, sum_int, max_int,
                                ang_centers[i_pt], meas_angs, meas_xy,
                                ])
                        iRefl += 1
                    pass  # end patch conditional
                pass  # end patch loop
            output[detector_id] = patch_output
            if filename is not None and output_format.lower() == 'text':
                writer.close()
            pass  # end detector loop
        if filename is not None and output_format.lower() == 'hdf5':
            writer.close()
        return compl, output
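A minimal invocation sketch for the method above, assuming instr is the instrument instance and the other inputs match the signature; all keyword values are illustrative:

completeness, spot_output = instr.pull_spots(
    plane_data, grain_params, omega_imageseries_dict,
    tth_tol=0.25, eta_tol=1.0, ome_tol=1.0,
    npdiv=2, threshold=10,
    dirname='results', filename='spots_grain_0000.out',
    output_format='text', quiet=True)
# completeness is one boolean per simulated reflection; spot_output is keyed by detector_id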
Example #10
def objFuncSX(pFit, pFull, pFlag, dFunc, dFlag,
              xyo_det, hkls_idx, bMat, vInv,
              bVec, eVec, omePeriod,
              simOnly=False, return_value_flag=return_value_flag):
    """
    """
    npts = len(xyo_det)

    refineFlag = np.array(np.hstack([pFlag, dFlag]), dtype=bool)
    print(refineFlag)

    # pFull[refineFlag] = pFit/scl[refineFlag]
    pFull[refineFlag] = pFit

    if dFunc is not None:
        dParams = pFull[-len(dFlag):]
        xys = dFunc(xyo_det[:, :2], dParams)
    else:
        xys = xyo_det[:, :2]

    # detector quantities
    wavelength = pFull[0]

    rMat_d = xf.makeDetectorRotMat(pFull[1:4])
    tVec_d = pFull[4:7].reshape(3, 1)

    # sample quantities
    chi = pFull[7]
    tVec_s = pFull[8:11].reshape(3, 1)

    # crystal quantities
    rMat_c = xf.makeRotMatOfExpMap(pFull[11:14])
    tVec_c = pFull[14:17].reshape(3, 1)

    # stretch tensor comp matrix from MV notation in SAMPLE frame
    vMat_s = mutil.vecMVToSymm(vInv)

    # g-vectors:
    #   1. calculate full g-vector components in CRYSTAL frame from B
    #   2. rotate into SAMPLE frame and apply stretch
    #   3. rotate back into CRYSTAL frame and normalize to unit magnitude
    # IDEA: make a function for this sequence of operations with option for
    # choosing output frame (i.e. CRYSTAL vs SAMPLE vs LAB)
    gVec_c = np.dot(bMat, hkls_idx)
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
    gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))

    match_omes, calc_omes = matchOmegas(
            xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
            vInv=vInv, beamVec=bVec, etaVec=eVec, omePeriod=omePeriod)

    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d, rMat_s, rMat_c,
                                                tVec_d, tVec_s, tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        raise RuntimeError(
            "infeasible pFull: may want to scale" +
            "back finite difference step size")

    # return values
    if simOnly:
        # return simulated values
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        # return residual vector
        # IDEA: try angles instead of xys?
        diff_vecs_xy = calc_xy - xys[:, :2]
        diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2])
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)
                            ]).flatten()
        if return_value_flag == 1:
            # return scalar sum of squared residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # return DOF-normalized chisq
            # TODO: check this calculation
            denom = npts - len(pFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
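The commented three-step sequence (B matrix in the CRYSTAL frame, rotate and stretch in the SAMPLE frame, rotate back and normalize) recurs in several of these objective functions, and the "IDEA" note above suggests factoring it out. A hedged sketch of such a helper, reusing the np/mutil imports from the snippets above; it is not part of hexrd itself:

def stretched_unit_gvecs(bMat, hkls, rMat_c, vMat_s, frame='crystal'):
    """Unit G-vectors after applying the left stretch, in the requested frame."""
    gVec_c = np.dot(bMat, hkls)                        # full G-vectors, CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))    # stretched, SAMPLE frame
    if frame == 'sample':
        return mutil.unitVector(gVec_s)
    return mutil.unitVector(np.dot(rMat_c.T, gVec_s))  # back to CRYSTAL frame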
Example #11
tth_vec = tth_size * (np.arange(ntth) - 0.5 * ntth - 1) + tth0
eta_vec = eta_size * (np.arange(neta) - 0.5 * neta - 1) + eta0

angpts = np.meshgrid(eta_vec, tth_vec, indexing='ij')
gpts = xfc.anglesToGVec(np.vstack([
    np.radians(angpts[1].flatten()),
    np.radians(angpts[0].flatten()),
    np.zeros(neta * ntth)
]).T,
                        bHat_l=d.bvec)

xypts = xfc.gvecToDetectorXY(gpts,
                             d.rmat,
                             np.eye(3),
                             np.eye(3),
                             d.tvec,
                             np.zeros(3),
                             np.zeros(3),
                             beamVec=d.bvec)

img2 = d.interpolate_bilinear(xypts, average_frame).reshape(neta, ntth)
img3 = copy.deepcopy(img2)
borders = np.isnan(img2)
img2[borders] = 0.
img3[borders] = 0.
img3 += np.min(img3) + 1
img3 = np.log(img3)
img3[borders] = np.nan

extent = (np.min(angpts[1]), np.max(angpts[1]), np.min(angpts[0]),
          np.max(angpts[0]))
Example #12
except (KeyError):
    distortion = None

# for defining patches
delta_eta = 0.5
neta = int(360 / float(delta_eta))

eta = np.radians(delta_eta * np.linspace(0, neta - 1, num=neta))

angs = [
    np.vstack([i * np.ones(neta), eta, np.zeros(neta)]) for i in pd.getTTh()
]

# need xy coords and pixel sizes
gVec_ring_l = xf.anglesToGVec(angs[0].T, xf.bVec_ref, xf.eta_ref)
xydet_ring = xfcapi.gvecToDetectorXY(gVec_ring_l.T, rMat_d, rMat_s, rMat_c,
                                     tVec_d, tVec_s, tVec_c)

if distortion is not None:
    det_xy = distortion[0](xydet_ring, distortion[1], invert=True)
ang_ps = angularPixelSize(det_xy,
                          pixel_pitch,
                          rMat_d,
                          rMat_s,
                          tVec_d,
                          tVec_s,
                          tVec_c,
                          distortion=distortion)


def compute_areas(xy_eval_vtx, conn):
    areas = np.zeros(len(conn))
Example #13
    def simulate_laue_pattern(self,
                              crystal_data,
                              minEnergy=5.,
                              maxEnergy=35.,
                              rmat_s=None,
                              tvec_s=None,
                              grain_params=None,
                              beam_vec=None):
        """
        """
        if isinstance(crystal_data, PlaneData):

            plane_data = crystal_data

            # grab the expanded list of hkls from plane_data
            hkls = np.hstack(plane_data.getSymHKLs())

            # and the unit plane normals (G-vectors) in CRYSTAL FRAME
            gvec_c = np.dot(plane_data.latVecOps['B'], hkls)
        elif len(crystal_data) == 2:
            # !!! should clean this up
            hkls = np.array(crystal_data[0])
            bmat = crystal_data[1]
            gvec_c = np.dot(bmat, hkls)
        else:
            raise RuntimeError('argument list not understood')
        nhkls_tot = hkls.shape[1]

        # parse energy ranges
        # TODO: allow for spectrum parsing
        multipleEnergyRanges = False
        if hasattr(maxEnergy, '__len__'):
            assert len(maxEnergy) == len(minEnergy), \
                'energy cutoff ranges must have the same length'
            multipleEnergyRanges = True
            lmin = []
            lmax = []
            for i in range(len(maxEnergy)):
                lmin.append(ct.keVToAngstrom(maxEnergy[i]))
                lmax.append(ct.keVToAngstrom(minEnergy[i]))
        else:
            lmin = ct.keVToAngstrom(maxEnergy)
            lmax = ct.keVToAngstrom(minEnergy)

        # parse grain parameters kwarg
        if grain_params is None:
            grain_params = np.atleast_2d(
                np.hstack([np.zeros(6), ct.identity_6x1]))
        n_grains = len(grain_params)

        # sample rotation
        if rmat_s is None:
            rmat_s = ct.identity_3x3

        # dummy translation vector... make input
        if tvec_s is None:
            tvec_s = ct.zeros_3

        # beam vector
        if beam_vec is None:
            beam_vec = ct.beam_vec

        # =========================================================================
        # LOOP OVER GRAINS
        # =========================================================================

        # pre-allocate output arrays
        xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2))
        hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot))
        angles = np.nan * np.ones((n_grains, nhkls_tot, 2))
        dspacing = np.nan * np.ones((n_grains, nhkls_tot))
        energy = np.nan * np.ones((n_grains, nhkls_tot))
        for iG, gp in enumerate(grain_params):
            rmat_c = makeRotMatOfExpMap(gp[:3])
            tvec_c = gp[3:6].reshape(3, 1)
            vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))

            # stretch them: V^(-1) * R * Gc
            gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
            ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))

            # project
            dpts = gvecToDetectorXY(ghat_c_str.T,
                                    self.rmat,
                                    rmat_s,
                                    rmat_c,
                                    self.tvec,
                                    tvec_s,
                                    tvec_c,
                                    beamVec=beam_vec)

            # check intersections with detector plane
            canIntersect = ~np.isnan(dpts[:, 0])
            npts_in = sum(canIntersect)

            if np.any(canIntersect):
                dpts = dpts[canIntersect, :].reshape(npts_in, 2)
                dhkl = hkls[:, canIntersect].reshape(3, npts_in)

                # back to angles
                tth_eta, gvec_l = detectorXYToGvec(dpts,
                                                   self.rmat,
                                                   rmat_s,
                                                   self.tvec,
                                                   tvec_s,
                                                   tvec_c,
                                                   beamVec=beam_vec)
                tth_eta = np.vstack(tth_eta).T

                # warp measured points
                if self.distortion is not None:
                    if len(self.distortion) == 2:
                        dpts = self.distortion[0](dpts,
                                                  self.distortion[1],
                                                  invert=True)
                    else:
                        raise RuntimeError(
                            "something is wrong with the distortion")

                # plane spacings and energies
                dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)
                wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0])

                # clip to detector panel
                _, on_panel = self.clip_to_panel(dpts, buffer_edges=True)

                if multipleEnergyRanges:
                    validEnergy = np.zeros(len(wlen), dtype=bool)
                    for i in range(len(lmin)):
                        in_energy_range = np.logical_and(
                            wlen >= lmin[i], wlen <= lmax[i])
                        validEnergy = validEnergy | in_energy_range
                        pass
                else:
                    validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
                    pass

                # index for valid reflections
                keepers = np.where(np.logical_and(on_panel, validEnergy))[0]

                # assign output arrays
                xy_det[iG][keepers, :] = dpts[keepers, :]
                hkls_in[iG][:, keepers] = dhkl[:, keepers]
                angles[iG][keepers, :] = tth_eta[keepers, :]
                dspacing[iG, keepers] = dsp[keepers]
                energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
                pass  # close conditional on valids
            pass  # close loop on grains
        return xy_det, hkls_in, angles, dspacing, energy
Example #14
    dparams = instr_cfg["detector"]["distortion"]["parameters"]
    distortion = (xf.dFunc_ref, dparams)
except (KeyError):
    distortion = None

# for defining patches
delta_eta = 0.5
neta = int(360 / float(delta_eta))

eta = np.radians(delta_eta * np.linspace(0, neta - 1, num=neta))

angs = [np.vstack([i * np.ones(neta), eta, np.zeros(neta)]) for i in pd.getTTh()]

# need xy coords and pixel sizes
gVec_ring_l = xf.anglesToGVec(angs[0].T, xf.bVec_ref, xf.eta_ref)
xydet_ring = xfcapi.gvecToDetectorXY(gVec_ring_l.T, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c)

if distortion is not None:
    det_xy = distortion[0](xydet_ring, distortion[1], invert=True)
ang_ps = angularPixelSize(det_xy, pixel_pitch, rMat_d, rMat_s, tVec_d, tVec_s, tVec_c, distortion=distortion)


def compute_areas(xy_eval_vtx, conn):
    areas = np.zeros(len(conn))
    for i in range(len(conn)):
        polygon = [[xy_eval_vtx[conn[i, j], 0], xy_eval_vtx[conn[i, j], 1]] for j in range(4)]
        areas[i] = gutil.computeArea(polygon)
    return areas


@numba.jit
Example #15
    def simulate_laue_pattern(self, crystal_data,
                              minEnergy=5., maxEnergy=35.,
                              rmat_s=None, tvec_s=None,
                              grain_params=None,
                              beam_vec=None):
        """
        """
        if isinstance(crystal_data, PlaneData):

            plane_data = crystal_data

            # grab the expanded list of hkls from plane_data
            hkls = np.hstack(plane_data.getSymHKLs())

            # and the unit plane normals (G-vectors) in CRYSTAL FRAME
            gvec_c = np.dot(plane_data.latVecOps['B'], hkls)
        elif len(crystal_data) == 2:
            # !!! should clean this up
            hkls = np.array(crystal_data[0])
            bmat = crystal_data[1]
            gvec_c = np.dot(bmat, hkls)
        else:
            raise RuntimeError('argument list not understood')
        nhkls_tot = hkls.shape[1]

        # parse energy ranges
        # TODO: allow for spectrum parsing
        multipleEnergyRanges = False
        if hasattr(maxEnergy, '__len__'):
            assert len(maxEnergy) == len(minEnergy), \
                'energy cutoff ranges must have the same length'
            multipleEnergyRanges = True
            lmin = []
            lmax = []
            for i in range(len(maxEnergy)):
                lmin.append(ct.keVToAngstrom(maxEnergy[i]))
                lmax.append(ct.keVToAngstrom(minEnergy[i]))
        else:
            lmin = ct.keVToAngstrom(maxEnergy)
            lmax = ct.keVToAngstrom(minEnergy)

        # parse grain parameters kwarg
        if grain_params is None:
            grain_params = np.atleast_2d(
                np.hstack([np.zeros(6), ct.identity_6x1])
            )
        n_grains = len(grain_params)

        # sample rotation
        if rmat_s is None:
            rmat_s = ct.identity_3x3

        # dummy translation vector... make input
        if tvec_s is None:
            tvec_s = ct.zeros_3

        # beam vector
        if beam_vec is None:
            beam_vec = ct.beam_vec

        # =========================================================================
        # LOOP OVER GRAINS
        # =========================================================================

        # pre-allocate output arrays
        xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2))
        hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot))
        angles = np.nan*np.ones((n_grains, nhkls_tot, 2))
        dspacing = np.nan*np.ones((n_grains, nhkls_tot))
        energy = np.nan*np.ones((n_grains, nhkls_tot))
        for iG, gp in enumerate(grain_params):
            rmat_c = makeRotMatOfExpMap(gp[:3])
            tvec_c = gp[3:6].reshape(3, 1)
            vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))

            # stretch them: V^(-1) * R * Gc
            gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
            ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))

            # project
            dpts = gvecToDetectorXY(ghat_c_str.T,
                                    self.rmat, rmat_s, rmat_c,
                                    self.tvec, tvec_s, tvec_c,
                                    beamVec=beam_vec)

            # check intersections with detector plane
            canIntersect = ~np.isnan(dpts[:, 0])
            npts_in = sum(canIntersect)

            if np.any(canIntersect):
                dpts = dpts[canIntersect, :].reshape(npts_in, 2)
                dhkl = hkls[:, canIntersect].reshape(3, npts_in)

                # back to angles
                tth_eta, gvec_l = detectorXYToGvec(
                    dpts,
                    self.rmat, rmat_s,
                    self.tvec, tvec_s, tvec_c,
                    beamVec=beam_vec)
                tth_eta = np.vstack(tth_eta).T

                # warp measured points
                if self.distortion is not None:
                    if len(self.distortion) == 2:
                        dpts = self.distortion[0](
                            dpts, self.distortion[1],
                            invert=True)
                    else:
                        raise RuntimeError(
                            "something is wrong with the distortion")

                # plane spacings and energies
                dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)
                wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0])

                # clip to detector panel
                _, on_panel = self.clip_to_panel(dpts, buffer_edges=True)

                if multipleEnergyRanges:
                    validEnergy = np.zeros(len(wlen), dtype=bool)
                    for i in range(len(lmin)):
                        in_energy_range = np.logical_and(
                                wlen >= lmin[i],
                                wlen <= lmax[i])
                        validEnergy = validEnergy | in_energy_range
                        pass
                else:
                    validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
                    pass

                # index for valid reflections
                keepers = np.where(np.logical_and(on_panel, validEnergy))[0]

                # assign output arrays
                xy_det[iG][keepers, :] = dpts[keepers, :]
                hkls_in[iG][:, keepers] = dhkl[:, keepers]
                angles[iG][keepers, :] = tth_eta[keepers, :]
                dspacing[iG, keepers] = dsp[keepers]
                energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
                pass    # close conditional on valids
            pass    # close loop on grains
        return xy_det, hkls_in, angles, dspacing, energy
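The wavelength and energy bookkeeping above is just Bragg's law plus the keV/angstrom conversion: lambda = 2*d*sin(tth/2), and E[keV] = 12.39842/lambda[angstrom], which is why ct.keVToAngstrom works as its own inverse. A tiny numeric check with a made-up d-spacing:

import numpy as np

d_spacing = 2.0                                      # angstrom, illustrative value
tth = np.radians(20.0)                               # two-theta of the reflection
wavelength = 2.0 * d_spacing * np.sin(0.5 * tth)     # Bragg's law -> ~0.69 angstrom
energy_keV = 12.39842 / wavelength                   # -> ~17.9 keV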
Example #16
    def make_powder_rings(
            self, pd, merge_hkls=False, delta_tth=None,
            delta_eta=10., eta_period=None,
            rmat_s=ct.identity_3x3,  tvec_s=ct.zeros_3,
            tvec_c=ct.zeros_3, full_output=False):
        """
        """
        # in case you want to give it tth angles directly
        if hasattr(pd, '__len__'):
            tth = np.array(pd).flatten()
            if delta_tth is None:
                raise RuntimeError(
                    "If supplying a 2theta list as first arg, "
                    + "must supply a delta_tth")
            sector_vertices = np.tile(
                0.5*np.radians([-delta_tth, -delta_eta,
                                -delta_tth, delta_eta,
                                delta_tth, delta_eta,
                                delta_tth, -delta_eta,
                                0.0, 0.0]), (len(tth), 1)
                )
        else:
            # Okay, we have a PlaneData object
            pd = PlaneData.makeNew(pd)    # make a copy to munge
            if delta_tth is not None:
                pd.tThWidth = np.radians(delta_tth)
            else:
                delta_tth = np.degrees(pd.tThWidth)

            # conversions, meh...
            del_eta = np.radians(delta_eta)

            # do merging if asked
            if merge_hkls:
                _, tth_ranges = pd.getMergedRanges()
                tth = np.array([0.5*sum(i) for i in tth_ranges])
            else:
                tth_ranges = pd.getTThRanges()
                tth = pd.getTTh()
            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
            sector_vertices = np.vstack(
                [[i[0], -del_eta,
                  i[0], del_eta,
                  i[1], del_eta,
                  i[1], -del_eta,
                  0.0, 0.0]
                 for i in tth_pm])

        # for generating rings
        if eta_period is None:
            eta_period = (-np.pi, np.pi)

        neta = int(360./float(delta_eta))
        eta = mapAngle(
            np.radians(delta_eta*(np.linspace(0, neta - 1, num=neta) + 0.5)) +
            eta_period[0], eta_period
        )

        angs = [np.vstack([i*np.ones(neta), eta, np.zeros(neta)]) for i in tth]

        # need xy coords and pixel sizes
        valid_ang = []
        valid_xy = []
        map_indices = []
        npp = 5  # [ll, ul, ur, lr, center]
        for i_ring in range(len(angs)):
            # expand angles to patch vertices
            these_angs = angs[i_ring].T
            patch_vertices = (
                np.tile(these_angs[:, :2], (1, npp))
                + np.tile(sector_vertices[i_ring], (neta, 1))
            ).reshape(npp*neta, 2)

            # duplicate ome array
            ome_dupl = np.tile(
                these_angs[:, 2], (npp, 1)
            ).T.reshape(npp*neta, 1)

            # find vertices that all fall on the panel
            gVec_ring_l = anglesToGVec(
                np.hstack([patch_vertices, ome_dupl]),
                bHat_l=self.bvec)
            all_xy = gvecToDetectorXY(
                gVec_ring_l,
                self.rmat, rmat_s, ct.identity_3x3,
                self.tvec, tvec_s, tvec_c,
                beamVec=self.bvec)
            _, on_panel = self.clip_to_panel(all_xy)

            # all vertices must be on...
            patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
            patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]

            idx = np.where(patch_is_on)[0]

            valid_ang.append(these_angs[patch_is_on, :2])
            valid_xy.append(patch_xys[:, -1, :].squeeze())
            map_indices.append(idx)
            pass
        # ??? is this option necessary?
        if full_output:
            return valid_ang, valid_xy, map_indices, eta
        else:
            return valid_ang, valid_xy
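A short usage sketch for the ring generator above, assuming panel is the detector instance and plane_data a PlaneData object; the keyword values are illustrative:

valid_ang, valid_xy = panel.make_powder_rings(
    plane_data, merge_hkls=True, delta_eta=5.0)

# with full_output=True the per-ring azimuthal map indices and eta centers come back too
valid_ang, valid_xy, map_idx, eta_centers = panel.make_powder_rings(
    plane_data, merge_hkls=True, delta_eta=5.0, full_output=True)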
Example #17
def objFuncFitGrain(gFit,
                    gFull,
                    gFlag,
                    detectorParams,
                    xyo_det,
                    hkls_idx,
                    bMat,
                    wavelength,
                    bVec,
                    eVec,
                    dFunc,
                    dParams,
                    omePeriod,
                    simOnly=False,
                    returnScalarValue=returnScalarValue):
    """
    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    detectorParams[0]  = tiltAngles[0]
    detectorParams[1]  = tiltAngles[1]
    detectorParams[2]  = tiltAngles[2]
    detectorParams[3]  = tVec_d[0]
    detectorParams[4]  = tVec_d[1]
    detectorParams[5]  = tVec_d[2]
    detectorParams[6]  = chi
    detectorParams[7]  = tVec_s[0]
    detectorParams[8]  = tVec_s[1]
    detectorParams[9]  = tVec_s[2]
    """
    npts = len(xyo_det)

    gFull[gFlag] = gFit

    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3])
    tVec_d = detectorParams[3:6].reshape(3, 1)
    chi = detectorParams[6]
    tVec_s = detectorParams[7:10].reshape(3, 1)

    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    gVec_c = np.dot(bMat, hkls_idx)  # gVecs with magnitudes in CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c,
                                   gVec_c))  # stretched gVecs in SAMPLE frame
    gHat_c = mutil.unitVector(np.dot(
        rMat_c.T, gVec_s))  # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(xyo_det,
                                        hkls_idx,
                                        chi,
                                        rMat_c,
                                        bMat,
                                        wavelength,
                                        vInv=vInv_s,
                                        beamVec=bVec,
                                        etaVec=eVec,
                                        omePeriod=omePeriod)

    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d,
                                                rMat_s,
                                                rMat_c,
                                                tVec_d,
                                                tVec_s,
                                                tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        print "infeasible pFull"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2])
        retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten()
        if returnScalarValue:
            retval = sum(retval)
    return retval
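The docstring above spells out how gFull and detectorParams are packed. Below is a minimal driver sketch, assuming objFuncFitGrain is importable and that scipy.optimize.leastsq is the solver; the wrapper name and the argument bundling are illustrative, not taken from this snippet.

from scipy.optimize import leastsq


def fit_grain_sketch(gFull, gFlag, detectorParams, xyo_det, hkls_idx, bMat,
                     wavelength, bVec, eVec, dFunc, dParams, omePeriod):
    # hypothetical wrapper: refine only the entries of gFull flagged in gFlag
    gFit0 = gFull[gFlag]
    fit_args = (gFull, gFlag, detectorParams, xyo_det, hkls_idx, bMat,
                wavelength, bVec, eVec, dFunc, dParams, omePeriod)
    gFit_opt, ierr = leastsq(objFuncFitGrain, gFit0, args=fit_args)
    gFull[gFlag] = gFit_opt  # scatter refined values back into the full vector
    return gFull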
Example #18
    def pull_spots(self,
                   plane_data,
                   grain_params,
                   imgser_dict,
                   tth_tol=0.25,
                   eta_tol=1.,
                   ome_tol=1.,
                   npdiv=2,
                   threshold=10,
                   eta_ranges=[
                       (-np.pi, np.pi),
                   ],
                   ome_period=(-np.pi, np.pi),
                   dirname='results',
                   filename=None,
                   output_format='text',
                   save_spot_list=False,
                   quiet=True,
                   check_only=False,
                   interp='nearest'):
        """
        Extract reflection info from a rotation series encoded as an
        OmegaImageseries object
        """

        # grain parameters
        rMat_c = makeRotMatOfExpMap(grain_params[:3])
        tVec_c = grain_params[3:6]

        # grab omega ranges from first imageseries
        #
        # WARNING: all imageseries AND all wedges within are assumed to have
        # the same omega values; put in a check that they are all the same???
        oims0 = imgser_dict[imgser_dict.keys()[0]]
        ome_ranges = [
            np.radians([i['ostart'], i['ostop']])
            for i in oims0.omegawedges.wedges
        ]

        # delta omega in DEGREES grabbed from first imageseries in the dict
        delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]

        # make omega grid for frame expansion around reference frame
        # in DEGREES
        ndiv_ome, ome_del = make_tolerance_grid(
            delta_ome,
            ome_tol,
            1,
            adjust_window=True,
        )

        # generate structuring element for connected component labeling
        if ndiv_ome == 1:
            label_struct = ndimage.generate_binary_structure(2, 2)
        else:
            label_struct = ndimage.generate_binary_structure(3, 3)

        # simulate rotation series
        sim_results = self.simulate_rotation_series(plane_data, [
            grain_params,
        ],
                                                    eta_ranges=eta_ranges,
                                                    ome_ranges=ome_ranges,
                                                    ome_period=ome_period)

        # patch vertex generator (global for instrument)
        tol_vec = 0.5 * np.radians([
            -tth_tol, -eta_tol, -tth_tol, eta_tol, tth_tol, eta_tol, tth_tol,
            -eta_tol
        ])

        # prepare output if requested
        if filename is not None and output_format.lower() == 'hdf5':
            this_filename = os.path.join(dirname, filename)
            writer = io.GrainDataWriter_h5(os.path.join(dirname, filename),
                                           self.write_config(), grain_params)

        # =====================================================================
        # LOOP OVER PANELS
        # =====================================================================
        iRefl = 0
        compl = []
        output = dict.fromkeys(self.detectors)
        for detector_id in self.detectors:
            # initialize text-based output writer
            if filename is not None and output_format.lower() == 'text':
                output_dir = os.path.join(dirname, detector_id)
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                this_filename = os.path.join(output_dir, filename)
                writer = io.PatchDataWriter(this_filename)

            # grab panel
            panel = self.detectors[detector_id]
            instr_cfg = panel.config_dict(self.chi, self.tvec)
            native_area = panel.pixel_area  # pixel ref area

            # pull out the OmegaImageSeries for this panel from input dict
            ome_imgser = imgser_dict[detector_id]

            # extract simulation results
            sim_results_p = sim_results[detector_id]
            hkl_ids = sim_results_p[0][0]
            hkls_p = sim_results_p[1][0]
            ang_centers = sim_results_p[2][0]
            xy_centers = sim_results_p[3][0]
            ang_pixel_size = sim_results_p[4][0]

            # now verify that full patch falls on detector...
            # ???: strictly necessary?
            #
            # patch vertex array from sim
            nangs = len(ang_centers)
            patch_vertices = (np.tile(ang_centers[:, :2],
                                      (1, 4)) + np.tile(tol_vec,
                                                        (nangs, 1))).reshape(
                                                            4 * nangs, 2)
            ome_dupl = np.tile(ang_centers[:, 2],
                               (4, 1)).T.reshape(len(patch_vertices), 1)

            # find vertices that all fall on the panel
            det_xy, _ = xrdutil._project_on_detector_plane(
                np.hstack([patch_vertices, ome_dupl]), panel.rmat, rMat_c,
                self.chi, panel.tvec, tVec_c, self.tvec, panel.distortion)
            _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)

            # all vertices must be on...
            patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
            patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]

            # re-filter...
            hkl_ids = hkl_ids[patch_is_on]
            hkls_p = hkls_p[patch_is_on, :]
            ang_centers = ang_centers[patch_is_on, :]
            xy_centers = xy_centers[patch_is_on, :]
            ang_pixel_size = ang_pixel_size[patch_is_on, :]

            # TODO: add polygon testing right here!
            # done <JVB 06/21/16>
            if check_only:
                patch_output = []
                for i_pt, angs in enumerate(ang_centers):
                    # the evaluation omegas;
                    # expand about the central value using tol vector
                    ome_eval = np.degrees(angs[2]) + ome_del

                    # ...vectorize the omega_to_frame function to avoid loop?
                    frame_indices = [
                        ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                    ]
                    if -1 in frame_indices:
                        if not quiet:
                            msg = """
                            window for (%d%d%d) falls outside omega range
                            """ % tuple(hkls_p[i_pt, :])
                            print(msg)
                        continue
                    else:
                        these_vertices = patch_xys[i_pt]
                        ijs = panel.cartToPixel(these_vertices)
                        ii, jj = polygon(ijs[:, 0], ijs[:, 1])
                        contains_signal = False
                        for i_frame in frame_indices:
                            contains_signal = contains_signal or np.any(
                                ome_imgser[i_frame][ii, jj] > threshold)
                        compl.append(contains_signal)
                        patch_output.append((ii, jj, frame_indices))
            else:
                # make the tth,eta patches for interpolation
                patches = xrdutil.make_reflection_patches(
                    instr_cfg,
                    ang_centers[:, :2],
                    ang_pixel_size,
                    omega=ang_centers[:, 2],
                    tth_tol=tth_tol,
                    eta_tol=eta_tol,
                    rMat_c=rMat_c,
                    tVec_c=tVec_c,
                    distortion=panel.distortion,
                    npdiv=npdiv,
                    quiet=True,
                    beamVec=self.beam_vector)

                # GRAND LOOP over reflections for this panel
                patch_output = []
                for i_pt, patch in enumerate(patches):

                    # strip relevant objects out of current patch
                    vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch

                    prows, pcols = areas.shape
                    nrm_fac = areas / float(native_area)
                    nrm_fac = nrm_fac / np.min(nrm_fac)

                    # grab hkl info
                    hkl = hkls_p[i_pt, :]
                    hkl_id = hkl_ids[i_pt]

                    # edge arrays
                    tth_edges = vtx_angs[0][0, :]
                    delta_tth = tth_edges[1] - tth_edges[0]
                    eta_edges = vtx_angs[1][:, 0]
                    delta_eta = eta_edges[1] - eta_edges[0]

                    # need to reshape eval pts for interpolation
                    xy_eval = np.vstack(
                        [xy_eval[0].flatten(), xy_eval[1].flatten()]).T

                    # the evaluation omegas;
                    # expand about the central value using tol vector
                    ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del

                    # ???: vectorize the omega_to_frame function to avoid loop?
                    frame_indices = [
                        ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                    ]

                    if -1 in frame_indices:
                        if not quiet:
                            msg = """
                            window for (%d%d%d) falls outside omega range
                            """ % tuple(hkl)
                            print(msg)
                        continue
                    else:
                        # initialize spot data parameters
                        # !!! maybe change these to nan to not f**k up writer
                        peak_id = -999
                        sum_int = None
                        max_int = None
                        meas_angs = None
                        meas_xy = None

                        # quick check for intensity
                        contains_signal = False
                        patch_data_raw = []
                        for i_frame in frame_indices:
                            tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
                            contains_signal = contains_signal or np.any(
                                tmp > threshold)
                            patch_data_raw.append(tmp)
                            pass
                        patch_data_raw = np.stack(patch_data_raw, axis=0)
                        compl.append(contains_signal)

                        if contains_signal:
                            # initialize patch data array for intensities
                            if interp.lower() == 'bilinear':
                                patch_data = np.zeros(
                                    (len(frame_indices), prows, pcols))
                                for i, i_frame in enumerate(frame_indices):
                                    patch_data[i] = \
                                        panel.interpolate_bilinear(
                                            xy_eval,
                                            ome_imgser[i_frame],
                                            pad_with_nans=False
                                        ).reshape(prows, pcols)  # * nrm_fac
                            elif interp.lower() == 'nearest':
                                patch_data = patch_data_raw  # * nrm_fac
                            else:
                                msg = "interpolation option " + \
                                    "'%s' not understood"
                                raise RuntimeError(msg % interp)

                            # now have interpolated patch data...
                            labels, num_peaks = ndimage.label(
                                patch_data > threshold, structure=label_struct)
                            slabels = np.arange(1, num_peaks + 1)

                            if num_peaks > 0:
                                peak_id = iRefl
                                coms = np.array(
                                    ndimage.center_of_mass(patch_data,
                                                           labels=labels,
                                                           index=slabels))
                                if num_peaks > 1:
                                    center = np.r_[patch_data.shape] * 0.5
                                    center_t = np.tile(center, (num_peaks, 1))
                                    com_diff = coms - center_t
                                    closest_peak_idx = np.argmin(
                                        np.sum(com_diff**2, axis=1))
                                else:
                                    closest_peak_idx = 0
                                    pass  # end multipeak conditional
                                coms = coms[closest_peak_idx]
                                # meas_omes = \
                                #     ome_edges[0] + (0.5 + coms[0])*delta_ome
                                meas_omes = \
                                    ome_eval[0] + coms[0]*delta_ome
                                meas_angs = np.hstack([
                                    tth_edges[0] + (0.5 + coms[2]) * delta_tth,
                                    eta_edges[0] + (0.5 + coms[1]) * delta_eta,
                                    mapAngle(np.radians(meas_omes), ome_period)
                                ])

                                # intensities
                                #   - summed is 'integrated' over interpolated
                                #     data
                                #   - max is max of raw input data
                                sum_int = np.sum(patch_data[
                                    labels == slabels[closest_peak_idx]])
                                max_int = np.max(patch_data_raw[
                                    labels == slabels[closest_peak_idx]])
                                # ???: Should this only use labeled pixels?
                                # Those are segmented from interpolated data,
                                # not raw; likely ok in most cases.

                                # need MEASURED xy coords
                                gvec_c = anglesToGVec(meas_angs,
                                                      chi=self.chi,
                                                      rMat_c=rMat_c,
                                                      bHat_l=self.beam_vector)
                                rMat_s = makeOscillRotMat(
                                    [self.chi, meas_angs[2]])
                                meas_xy = gvecToDetectorXY(
                                    gvec_c,
                                    panel.rmat,
                                    rMat_s,
                                    rMat_c,
                                    panel.tvec,
                                    self.tvec,
                                    tVec_c,
                                    beamVec=self.beam_vector)
                                if panel.distortion is not None:
                                    # FIXME: distortion handling
                                    meas_xy = panel.distortion[0](
                                        np.atleast_2d(meas_xy),
                                        panel.distortion[1],
                                        invert=True).flatten()
                                    pass
                                # FIXME: why is this suddenly necessary???
                                meas_xy = meas_xy.squeeze()
                                pass  # end num_peaks > 0
                        else:
                            patch_data = patch_data_raw
                            pass  # end contains_signal
                        # write output
                        if filename is not None:
                            if output_format.lower() == 'text':
                                writer.dump_patch(peak_id, hkl_id, hkl,
                                                  sum_int, max_int,
                                                  ang_centers[i_pt], meas_angs,
                                                  xy_centers[i_pt], meas_xy)
                            elif output_format.lower() == 'hdf5':
                                xyc_arr = xy_eval.reshape(prows, pcols,
                                                          2).transpose(
                                                              2, 0, 1)
                                writer.dump_patch(
                                    detector_id, iRefl, peak_id, hkl_id,
                                    hkl, tth_edges, eta_edges,
                                    np.radians(ome_eval), xyc_arr, ijs,
                                    frame_indices, patch_data,
                                    ang_centers[i_pt], xy_centers[i_pt],
                                    meas_angs, meas_xy)
                            pass  # end conditional on write output
                        pass  # end conditional on check only
                        patch_output.append([
                            peak_id,
                            hkl_id,
                            hkl,
                            sum_int,
                            max_int,
                            ang_centers[i_pt],
                            meas_angs,
                            meas_xy,
                        ])
                        iRefl += 1
                    pass  # end patch conditional
                pass  # end patch loop
            output[detector_id] = patch_output
            if filename is not None and output_format.lower() == 'text':
                writer.close()
            pass  # end detector loop
        if filename is not None and output_format.lower() == 'hdf5':
            writer.close()
        return compl, output
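A rough usage sketch for the method above; the instrument object, the imageseries dictionary, and the output filename are assumptions, and the keyword values simply echo the defaults in the signature.

import numpy as np

# hypothetical call: `instr` exposes pull_spots and `omega_images` maps
# detector_id -> OmegaImageSeries, mirroring the imgser_dict argument above
compl, output = instr.pull_spots(
    plane_data, grain_params, omega_images,
    tth_tol=0.25, eta_tol=1., ome_tol=1.,
    npdiv=2, threshold=10,
    ome_period=(-np.pi, np.pi),
    dirname='results', filename='spots_grain_0.out',
    output_format='text', quiet=True)

# fraction of predicted reflections that contained signal above threshold
completeness = sum(compl) / float(len(compl))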
Example #19
    def make_powder_rings(self,
                          pd,
                          merge_hkls=False,
                          delta_tth=None,
                          delta_eta=10.,
                          eta_period=None,
                          rmat_s=ct.identity_3x3,
                          tvec_s=ct.zeros_3,
                          tvec_c=ct.zeros_3,
                          full_output=False):
        """
        """
        # in case you want to give it tth angles directly
        if hasattr(pd, '__len__'):
            tth = np.array(pd).flatten()
            if delta_tth is None:
                raise RuntimeError(
                    "If supplying a 2theta list as first arg, " +
                    "must supply a delta_tth")
            sector_vertices = np.tile(
                0.5 * np.radians([
                    -delta_tth, -delta_eta, -delta_tth, delta_eta, delta_tth,
                    delta_eta, delta_tth, -delta_eta, 0.0, 0.0
                ]), (len(tth), 1))
        else:
            # Okay, we have a PlaneData object
            pd = PlaneData.makeNew(pd)  # make a copy to munge
            if delta_tth is not None:
                pd.tThWidth = np.radians(delta_tth)
            else:
                delta_tth = np.degrees(pd.tThWidth)

            # conversions, meh...
            del_eta = np.radians(delta_eta)

            # do merging if asked
            if merge_hkls:
                _, tth_ranges = pd.getMergedRanges()
                tth = np.array([0.5 * sum(i) for i in tth_ranges])
            else:
                tth_ranges = pd.getTThRanges()
                tth = pd.getTTh()
            tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
            sector_vertices = np.vstack([[
                i[0], -del_eta, i[0], del_eta, i[1], del_eta, i[1], -del_eta,
                0.0, 0.0
            ] for i in tth_pm])

        # for generating rings
        if eta_period is None:
            eta_period = (-np.pi, np.pi)

        neta = int(360. / float(delta_eta))
        eta = mapAngle(
            np.radians(delta_eta *
                       (np.linspace(0, neta - 1, num=neta) + 0.5)) +
            eta_period[0], eta_period)

        angs = [
            np.vstack([i * np.ones(neta), eta,
                       np.zeros(neta)]) for i in tth
        ]

        # need xy coords and pixel sizes
        valid_ang = []
        valid_xy = []
        map_indices = []
        npp = 5  # [ll, ul, ur, lr, center]
        for i_ring in range(len(angs)):
            # expand angles to patch vertices
            these_angs = angs[i_ring].T
            patch_vertices = (np.tile(these_angs[:, :2], (1, npp)) +
                              np.tile(sector_vertices[i_ring],
                                      (neta, 1))).reshape(npp * neta, 2)

            # duplicate ome array
            ome_dupl = np.tile(these_angs[:, 2],
                               (npp, 1)).T.reshape(npp * neta, 1)

            # find vertices that all fall on the panel
            gVec_ring_l = anglesToGVec(np.hstack([patch_vertices, ome_dupl]),
                                       bHat_l=self.bvec)
            all_xy = gvecToDetectorXY(gVec_ring_l,
                                      self.rmat,
                                      rmat_s,
                                      ct.identity_3x3,
                                      self.tvec,
                                      tvec_s,
                                      tvec_c,
                                      beamVec=self.bvec)
            _, on_panel = self.clip_to_panel(all_xy)

            # all vertices must be on...
            patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
            patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]

            idx = np.where(patch_is_on)[0]

            valid_ang.append(these_angs[patch_is_on, :2])
            valid_xy.append(patch_xys[:, -1, :].squeeze())
            map_indices.append(idx)
            pass
        # ??? is this option necessary?
        if full_output:
            return valid_ang, valid_xy, map_indices, eta
        else:
            return valid_ang, valid_xy
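As the branching above shows, make_powder_rings accepts either a PlaneData object or a bare array of two-theta centers; in the latter case delta_tth must be supplied explicitly. A small sketch of that second path, with the panel variable name assumed:

import numpy as np

# hypothetical detector panel object exposing make_powder_rings
tth_centers = np.radians([5.0, 7.5, 10.0])        # ring centers in radians
valid_ang, valid_xy = panel.make_powder_rings(
    tth_centers, delta_tth=0.25, delta_eta=5.0)   # tolerances in degrees
for ring_angs in valid_ang:
    print("%d azimuthal patches fell on the panel" % len(ring_angs))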
Example #20
def objFuncSX(pFit,
              pFull,
              pFlag,
              dFunc,
              dFlag,
              xyo_det,
              hkls_idx,
              bMat,
              vInv,
              bVec,
              eVec,
              omePeriod,
              simOnly=False,
              return_value_flag=return_value_flag):
    """
    """
    npts = len(xyo_det)

    refineFlag = np.array(np.hstack([pFlag, dFlag]), dtype=bool)
    print refineFlag

    # pFull[refineFlag] = pFit/scl[refineFlag]
    pFull[refineFlag] = pFit

    if dFunc is not None:
        dParams = pFull[-len(dFlag):]
        xys = dFunc(xyo_det[:, :2], dParams)
    else:
        xys = xyo_det[:, :2]

    # detector quantities
    wavelength = pFull[0]

    rMat_d = xf.makeDetectorRotMat(pFull[1:4])
    tVec_d = pFull[4:7].reshape(3, 1)

    # sample quantities
    chi = pFull[7]
    tVec_s = pFull[8:11].reshape(3, 1)

    # crystal quantities
    rMat_c = xf.makeRotMatOfExpMap(pFull[11:14])
    tVec_c = pFull[14:17].reshape(3, 1)

    # stretch tensor comp matrix from MV notation in SAMPLE frame
    vMat_s = mutil.vecMVToSymm(vInv)

    # g-vectors:
    #   1. calculate full g-vector components in CRYSTAL frame from B
    #   2. rotate into SAMPLE frame and apply stretch
    #   3. rotate back into CRYSTAL frame and normalize to unit magnitude
    # IDEA: make a function for this sequence of operations with option for
    # choosing ouput frame (i.e. CRYSTAL vs SAMPLE vs LAB)
    gVec_c = np.dot(bMat, hkls_idx)
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
    gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))

    match_omes, calc_omes = matchOmegas(xyo_det,
                                        hkls_idx,
                                        chi,
                                        rMat_c,
                                        bMat,
                                        wavelength,
                                        vInv=vInv,
                                        beamVec=bVec,
                                        etaVec=eVec,
                                        omePeriod=omePeriod)

    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d,
                                                rMat_s,
                                                rMat_c,
                                                tVec_d,
                                                tVec_s,
                                                tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        raise RuntimeError("infeasible pFull: may want to scale" +
                           "back finite difference step size")

    # return values
    if simOnly:
        # return simulated values
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        # return residual vector
        # IDEA: try angles instead of xys?
        diff_vecs_xy = calc_xy - xys[:, :2]
        diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2])
        retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten()
        if return_value_flag == 1:
            # return scalar sum of absolute residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # return DOF-normalized chisq
            # TODO: check this calculation
            denom = npts - len(pFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
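The numbered comment above (steps 1-3) and the accompanying IDEA suggest factoring the g-vector construction into a helper with a selectable output frame. A sketch under that assumption follows; the import path for unitVector is assumed, and a LAB-frame option would additionally need rMat_s.

import numpy as np
from hexrd import matrixutil as mutil  # assumed location of unitVector


def stretched_unit_gvecs(bMat, hkls, vMat_s, rMat_c, frame='CRYSTAL'):
    # 1. full g-vector components in the CRYSTAL frame from B
    gVec_c = np.dot(bMat, hkls)
    # 2. rotate into the SAMPLE frame and apply the stretch
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
    if frame == 'SAMPLE':
        return mutil.unitVector(gVec_s)
    # 3. rotate back into the CRYSTAL frame and normalize
    return mutil.unitVector(np.dot(rMat_c.T, gVec_s))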
## print "Maximum disagreement in eta:  %f"%maxDiff_eta
# maxDiff_gVec = np.linalg.norm(np.sqrt(np.sum(np.asarray(gVec_l1.T-gVec_l2)**2,1)),np.inf)
# print "Maximum disagreement in gVec: %f"%maxDiff_gVec



gVec_c1 = np.dot(rMat_c.T,np.dot(rMat_s.T,gVec_l1))
gVec_c2 = np.ascontiguousarray(np.dot(rMat_c.T,np.dot(rMat_s.T,gVec_l2.T)).T)

start3 = timer()                      # time this
xy1 = xf.gvecToDetectorXY(gVec_c1,rMat_d,rMat_s,rMat_c,tVec_d,tVec_s,tVec_c,beamVec=bVec_ref)
elapsed3 = (timer() - start3)
print "Time for Python gvecToDetectorXY: %f"%(elapsed3)

start4 = timer()                      # time this
xy2 = xfcapi.gvecToDetectorXY(gVec_c2,rMat_d,rMat_s,rMat_c,tVec_d,tVec_s,tVec_c,beamVec=bVec_ref)
elapsed4 = (timer() - start4)
print "Time for CAPI gvecToDetectorXY: %f"%(elapsed4)


print 'cudadevice: ', numba.cuda.get_current_device().name


# setup for numba version
# should be able to run in nopython mode
bHat_l = np.zeros(3)
nVec_l = np.zeros(3)
P0_l = np.zeros(3)
P2_l = np.zeros(3)
P2_d = np.zeros(3)
P3_l = np.zeros(3)
neta = int(eta_range/eta_size)

tth_vec = tth_size*(np.arange(ntth) - 0.5*ntth - 1) + tth0
eta_vec = eta_size*(np.arange(neta) - 0.5*neta - 1) + eta0

angpts = np.meshgrid(eta_vec, tth_vec, indexing='ij')
gpts = xfc.anglesToGVec(
    np.vstack([
        np.radians(angpts[1].flatten()), 
        np.radians(angpts[0].flatten()), 
        np.zeros(neta*ntth)
        ]).T, bHat_l=d.bvec)

xypts = xfc.gvecToDetectorXY(
    gpts, 
    d.rmat, np.eye(3), np.eye(3), 
    d.tvec, np.zeros(3), np.zeros(3), 
    beamVec=d.bvec)

img2 = d.interpolate_bilinear(xypts, average_frame).reshape(neta, ntth)
img3 = copy.deepcopy(img2)
borders = np.isnan(img2)
img2[borders] = 0.
img3[borders] = 0.
img3 += np.min(img3) + 1
img3 = np.log(img3)
img3[borders] = np.nan

extent = (
    np.min(angpts[1]), np.max(angpts[1]),
    np.min(angpts[0]), np.max(angpts[0])
)
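The extent tuple above is presumably destined for an image display of the remapped (eta, two-theta) data; the snippet is cut off before any plotting call, so the matplotlib usage below is purely a hypothetical illustration.

import matplotlib.pyplot as plt

# hypothetical display of the log-scaled remap img3 (rows = eta, cols = tth)
fig, ax = plt.subplots()
ax.imshow(img3, extent=extent, aspect='auto')
ax.set_xlabel('two theta (deg)')
ax.set_ylabel('eta (deg)')
plt.show()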
Example #23
def make_reflection_patches(instr_cfg,
                            tth_eta,
                            ang_pixel_size,
                            omega=None,
                            tth_tol=0.2,
                            eta_tol=1.0,
                            rMat_c=np.eye(3),
                            tVec_c=np.c_[0., 0., 0.].T,
                            distortion=distortion,
                            npdiv=1,
                            quiet=False,
                            compute_areas_func=compute_areas):
    """
    prototype function for making angular patches on a detector

    panel_dims are [(xmin, ymin), (xmax, ymax)] in mm

    pixel_pitch is [row_size, column_size] in mm

    DISTORTION HANDING IS STILL A KLUDGE

    patches are:

                 delta tth
   d  ------------- ... -------------
   e  | x | x | x | ... | x | x | x |
   l  ------------- ... -------------
   t                 .
   a                 .
                     .
   e  ------------- ... -------------
   t  | x | x | x | ... | x | x | x |
   a  ------------- ... -------------

    """
    npts = len(tth_eta)

    # detector frame
    rMat_d = xfcapi.makeDetectorRotMat(
        instr_cfg['detector']['transform']['tilt_angles'])
    tVec_d = np.r_[instr_cfg['detector']['transform']['t_vec_d']]
    pixel_size = instr_cfg['detector']['pixels']['size']

    frame_nrows = instr_cfg['detector']['pixels']['rows']
    frame_ncols = instr_cfg['detector']['pixels']['columns']

    panel_dims = (
        -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]],
        0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]])
    row_edges = np.arange(frame_nrows +
                          1)[::-1] * pixel_size[1] + panel_dims[0][1]
    col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0]

    # sample frame
    chi = instr_cfg['oscillation_stage']['chi']
    tVec_s = np.r_[instr_cfg['oscillation_stage']['t_vec_s']]

    # data to loop
    # ...WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL?
    if omega is None:
        full_angs = np.hstack([tth_eta, np.zeros((npts, 1))])
    else:
        full_angs = np.hstack([tth_eta, omega.reshape(npts, 1)])
    patches = []
    for angs, pix in zip(full_angs, ang_pixel_size):
        # need to get angular pixel size
        rMat_s = xfcapi.makeOscillRotMat([chi, angs[2]])

        ndiv_tth = npdiv * np.ceil(tth_tol / np.degrees(pix[0]))
        ndiv_eta = npdiv * np.ceil(eta_tol / np.degrees(pix[1]))

        tth_del = np.arange(
            0, ndiv_tth + 1) * tth_tol / float(ndiv_tth) - 0.5 * tth_tol
        eta_del = np.arange(
            0, ndiv_eta + 1) * eta_tol / float(ndiv_eta) - 0.5 * eta_tol

        # store dimensions for convenience
        #   * etas and tths are bin vertices, ome is already centers
        sdims = [len(eta_del) - 1, len(tth_del) - 1]

        # meshgrid args are (cols, rows), a.k.a (fast, slow)
        m_tth, m_eta = np.meshgrid(tth_del, eta_del)
        npts_patch = m_tth.size

        # calculate the patch XY coords from the (tth, eta) angles
        # * will CHEAT and ignore the small perturbation the different
        #   omega angle values causes and simply use the central value
        gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) \
                        + np.radians(
                            np.vstack([m_tth.flatten(),
                                       m_eta.flatten(),
                                       np.zeros(npts_patch)
                                       ]).T
                                     )

        # will need this later
        rMat_s = xfcapi.makeOscillRotMat([chi, angs[2]])

        # FOR ANGULAR MESH
        conn = gutil.cellConnectivity(sdims[0], sdims[1], origin='ll')
        gVec_c = xf.anglesToGVec(gVec_angs_vtx,
                                 xf.bVec_ref,
                                 xf.eta_ref,
                                 rMat_s=rMat_s,
                                 rMat_c=rMat_c)

        xy_eval_vtx = xfcapi.gvecToDetectorXY(gVec_c.T, rMat_d, rMat_s, rMat_c,
                                              tVec_d, tVec_s, tVec_c)
        if distortion is not None and len(distortion) == 2:
            xy_eval_vtx = distortion[0](xy_eval_vtx,
                                        distortion[1],
                                        invert=True)
            pass

        areas = compute_areas_func(xy_eval_vtx, conn)

        # EVALUATION POINTS
        #   * for lack of a better option will use centroids
        tth_eta_cen = gutil.cellCentroids(np.atleast_2d(gVec_angs_vtx[:, :2]),
                                          conn)
        gVec_angs = np.hstack(
            [tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))])
        gVec_c = xf.anglesToGVec(gVec_angs,
                                 xf.bVec_ref,
                                 xf.eta_ref,
                                 rMat_s=rMat_s,
                                 rMat_c=rMat_c)

        xy_eval = xfcapi.gvecToDetectorXY(gVec_c.T, rMat_d, rMat_s, rMat_c,
                                          tVec_d, tVec_s, tVec_c)
        if distortion is not None and len(distortion) == 2:
            xy_eval = distortion[0](xy_eval, distortion[1], invert=True)
            pass
        row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1])
        col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0])

        patches.append(
            ((gVec_angs_vtx[:, 0].reshape(m_tth.shape),
              gVec_angs_vtx[:, 1].reshape(m_tth.shape)),
             (xy_eval_vtx[:, 0].reshape(m_tth.shape),
              xy_eval_vtx[:, 1].reshape(m_tth.shape)), conn,
             areas.reshape(sdims[0],
                           sdims[1]), (row_indices.reshape(sdims[0], sdims[1]),
                                       col_indices.reshape(sdims[0],
                                                           sdims[1]))))
        pass
    return patches
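Each entry appended above packs the vertex angles, vertex XY coordinates, connectivity, bin areas, and (row, col) pixel indices; a minimal sketch of walking the returned list:

# `patches` is the list returned by make_reflection_patches above
for vtx_angs, vtx_xy, conn, areas, ij_indices in patches:
    tth_vertices = vtx_angs[0]      # two-theta vertex grid (radians)
    eta_vertices = vtx_angs[1]      # eta vertex grid (radians)
    nrows, ncols = areas.shape      # angular bin grid dimensions
    row_idx, col_idx = ij_indices   # pixel indices of the bin centroids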
eta_d2   = dangs2[0][1]
gVec_l2  = dangs2[1]
elapsed2 = (time.clock() - start2)
print "Time for CAPI detectorXYToGvec: %f"%(elapsed2)

maxDiff_tTh = np.linalg.norm(tTh_d1-tTh_d2,np.inf)
print "Maximum disagreement in tTh:  %f"%maxDiff_tTh
maxDiff_eta = np.linalg.norm(eta_d1-eta_d2,np.inf)
print "Maximum disagreement in eta:  %f"%maxDiff_eta
maxDiff_gVec = np.linalg.norm(np.sqrt(np.sum(np.asarray(gVec_l1.T-gVec_l2)**2,1)),np.inf)
print "Maximum disagreement in gVec: %f"%maxDiff_gVec

gVec_c1 = np.dot(rMat_c.T,np.dot(rMat_s.T,gVec_l1))
gVec_c2 = np.ascontiguousarray(np.dot(rMat_c.T,np.dot(rMat_s.T,gVec_l2.T)).T)

start3 = time.clock()                      # time this
xy1 = xf.gvecToDetectorXY(gVec_c1,rMat_d,rMat_s,rMat_c,tVec_d,tVec_s,tVec_c,beamVec=bVec_ref)
elapsed3 = (time.clock() - start3)
print "Time for Python gvecToDetectorXY: %f"%(elapsed3)

maxDiff_xy = np.linalg.norm(np.sqrt(np.sum(np.asarray(XY-xy1)**2,1)),np.inf)
print "Maximum disagreement in gVec: %f"%maxDiff_xy

start4 = time.clock()                      # time this
xy2 = xfcapi.gvecToDetectorXY(gVec_c2,rMat_d,rMat_s,rMat_c,tVec_d,tVec_s,tVec_c,beamVec=bVec_ref)
elapsed4 = (time.clock() - start4)
print "Time for CAPI gvecToDetectorXY: %f"%(elapsed4)

maxDiff_xy = np.linalg.norm(np.sqrt(np.sum(np.asarray(XY-xy2)**2,1)),np.inf)
print "Maximum disagreement in gVec: %f"%maxDiff_xy