Example #1
def discretefiber_reduced(params_in):
    """
    input parameters are [hkl[0], hkl[1], hkl[2], tth, eta, ome]
    """
    bMat       = paramMP['bMat']
    chi        = paramMP['chi']
    csym       = paramMP['csym']
    fiber_ndiv = paramMP['fiber_ndiv']

    hkl = params_in[:3].reshape(3, 1)

    gVec_s = xfcapi.anglesToGVec(
        np.atleast_2d(params_in[3:]),
        chi=chi,
        ).T

    tmp = mutil.uniqueVectors(
        rot.discreteFiber(
            hkl,
            gVec_s,
            B=bMat,
            ndiv=fiber_ndiv,
            invert=False,
            csym=csym
            )[0]
        )
    return tmp
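
The row layout discretefiber_reduced expects follows directly from the slicing above: the first three entries of params_in are the hkl, and the remaining three are the (tth, eta, ome) angles handed to anglesToGVec, while bMat, chi, csym, and fiber_ndiv come from the module-level paramMP dict that the caller is assumed to populate before dispatching work to a pool. A minimal numpy-only sketch of one such row (the numeric values are illustrative placeholders, not from the source):

import numpy as np

# one parameter row as implied by the slicing in discretefiber_reduced:
# [h, k, l, tth, eta, ome]; values are placeholders, angles in radians
params_in = np.array([1.0, 1.0, 0.0,          # h, k, l
                      np.radians(10.0),       # two-theta
                      np.radians(35.0),       # eta
                      np.radians(-20.0)])     # omega
hkl = params_in[:3].reshape(3, 1)             # 3x1 column for discreteFiber
angs = np.atleast_2d(params_in[3:])           # 1x3 row for anglesToGVec
print("hkl %s, angles %s" % (hkl.shape, angs.shape))   # (3, 1) (1, 3)
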
Example #2
def run_paintGrid(pd,
                  omeEta,
                  seed_hkl_ids,
                  threshold,
                  fiber_ndiv,
                  omeTol=None,
                  etaTol=None,
                  omeRange=None,
                  etaRange=None,
                  omePeriod=(-np.pi, np.pi),
                  qTol=1e-7,
                  doMultiProc=True,
                  nCPUs=multiprocessing.cpu_count(),
                  useGrid=None):
    """
    wrapper for indexer.paintGrid
    """
    del_ome = omeEta.omegas[1] - omeEta.omegas[0]
    del_eta = omeEta.etas[1] - omeEta.etas[0]

    # tolerances in degrees...  I know, pathological
    if omeTol is None:
        omeTol = 360. / float(fiber_ndiv)
    if etaTol is None:
        etaTol = 360. / float(fiber_ndiv)

    # must be consistent
    pd_hkl_ids = omeEta.iHKLList[seed_hkl_ids]

    tTh = pd.getTTh()
    bMat = pd.latVecOps['B']
    csym = pd.getLaueGroup()
    qsym = pd.getQSym()

    if useGrid is not None:
        try:
            print "loading quaternion grid file: %s" % (useGrid)
            qfib = np.loadtxt(useGrid).T
        except (IOError, ValueError):
            raise RuntimeError("unable to load quaternion grid file")
    else:
        structureNDI_label = ndimage.generate_binary_structure(2, 2)
        qfib = []
        ii = 0
        jj = fiber_ndiv
        print "labeling maps..."
        labels = []
        numSpots = []
        coms = []
        for i in seed_hkl_ids:
            labels_t, numSpots_t = ndimage.label(
                omeEta.dataStore[i] > threshold, structureNDI_label)
            coms_t = np.atleast_2d(
                ndimage.center_of_mass(omeEta.dataStore[i],
                                       labels=labels_t,
                                       index=np.arange(1,
                                                       np.amax(labels_t) + 1)))
            labels.append(labels_t)
            numSpots.append(numSpots_t)
            coms.append(coms_t)
            pass

        # second pass for generation
        print "generating quaternions..."
        qfib_tmp = np.empty((4, fiber_ndiv * sum(numSpots)))
        for i in range(len(pd_hkl_ids)):
            for ispot in range(numSpots[i]):
                if not np.isnan(coms[i][ispot][0]):
                    ome_c = omeEta.omeEdges[0] + (0.5 +
                                                  coms[i][ispot][0]) * del_ome
                    eta_c = omeEta.etaEdges[0] + (0.5 +
                                                  coms[i][ispot][1]) * del_eta

                    gVec_s = xrdutil.makeMeasuredScatteringVectors(
                        tTh[pd_hkl_ids[i]], eta_c, ome_c)

                    qfib_tmp[:, ii:jj] = rot.discreteFiber(
                        pd.hkls[:, pd_hkl_ids[i]].reshape(3, 1),
                        gVec_s,
                        B=bMat,
                        ndiv=fiber_ndiv,
                        invert=False,
                        csym=csym)[0]
                    ii = jj
                    jj += fiber_ndiv
                    pass
                pass
            qfib.append(mutil.uniqueVectors(qfib_tmp))
            pass
        qfib = np.hstack(qfib)
    print "Running paintGrid on %d orientations" % (qfib.shape[1])
    complPG = idx.paintGrid(qfib,
                            omeEta,
                            omegaRange=omeRange,
                            etaRange=etaRange,
                            omeTol=d2r * omeTol,
                            etaTol=d2r * etaTol,
                            omePeriod=omePeriod,
                            threshold=threshold,
                            doMultiProc=doMultiProc,
                            nCPUs=nCPUs)
    return complPG, qfib
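
The first pass of run_paintGrid is plain scipy.ndimage blob labeling on each seed map. A self-contained sketch with a synthetic eta-omega map (the map values and threshold are made up for illustration) shows what label and center_of_mass return:

import numpy as np
from scipy import ndimage

# synthetic intensity map: rows ~ omega bins, columns ~ eta bins
eta_ome_map = np.zeros((60, 360))
eta_ome_map[10:13, 40:44] = 5.0      # fake spot 1
eta_ome_map[30:32, 200:203] = 7.0    # fake spot 2

structure = ndimage.generate_binary_structure(2, 2)   # 8-connected, as above
labels, num_spots = ndimage.label(eta_ome_map > 1.0, structure)
coms = np.atleast_2d(
    ndimage.center_of_mass(eta_ome_map, labels=labels,
                           index=np.arange(1, num_spots + 1)))
print("%d spots, centroids in (ome bin, eta bin):" % num_spots)
print(coms)
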
Example #3
def fiberSearch(spotsArray, hklList,
                iPhase=0,
                nsteps=120,
                minCompleteness=0.60,
                minPctClaimed=0.95,
                preserveClaims=False,
                friedelOnly=True,
                dspTol=None,
                etaTol=0.025,
                omeTol=0.025,
                etaTolF=0.00225,
                omeTolF=0.00875,
                nStdDev=2,
                quitAfter=None,
                doRefinement=True,
                debug=True,
                doMultiProc=True,
                nCPUs=None,
                outputGrainList=False
                ):
    """
    This indexer finds grains by performing 1-d searches along the fibers under the
    valid spots associated with each reflection order specified in hklList.  The set
    of spots used to generate the candidate orientations may be restricted to Friedel
    pairs only.

    hklList *must* have length > 0;
    Each hkl entry in hklList *must* be a tuple, not a list

    the output is a concatenated list of orientation matrices ((n, 3, 3) numpy.ndarray).
    """

    assert hasattr(hklList, '__len__'), "the HKL list must have length, and len(hklList) > 0."

    nHKLs = len(hklList)
    grainList = []
    nGrains = 0
    planeData = spotsArray.getPlaneData(iPhase)
    csym = planeData.getLaueGroup()
    bMat = planeData.latVecOps['B']
    if dspTol is None:
        dspTol = planeData.strainMag

    centroSymRefl = planeData.getCentroSymHKLs()

    candidate = Grain(spotsArray, rMat=None,
                      etaTol=etaTol, omeTol=omeTol)
    multiProcMode = xrdbase.haveMultiProc and doMultiProc
    #
    global foundFlagShared
    global multiProcMode_MP
    global spotsArray_MP
    global candidate_MP
    global dspTol_MP
    global minCompleteness_MP
    global doRefinement_MP
    global nStdDev_MP
    multiProcMode_MP   = multiProcMode
    spotsArray_MP      = spotsArray
    candidate_MP       = candidate
    dspTol_MP          = dspTol
    minCompleteness_MP = minCompleteness
    doRefinement_MP    = doRefinement
    nStdDev_MP         = nStdDev
    """
    set up for shared memory multiprocessing
    """
    if multiProcMode:
        nCPUs = nCPUs or xrdbase.dfltNCPU
        spotsArray.multiprocMode = True
        pool = multiprocessing.Pool(nCPUs)

    """
    HKL ITERATOR
    """
    if isinstance(quitAfter, dict):
        n_hkls_to_search = quitAfter['nHKLs']
    else:
        n_hkls_to_search = nHKLs

    if isinstance(quitAfter, int):
        quit_after_ngrains = quitAfter
    else:
        quit_after_ngrains = 0

    numTotal = len(spotsArray)
    pctClaimed = 0.
    time_to_quit = False
    tic = time.time()

    for iHKL in range(n_hkls_to_search):
        print "\n#####################\nProcessing hkl %d of %d\n" % (iHKL+1, nHKLs)
        thisHKLID = planeData.getHKLID(hklList[iHKL])
        thisRingSpots0   = spotsArray.getHKLSpots(thisHKLID)
        thisRingSpots0W  = num.where(thisRingSpots0)[0]
        unclaimedOfThese = ~spotsArray.checkClaims(indices=thisRingSpots0W)
        thisRingSpots    = copy.deepcopy(thisRingSpots0)
        thisRingSpots[thisRingSpots0W] = unclaimedOfThese
        if friedelOnly:
            # first, find Friedel Pairs
            spotsArray.findFriedelPairsHKL(hklList[iHKL],
                                           etaTol=etaTolF,
                                           omeTol=omeTolF)
            spotsIteratorI = spotsArray.getIterHKL(hklList[iHKL], unclaimedOnly=True, friedelOnly=True)
            # make some stuff for counters
            maxSpots = 0.5*(sum(thisRingSpots) - sum(spotsArray.friedelPair[thisRingSpots] == -1))
        else:
            spotsIteratorI = spotsArray.getIterHKL(hklList[iHKL], unclaimedOnly=True, friedelOnly=False)
            maxSpots = sum(thisRingSpots)
        """
        SPOT ITERATOR
          - this is where we iterate over all 'valid' spots for the current HKL as
            subject to the conditions of claims and ID as a friedel pair (when requested)
        """
        for iRefl, stuff in enumerate(spotsIteratorI):
            unclaimedOfThese = ~spotsArray.checkClaims(indices=thisRingSpots0W)
            thisRingSpots    = copy.deepcopy(thisRingSpots0)
            thisRingSpots[thisRingSpots0W] = unclaimedOfThese
            if friedelOnly:
                iSpot, jSpot, angs_I, angs_J = stuff

                Gplus  = makeMeasuredScatteringVectors(*angs_I)
                Gminus = makeMeasuredScatteringVectors(*angs_J)

                Gvec = 0.5*(Gplus - Gminus)
                maxSpots = 0.5*(sum(thisRingSpots) - sum(spotsArray.friedelPair[thisRingSpots] == -1))
            else:
                iSpot, angs_I = stuff
                Gvec  = makeMeasuredScatteringVectors(*angs_I)
                maxSpots = sum(thisRingSpots)
            print "\nProcessing reflection %d (spot %d), %d remain unclaimed\n" % (iRefl+1, iSpot, maxSpots)
            if multiProcMode and debugMultiproc > 1:
                marks = spotsArray._Spots__marks[:]
                print 'marks : '+str(marks)
            # make the fiber;
            qfib = discreteFiber(hklList[iHKL], Gvec,
                                 B=bMat,
                                 ndiv=nsteps,
                                 invert=False,
                                 csym=csym, ssym=None)[0]
            # if +/- hkl aren't in the symmetry group, need '-' fiber
            if not centroSymRefl[thisHKLID]:
                minusHKL = -num.r_[hklList[iHKL]]
                qfibM = discreteFiber(minusHKL, Gvec,
                                      B=bMat,
                                      ndiv=nsteps,
                                      invert=False,
                                      csym=csym, ssym=None)[0]
                qfib = num.hstack([qfib, qfibM])
                pass
            # cull out duplicate orientations
            qfib = mUtil.uniqueVectors(qfib, tol=1e-4)
            numTrials = qfib.shape[1]
            """
            THIS IS THE BIGGIE; THE LOOP OVER THE DISCRETE ORIENTATIONS IN THE CURRENT FIBER
            """
            if multiProcMode:
                foundFlagShared.value = False
                qfibList = map(num.array, qfib.T.tolist())
                #if debugMultiproc:
                #    print 'qfibList : '+str(qfibList)
                results = num.array(pool.map(testThisQ, qfibList, chunksize=1))
                trialGrains = results[num.where(num.array(results, dtype=bool))]
                # for trialGrain in trialGrains:
                #     trialGrain.restore(candidate)
            else:
                trialGrains = []
                for iR in range(numTrials):
                    foundGrainData = testThisQ(qfib[:, iR])
                    if foundGrainData is not None:
                        trialGrains.append(foundGrainData)
                        break
            'end of if multiProcMode'

            if len(trialGrains) == 0:
                print "No grain found containing spot %d\n" % (iSpot)
                # import pdb;pdb.set_trace()
            else:
                asMaster = multiProcMode
                'sort based on completeness'
                trialGrainCompletenesses = [tgd['completeness'] for tgd in trialGrains]
                order = num.argsort(trialGrainCompletenesses)[-1::-1]
                for iTrialGrain in order:
                    foundGrainData = trialGrains[iTrialGrain]
                    foundGrain = Grain(spotsArray, grainData=foundGrainData, claimingSpots=False)
                    'check completeness before accepting, especially important for multiproc'
                    foundGrain.checkClaims() # updates completeness
                    if debugMultiproc:
                        print 'final completeness of candidate is %g' % (foundGrain.completeness)
                    if foundGrain.completeness >= minCompleteness:
                        conflicts = foundGrain.claimSpots(asMaster=asMaster)
                        numConfl = num.sum(conflicts)
                        if numConfl > 0:
                            print 'tried to claim %d spots that are already claimed' % (numConfl)
                        grainList.append(foundGrain)
                        nGrains += 1
                numUnClaimed = num.sum(~spotsArray.checkClaims())
                numClaimed = numTotal - numUnClaimed
                pctClaimed = float(numClaimed) / numTotal
                print "Found %d grains so far, %f%% claimed" % (nGrains,100*pctClaimed)

                time_to_quit = (pctClaimed > minPctClaimed) or\
                  ((quit_after_ngrains > 0) and (nGrains >= quit_after_ngrains))
                if time_to_quit:
                    break
        'end of iRefl loop'

        if time_to_quit:
            break

    'end of iHKL loop'
    rMats = num.empty((len(grainList), 3, 3))
    for i in range(len(grainList)):
        rMats[i, :, :] = grainList[i].rMat

    if outputGrainList:
        retval = (rMats, grainList)
    else:
        retval = rMats

    if not preserveClaims:
        spotsArray.resetClaims()
    toc = time.time()
    print 'fiberSearch execution took %g seconds' % (toc-tic)

    if multiProcMode:
        pool.close()
        spotsArray.multiprocMode = False
        foundFlagShared.value = False
    # global foundFlagShared
    # global multiProcMode_MP
    # global spotsArray_MP
    # global candidate_MP
    # global dspTol_MP
    # global minCompleteness_MP
    # global doRefinement_MP
    multiProcMode_MP = None
    spotsArray_MP = None
    candidate_MP = None
    dspTol_MP = None
    minCompleteness_MP = None
    doRefinement_MP = None

    return retval
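
The *_MP globals together with pool.map(testThisQ, ...) are the shared-state multiprocessing pattern this indexer relies on: workers inherit the module-level parameters when the pool forks, so only the trial quaternion has to be shipped to each worker. A stripped-down, self-contained sketch of that pattern (the worker below is a dummy stand-in for testThisQ, not the real completeness test, and the shared value is only visible to children under a fork start method):

import multiprocessing
import numpy as np

# shared parameter, set before the Pool is created so forked workers see it
threshold_MP = None

def test_this_q(q):
    # dummy stand-in for testThisQ: None means "no grain found for this trial"
    if threshold_MP is None:          # e.g. under a spawn start method
        return None
    return q if q[0] > threshold_MP else None

if __name__ == '__main__':
    threshold_MP = 0.5
    qfib = np.random.rand(4, 8)                      # fake fiber quaternions
    pool = multiprocessing.Pool(2)
    results = pool.map(test_this_q,
                       list(map(np.array, qfib.T.tolist())),
                       chunksize=1)
    pool.close()
    pool.join()
    print([r is not None for r in results])
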
Example #4
def fiberSearch(spotsArray,
                hklList,
                iPhase=0,
                nsteps=120,
                minCompleteness=0.60,
                minPctClaimed=0.95,
                preserveClaims=False,
                friedelOnly=True,
                dspTol=None,
                etaTol=0.025,
                omeTol=0.025,
                etaTolF=0.00225,
                omeTolF=0.00875,
                nStdDev=2,
                quitAfter=None,
                doRefinement=True,
                debug=True,
                doMultiProc=True,
                nCPUs=None,
                outputGrainList=False):
    """
    This indexer finds grains by performing 1-d searches along the fibers under the
    valid spots associated with each reflection order specified in hklList.  The set
    of spots used to generate the candidate orientations may be restricted to Friedel
    pairs only.

    hklList *must* have length > 0;
    Each hkl entry in hklList *must* be a tuple, not a list

    the output is a concatenated list of orientation matrices ((n, 3, 3) numpy.ndarray).
    """

    assert hasattr(
        hklList,
        '__len__'), "the HKL list must have length, and len(hklList) > 0."

    nHKLs = len(hklList)
    grainList = []
    nGrains = 0
    planeData = spotsArray.getPlaneData(iPhase)
    csym = planeData.getLaueGroup()
    bMat = planeData.latVecOps['B']
    if dspTol is None:
        dspTol = planeData.strainMag

    centroSymRefl = planeData.getCentroSymHKLs()

    candidate = Grain(spotsArray, rMat=None, etaTol=etaTol, omeTol=omeTol)
    multiProcMode = xrdbase.haveMultiProc and doMultiProc
    #
    global foundFlagShared
    global multiProcMode_MP
    global spotsArray_MP
    global candidate_MP
    global dspTol_MP
    global minCompleteness_MP
    global doRefinement_MP
    global nStdDev_MP
    multiProcMode_MP = multiProcMode
    spotsArray_MP = spotsArray
    candidate_MP = candidate
    dspTol_MP = dspTol
    minCompleteness_MP = minCompleteness
    doRefinement_MP = doRefinement
    nStdDev_MP = nStdDev
    """
    set up for shared memory multiprocessing
    """
    if multiProcMode:
        nCPUs = nCPUs or xrdbase.dfltNCPU
        spotsArray.multiprocMode = True
        pool = multiprocessing.Pool(nCPUs)
    """
    HKL ITERATOR
    """
    if isinstance(quitAfter, dict):
        n_hkls_to_search = quitAfter['nHKLs']
    else:
        n_hkls_to_search = nHKLs

    if isinstance(quitAfter, int):
        quit_after_ngrains = quitAfter
    else:
        quit_after_ngrains = 0

    numTotal = len(spotsArray)
    pctClaimed = 0.
    time_to_quit = False
    tic = time.time()

    for iHKL in range(n_hkls_to_search):
        print "\n#####################\nProcessing hkl %d of %d\n" % (iHKL + 1,
                                                                      nHKLs)
        thisHKLID = planeData.getHKLID(hklList[iHKL])
        thisRingSpots0 = spotsArray.getHKLSpots(thisHKLID)
        thisRingSpots0W = num.where(thisRingSpots0)[0]
        unclaimedOfThese = ~spotsArray.checkClaims(indices=thisRingSpots0W)
        thisRingSpots = copy.deepcopy(thisRingSpots0)
        thisRingSpots[thisRingSpots0W] = unclaimedOfThese
        if friedelOnly:
            # first, find Friedel Pairs
            spotsArray.findFriedelPairsHKL(hklList[iHKL],
                                           etaTol=etaTolF,
                                           omeTol=omeTolF)
            spotsIteratorI = spotsArray.getIterHKL(hklList[iHKL],
                                                   unclaimedOnly=True,
                                                   friedelOnly=True)
            # make some stuff for counters
            maxSpots = 0.5 * (sum(thisRingSpots) -
                              sum(spotsArray.friedelPair[thisRingSpots] == -1))
        else:
            spotsIteratorI = spotsArray.getIterHKL(hklList[iHKL],
                                                   unclaimedOnly=True,
                                                   friedelOnly=False)
            maxSpots = sum(thisRingSpots)
        """
        SPOT ITERATOR
          - this is where we iterate over all 'valid' spots for the current HKL as
            subject to the conditions of claims and ID as a friedel pair (when requested)
        """
        for iRefl, stuff in enumerate(spotsIteratorI):
            unclaimedOfThese = ~spotsArray.checkClaims(indices=thisRingSpots0W)
            thisRingSpots = copy.deepcopy(thisRingSpots0)
            thisRingSpots[thisRingSpots0W] = unclaimedOfThese
            if friedelOnly:
                iSpot, jSpot, angs_I, angs_J = stuff

                Gplus = makeMeasuredScatteringVectors(*angs_I)
                Gminus = makeMeasuredScatteringVectors(*angs_J)

                Gvec = 0.5 * (Gplus - Gminus)
                maxSpots = 0.5 * (sum(thisRingSpots) - sum(
                    spotsArray.friedelPair[thisRingSpots] == -1))
            else:
                iSpot, angs_I = stuff
                Gvec = makeMeasuredScatteringVectors(*angs_I)
                maxSpots = sum(thisRingSpots)
            print "\nProcessing reflection %d (spot %d), %d remain unclaimed\n" % (
                iRefl + 1, iSpot, maxSpots)
            if multiProcMode and debugMultiproc > 1:
                marks = spotsArray._Spots__marks[:]
                print 'marks : ' + str(marks)
            # make the fiber;
            qfib = discreteFiber(hklList[iHKL],
                                 Gvec,
                                 B=bMat,
                                 ndiv=nsteps,
                                 invert=False,
                                 csym=csym,
                                 ssym=None)[0]
            # if +/- hkl aren't in the symmetry group, need '-' fiber
            if not centroSymRefl[thisHKLID]:
                minusHKL = -num.r_[hklList[iHKL]]
                qfibM = discreteFiber(minusHKL,
                                      Gvec,
                                      B=bMat,
                                      ndiv=nsteps,
                                      invert=False,
                                      csym=csym,
                                      ssym=None)[0]
                qfib = num.hstack([qfib, qfibM])
                pass
            # cull out duplicate orientations
            qfib = mUtil.uniqueVectors(qfib, tol=1e-4)
            numTrials = qfib.shape[1]
            """
            THIS IS THE BIGGIE; THE LOOP OVER THE DISCRETE ORIENTATIONS IN THE CURRENT FIBER
            """
            if multiProcMode:
                foundFlagShared.value = False
                qfibList = map(num.array, qfib.T.tolist())
                #if debugMultiproc:
                #    print 'qfibList : '+str(qfibList)
                results = num.array(pool.map(testThisQ, qfibList, chunksize=1))
                trialGrains = results[num.where(num.array(results,
                                                          dtype=bool))]
                # for trialGrain in trialGrains:
                #     trialGrain.restore(candidate)
            else:
                trialGrains = []
                for iR in range(numTrials):
                    foundGrainData = testThisQ(qfib[:, iR])
                    if foundGrainData is not None:
                        trialGrains.append(foundGrainData)
                        break
            'end of if multiProcMode'

            if len(trialGrains) == 0:
                print "No grain found containing spot %d\n" % (iSpot)
                # import pdb;pdb.set_trace()
            else:
                asMaster = multiProcMode
                'sort based on completeness'
                trialGrainCompletenesses = [
                    tgd['completeness'] for tgd in trialGrains
                ]
                order = num.argsort(trialGrainCompletenesses)[-1::-1]
                for iTrialGrain in order:
                    foundGrainData = trialGrains[iTrialGrain]
                    foundGrain = Grain(spotsArray,
                                       grainData=foundGrainData,
                                       claimingSpots=False)
                    'check completeness before accepting, especially important for multiproc'
                    foundGrain.checkClaims()  # updates completeness
                    if debugMultiproc:
                        print 'final completeness of candidate is %g' % (
                            foundGrain.completeness)
                    if foundGrain.completeness >= minCompleteness:
                        conflicts = foundGrain.claimSpots(asMaster=asMaster)
                        numConfl = num.sum(conflicts)
                        if numConfl > 0:
                            print 'tried to claim %d spots that are already claimed' % (
                                numConfl)
                        grainList.append(foundGrain)
                        nGrains += 1
                numUnClaimed = num.sum(~spotsArray.checkClaims())
                numClaimed = numTotal - numUnClaimed
                pctClaimed = float(numClaimed) / numTotal
                print "Found %d grains so far, %f%% claimed" % (nGrains, 100 *
                                                                pctClaimed)

                time_to_quit = (pctClaimed > minPctClaimed) or\
                  ((quit_after_ngrains > 0) and (nGrains >= quit_after_ngrains))
                if time_to_quit:
                    break
        'end of iRefl loop'

        if time_to_quit:
            break

    'end of iHKL loop'
    rMats = num.empty((len(grainList), 3, 3))
    for i in range(len(grainList)):
        rMats[i, :, :] = grainList[i].rMat

    if outputGrainList:
        retval = (rMats, grainList)
    else:
        retval = rMats

    if not preserveClaims:
        spotsArray.resetClaims()
    toc = time.time()
    print 'fiberSearch execution took %g seconds' % (toc - tic)

    if multiProcMode:
        pool.close()
        spotsArray.multiprocMode = False
        foundFlagShared.value = False
    # global foundFlagShared
    # global multiProcMode_MP
    # global spotsArray_MP
    # global candidate_MP
    # global dspTol_MP
    # global minCompleteness_MP
    # global doRefinement_MP
    multiProcMode_MP = None
    spotsArray_MP = None
    candidate_MP = None
    dspTol_MP = None
    minCompleteness_MP = None
    doRefinement_MP = None

    return retval
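
The mUtil.uniqueVectors(qfib, tol=1e-4) call culls near-duplicate orientations by keeping unique columns of the 4 x n quaternion array to within a tolerance. A rough numpy-only stand-in for that step (the rounding-based bucketing is an assumption about how such a cull can work, not the actual uniqueVectors implementation):

import numpy as np

def unique_columns(q, tol=1e-4):
    # bucket each column by rounding to the tolerance, then keep the first
    # column seen in each bucket (a crude tolerance-based duplicate cull)
    keys = np.round(q / tol).astype(np.int64)
    _, idx = np.unique(keys.T, axis=0, return_index=True)
    return q[:, np.sort(idx)]

qfib = np.array([[1.0, 1.0, 0.0],
                 [0.0, 5.0e-5, 1.0],
                 [0.0, 0.0, 0.0],
                 [0.0, 0.0, 0.0]])
print(unique_columns(qfib).shape)   # (4, 2): the two nearly equal columns merge
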
Example #5
def generate_orientation_fibers(eta_ome, chi, threshold, seed_hkl_ids, fiber_ndiv, filt_stdev=0.8):
    """
    From ome-eta maps and hklid spec, generate list of
    quaternions from fibers
    """
    # seed_hkl_ids must be consistent with this...
    pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids]

    # grab angular grid info from maps
    del_ome = eta_ome.omegas[1] - eta_ome.omegas[0]
    del_eta = eta_ome.etas[1] - eta_ome.etas[0]

    # labeling mask
    structureNDI_label = ndimage.generate_binary_structure(2, 1)

    # crystallography data from the pd object
    pd = eta_ome.planeData
    tTh  = pd.getTTh()
    bMat = pd.latVecOps['B']
    csym = pd.getLaueGroup()

    ############################################
    ##    Labeling of spots from seed hkls    ##
    ############################################

    qfib     = []
    labels   = []
    numSpots = []
    coms     = []
    for i in seed_hkl_ids:
        # First apply filter
        this_map_f = -ndimage.filters.gaussian_laplace(eta_ome.dataStore[i], filt_stdev)

        labels_t, numSpots_t = ndimage.label(
            this_map_f > threshold,
            structureNDI_label
            )
        coms_t = np.atleast_2d(
            ndimage.center_of_mass(
                this_map_f,
                labels=labels_t,
                index=np.arange(1, np.amax(labels_t)+1)
                )
            )
        labels.append(labels_t)
        numSpots.append(numSpots_t)
        coms.append(coms_t)
        pass

    ############################################
    ##  Generate discrete fibers from labels  ##
    ############################################

    for i in range(len(pd_hkl_ids)):
        ii = 0
        qfib_tmp = np.empty((4, fiber_ndiv*numSpots[i]))
        for ispot in range(numSpots[i]):
            if not np.isnan(coms[i][ispot][0]):
                ome_c = eta_ome.omeEdges[0] \
                        + (0.5 + coms[i][ispot][0])*del_ome
                eta_c = eta_ome.etaEdges[0] \
                        + (0.5 + coms[i][ispot][1])*del_eta

                #gVec_s = xrdutil.makeMeasuredScatteringVectors(
                #    tTh[pd_hkl_ids[i]], eta_c, ome_c
                #    )
                gVec_s = xfcapi.anglesToGVec(
                    np.atleast_2d(
                        [tTh[pd_hkl_ids[i]], eta_c, ome_c]
                        ),
                    chi=chi
                    ).T

                tmp = mutil.uniqueVectors(
                    rot.discreteFiber(
                        pd.hkls[:, pd_hkl_ids[i]].reshape(3, 1),
                        gVec_s,
                        B=bMat,
                        ndiv=fiber_ndiv,
                        invert=False,
                        csym=csym
                        )[0]
                    )
                jj = ii + tmp.shape[1]
                qfib_tmp[:, ii:jj] = tmp
                ii += tmp.shape[1]
                pass
            pass
        qfib.append(qfib_tmp[:, :ii])
        pass
    return np.hstack(qfib)
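
The ome_c/eta_c lines convert the fractional (row, column) centroid returned by center_of_mass into angular coordinates using the map's bin edges: integer bin k is centered at edges[0] + (k + 0.5)*delta, so a fractional index x maps to edges[0] + (0.5 + x)*delta. A tiny standalone check (the bin edges and centroid below are made-up values):

import numpy as np

# hypothetical 0.25-degree bins over made-up omega and eta ranges
ome_edges = np.radians(np.arange(-60.0, 60.0 + 0.25, 0.25))
eta_edges = np.radians(np.arange(-180.0, 180.0 + 0.25, 0.25))
del_ome = ome_edges[1] - ome_edges[0]
del_eta = eta_edges[1] - eta_edges[0]

com = (10.3, 200.7)   # fractional (ome bin, eta bin) from center_of_mass
ome_c = ome_edges[0] + (0.5 + com[0]) * del_ome
eta_c = eta_edges[0] + (0.5 + com[1]) * del_eta
print("centroid at %.3f deg omega, %.3f deg eta"
      % (np.degrees(ome_c), np.degrees(eta_c)))
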
Example #6
def run_paintGrid(pd, omeEta, seed_hkl_ids, threshold, fiber_ndiv,
                  omeTol=None, etaTol=None,
                  omeRange=None, etaRange=None,
                  omePeriod=(-np.pi, np.pi),
                  qTol=1e-7,
                  doMultiProc=True, nCPUs=multiprocessing.cpu_count(),
                  useGrid=None):
    """
    wrapper for indexer.paintGrid
    """
    del_ome = omeEta.omegas[1] - omeEta.omegas[0]
    del_eta = omeEta.etas[1] - omeEta.etas[0]

    # tolerances in degrees...  I know, pathological
    if omeTol is None:
        omeTol = 360. / float(fiber_ndiv)
    if etaTol is None:
        etaTol = 360. / float(fiber_ndiv)

    # must be consistent
    pd_hkl_ids = omeEta.iHKLList[seed_hkl_ids]

    tTh  = pd.getTTh()
    bMat = pd.latVecOps['B']
    csym = pd.getLaueGroup()
    qsym = pd.getQSym()

    if useGrid is not None:
        try:
            print "loading quaternion grid file: %s" % (useGrid)
            qfib = np.loadtxt(useGrid).T
        except (IOError, ValueError):
            raise RuntimeError("unable to load quaternion grid file")
    else:
        structureNDI_label = ndimage.generate_binary_structure(2, 2)
        qfib = []
        ii = 0
        jj = fiber_ndiv
        print "labeling maps..."
        labels   = []
        numSpots = []
        coms     = []
        for i in seed_hkl_ids:
            labels_t, numSpots_t = ndimage.label(omeEta.dataStore[i] > threshold, structureNDI_label)
            coms_t = np.atleast_2d(ndimage.center_of_mass(omeEta.dataStore[i],
                                                          labels=labels_t,
                                                          index=np.arange(1, np.amax(labels_t)+1)))
            labels.append(labels_t)
            numSpots.append(numSpots_t)
            coms.append(coms_t)
            pass

        # second pass for generation
        print "generating quaternions..."
        qfib_tmp = np.empty((4, fiber_ndiv*sum(numSpots)))
        for i in range(len(pd_hkl_ids)):
            for ispot in range(numSpots[i]):
                if not np.isnan(coms[i][ispot][0]):
                    ome_c = omeEta.omeEdges[0] + (0.5 + coms[i][ispot][0])*del_ome
                    eta_c = omeEta.etaEdges[0] + (0.5 + coms[i][ispot][1])*del_eta

                    gVec_s = xrdutil.makeMeasuredScatteringVectors(tTh[pd_hkl_ids[i]], eta_c, ome_c)

                    qfib_tmp[:, ii:jj] = rot.discreteFiber(pd.hkls[:, pd_hkl_ids[i]].reshape(3, 1),
                                                           gVec_s, B=bMat, ndiv=fiber_ndiv,
                                                           invert=False, csym=csym)[0]
                    ii  = jj
                    jj += fiber_ndiv
                    pass
                pass
            qfib.append(mutil.uniqueVectors(qfib_tmp))
            pass
        qfib = np.hstack(qfib)
    print "Running paintGrid on %d orientations" % (qfib.shape[1])
    complPG = idx.paintGrid(qfib,
                            omeEta,
                            omegaRange=omeRange, etaRange=etaRange,
                            omeTol=d2r*omeTol, etaTol=d2r*etaTol,
                            omePeriod=omePeriod, threshold=threshold,
                            doMultiProc=doMultiProc,
                            nCPUs=nCPUs)
    return complPG, qfib
Example #7
def generate_orientation_fibers(eta_ome, threshold, seed_hkl_ids, fiber_ndiv):
    """ From ome-eta maps and hklid spec, generate list of
    quaternions from fibers

    ** passing of BOTH pd and eta_ome object is redundant; flag for fix!
    """
    # seed_hkl_ids must be consistent with this...
    pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids]

    # grab angular grid info from maps
    del_ome = eta_ome.omegas[1] - eta_ome.omegas[0]
    del_eta = eta_ome.etas[1] - eta_ome.etas[0]

    # labeling mask
    structureNDI_label = ndimage.generate_binary_structure(2, 2)

    # crystallography data from the pd object
    pd = eta_ome.planeData
    tTh  = pd.getTTh()
    bMat = pd.latVecOps['B']
    csym = pd.getLaueGroup()
    qsym = pd.getQSym()

    ############################################
    ##    Labeling of spots from seed hkls    ##
    ############################################

    ii       = 0
    jj       = fiber_ndiv
    qfib     = []
    labels   = []
    numSpots = []
    coms     = []
    for i in seed_hkl_ids:
        labels_t, numSpots_t = ndimage.label(
            eta_ome.dataStore[i] > threshold,
            structureNDI_label
            )
        coms_t = np.atleast_2d(
            ndimage.center_of_mass(
                eta_ome.dataStore[i],
                labels=labels_t,
                index=np.arange(1, np.amax(labels_t)+1)
                )
            )
        labels.append(labels_t)
        numSpots.append(numSpots_t)
        coms.append(coms_t)
        pass

    ############################################
    ##  Generate discrete fibers from labels  ##
    ############################################

    qfib_tmp = np.empty((4, fiber_ndiv*sum(numSpots)))

    for i in range(len(pd_hkl_ids)):
        for ispot in range(numSpots[i]):
            if not np.isnan(coms[i][ispot][0]):
                ome_c = eta_ome.omeEdges[0] \
                        + (0.5 + coms[i][ispot][0])*del_ome
                eta_c = eta_ome.etaEdges[0] \
                        + (0.5 + coms[i][ispot][1])*del_eta

                gVec_s = xrdutil.makeMeasuredScatteringVectors(
                    tTh[pd_hkl_ids[i]], eta_c, ome_c
                    )

                qfib_tmp[:, ii:jj] = rot.discreteFiber(
                    pd.hkls[:, pd_hkl_ids[i]].reshape(3, 1),
                    gVec_s,
                    B=bMat,
                    ndiv=fiber_ndiv,
                    invert=False,
                    csym=csym
                    )[0]
                ii  = jj
                jj += fiber_ndiv
                pass
            pass
        qfib.append(mutil.uniqueVectors(qfib_tmp))
        pass
    return np.hstack(qfib)
Example #8
def generate_orientation_fibers(eta_ome, threshold, seed_hkl_ids, fiber_ndiv):
    """ From ome-eta maps and hklid spec, generate list of
    quaternions from fibers

    ** passing of BOTH pd and eta_ome object is redundant; flag for fix!
    """
    # seed_hkl_ids must be consistent with this...
    pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids]

    # grab angular grid info from maps
    del_ome = eta_ome.omegas[1] - eta_ome.omegas[0]
    del_eta = eta_ome.etas[1] - eta_ome.etas[0]

    # labeling mask
    structureNDI_label = ndimage.generate_binary_structure(2, 2)

    # crystallography data from the pd object
    pd = eta_ome.planeData
    tTh = pd.getTTh()
    bMat = pd.latVecOps['B']
    csym = pd.getLaueGroup()
    qsym = pd.getQSym()

    ############################################
    ##    Labeling of spots from seed hkls    ##
    ############################################

    qfib = []
    labels = []
    numSpots = []
    coms = []
    for i in seed_hkl_ids:
        labels_t, numSpots_t = ndimage.label(eta_ome.dataStore[i] > threshold,
                                             structureNDI_label)
        coms_t = np.atleast_2d(
            ndimage.center_of_mass(eta_ome.dataStore[i],
                                   labels=labels_t,
                                   index=np.arange(1,
                                                   np.amax(labels_t) + 1)))
        labels.append(labels_t)
        numSpots.append(numSpots_t)
        coms.append(coms_t)
        pass

    ############################################
    ##  Generate discrete fibers from labels  ##
    ############################################

    for i in range(len(pd_hkl_ids)):
        ii = 0
        qfib_tmp = np.empty((4, fiber_ndiv * numSpots[i]))
        for ispot in range(numSpots[i]):
            if not np.isnan(coms[i][ispot][0]):
                ome_c = eta_ome.omeEdges[0] \
                        + (0.5 + coms[i][ispot][0])*del_ome
                eta_c = eta_ome.etaEdges[0] \
                        + (0.5 + coms[i][ispot][1])*del_eta

                gVec_s = xrdutil.makeMeasuredScatteringVectors(
                    tTh[pd_hkl_ids[i]], eta_c, ome_c)
                tmp = mutil.uniqueVectors(
                    rot.discreteFiber(pd.hkls[:, pd_hkl_ids[i]].reshape(3, 1),
                                      gVec_s,
                                      B=bMat,
                                      ndiv=fiber_ndiv,
                                      invert=False,
                                      csym=csym)[0])
                jj = ii + tmp.shape[1]
                qfib_tmp[:, ii:jj] = tmp
                ii += tmp.shape[1]
                pass
            pass
        qfib.append(qfib_tmp[:, :ii])
        pass
    return np.hstack(qfib)
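
The accumulation pattern above (preallocate the worst case of fiber_ndiv columns per spot, write each spot's unique quaternions into a slice, then trim to the columns actually written) is easy to check in isolation; the random column counts below just stand in for whatever uniqueVectors returns per spot:

import numpy as np

fiber_ndiv = 5
num_spots = 3
qfib_tmp = np.empty((4, fiber_ndiv * num_spots))   # worst-case allocation
ii = 0
for ispot in range(num_spots):
    # stand-in for uniqueVectors(discreteFiber(...)): up to fiber_ndiv columns
    tmp = np.random.rand(4, np.random.randint(1, fiber_ndiv + 1))
    jj = ii + tmp.shape[1]
    qfib_tmp[:, ii:jj] = tmp
    ii = jj
print(qfib_tmp[:, :ii].shape)   # only the columns actually written survive
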