def fitTOFCoordinate(box, peak, padeCoefficients, dtSpread=0.03, minFracPixels=0.01,
                     neigh_length_m=3, zBG=1.96, bgPolyOrder=1, qMask=None, plotResults=False,
                     fracStop=0.01, pp_lambda=None, pplmin_frac=0.8, pplmax_frac=1.5,
                     mindtBinWidth=1):
    """Fit the 1D TOF (Ikeda-Carpenter convoluted) profile of a peak and project
    it back onto every voxel of the MDbox.

    Parameters mirror ICCFT.getTOFWS / ICCFT.doICCFit; ``fracStop`` is retained
    for backward compatibility but is no longer used (it only fed dead code).

    Returns
    -------
    YTOF : ndarray
        The fitted TOF profile evaluated at each voxel's TOF coordinate.
    fICC : IkedaCarpenterConvoluted
        The fit function with the fitted parameters set.
    x_lims : list
        [min, max] of the TOF axis that was fit.
    """
    # Get info from the peak
    tof = peak.getTOF()  # in us
    wavelength = peak.getWavelength()  # in Angstrom
    flightPath = peak.getL1() + peak.getL2()  # in m
    scatteringHalfAngle = 0.5 * peak.getScattering()
    energy = 81.804 / wavelength**2 / 1000.0  # in eV

    # Set the qMask - default is to use every voxel.
    # np.bool was removed in NumPy >= 1.24; the builtin bool is equivalent here.
    if qMask is None:
        qMask = np.ones_like(box.getNumEventsArray()).astype(bool)

    # Calculate the optimal pp_lambda and generate the TOF workspace to fit
    tofWS, ppl = ICCFT.getTOFWS(box, flightPath, scatteringHalfAngle, tof, peak, qMask,
                                dtSpread=dtSpread, minFracPixels=minFracPixels,
                                neigh_length_m=neigh_length_m, zBG=zBG, pp_lambda=pp_lambda,
                                pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
                                mindtBinWidth=mindtBinWidth)

    fitResults, fICC = ICCFT.doICCFit(tofWS, energy, flightPath, padeCoefficients,
                                      fitOrder=bgPolyOrder, constraintScheme=1)

    # Copy the fitted parameter values from the output table into the function.
    for i, param in enumerate(['A', 'B', 'R', 'T0', 'scale', 'hatWidth', 'k_conv']):
        fICC[param] = mtd['fit_Parameters'].row(i)['Value']

    x = tofWS.readX(0)
    yFit = mtd['fit_Workspace'].readY(1)
    # NOTE: the previous version also rebuilt the background polynomial and a
    # scaled/thresholded copy of the fit (bg, yScaled, goodIDX, iStart/iStop);
    # those values only fed commented-out prints, so they have been removed.

    # Interpolate the fitted curve onto a fine grid for back-projection.
    interpF = interp1d(x, yFit, kind='cubic')
    tofxx = np.linspace(tofWS.readX(0).min(), tofWS.readX(0).max(), 1000)
    tofyy = interpF(tofxx)
    if plotResults:
        plt.figure(1)
        plt.clf()
        plt.plot(tofxx, tofyy, label='Interpolated')
        plt.plot(tofWS.readX(0), tofWS.readY(0), 'o', label='Data')
        plt.plot(mtd['fit_Workspace'].readX(1), mtd['fit_Workspace'].readY(1), label='Fit')
        plt.title(fitResults.OutputChi2overDoF)
        plt.legend(loc='best')

    # Evaluate the profile at each voxel's TOF; outside the fit window -> 0.
    ftof = interp1d(tofxx, tofyy, bounds_error=False, fill_value=0.0)
    XTOF = boxToTOFThetaPhi(box, peak)[:, :, :, 0]
    YTOF = ftof(XTOF)
    return YTOF, fICC, [tofWS.readX(0).min(), tofWS.readX(0).max()]
def boxToTOFThetaPhi(box, peak):
    """Convert an MDbox's voxel grid from (qx, qy, qz) to (TOF, theta, phi).

    Returns an array of shape box-grid + (3,), where the last axis holds
    [TOF, theta, phi] for each voxel.
    """
    qx, qy, qz = ICCFT.getQXQYQZ(box)
    radius, theta, phi = ICCFT.cart2sph(qx, qy, qz)
    total_path = peak.getL1() + peak.getL2()
    half_scatter = 0.5 * peak.getScattering()
    # TOF (us) from |Q| via the elastic-scattering relation.
    tof = 3176.507 * total_path * np.sin(half_scatter) / np.abs(radius)
    coords = np.empty(tof.shape + (3,))
    coords[..., 0] = tof
    coords[..., 1] = theta
    coords[..., 2] = phi
    return coords
def fitTOFCoordinate(box, peak, padeCoefficients, dtSpread=0.03, minFracPixels=0.01,
                     neigh_length_m=3, zBG=1.96, bgPolyOrder=1, qMask=None, plotResults=False,
                     fracStop=0.01, pp_lambda=None, pplmin_frac=0.8, pplmax_frac=1.5,
                     mindtBinWidth=1, maxdtBinWidth=50, peakMaskSize=5, iccFitDict=None,
                     fitPenalty=None):
    """Fit the 1D TOF (Ikeda-Carpenter convoluted) profile of a peak and project
    it back onto every voxel of the MDbox.

    Parameters mirror ICCFT.getTOFWS / ICCFT.doICCFit; ``fracStop`` is retained
    for backward compatibility but is unused in this version.

    Returns
    -------
    YTOF : ndarray
        The fitted TOF profile evaluated at each voxel's TOF coordinate.
    fICC : IkedaCarpenterConvoluted
        The fit function with the fitted parameters set.
    x_lims : list
        [min, max] of the TOF axis that was fit.
    """
    # Get info from the peak
    tof = peak.getTOF()  # in us
    wavelength = peak.getWavelength()  # in Angstrom
    flightPath = peak.getL1() + peak.getL2()  # in m
    scatteringHalfAngle = 0.5 * peak.getScattering()
    energy = 81.804 / wavelength**2 / 1000.0  # in eV

    # Set the qMask - default is to use every voxel.
    # np.bool was removed in NumPy >= 1.24; the builtin bool is equivalent here.
    if qMask is None:
        qMask = np.ones_like(box.getNumEventsArray()).astype(bool)

    # Calculate the optimal pp_lambda and generate the TOF workspace to fit
    tofWS, ppl = ICCFT.getTOFWS(box, flightPath, scatteringHalfAngle, tof, peak, qMask,
                                dtSpread=dtSpread, minFracPixels=minFracPixels,
                                neigh_length_m=neigh_length_m, zBG=zBG, pp_lambda=pp_lambda,
                                pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
                                mindtBinWidth=mindtBinWidth, maxdtBinWidth=maxdtBinWidth,
                                peakMaskSize=peakMaskSize, iccFitDict=iccFitDict,
                                fitPenalty=fitPenalty)

    fitResults, fICC = ICCFT.doICCFit(tofWS, energy, flightPath, padeCoefficients,
                                      fitOrder=bgPolyOrder, constraintScheme=1,
                                      iccFitDict=iccFitDict, fitPenalty=fitPenalty)

    # Copy the fitted parameter values from the output table into the function.
    for i, param in enumerate(['A', 'B', 'R', 'T0', 'Scale', 'HatWidth', 'KConv']):
        fICC[param] = mtd['fit_Parameters'].row(i)['Value']

    x = tofWS.readX(0)
    yFit = mtd['fit_Workspace'].readY(1)
    # NOTE: the background-coefficient rows (7..7+bgPolyOrder) were previously
    # read into an unused list; that dead read loop has been removed.

    # Interpolate the fitted curve onto a fine grid for back-projection.
    interpF = interp1d(x, yFit, kind='cubic')
    tofxx = np.linspace(tofWS.readX(0).min(), tofWS.readX(0).max(), 1000)
    tofyy = interpF(tofxx)
    if plotResults:
        plt.figure(1)
        plt.clf()
        plt.plot(tofxx, tofyy, label='Interpolated')
        plt.plot(tofWS.readX(0), tofWS.readY(0), 'o', label='Data')
        plt.plot(mtd['fit_Workspace'].readX(1), mtd['fit_Workspace'].readY(1), label='Fit')
        plt.title(fitResults.OutputChi2overDoF)
        plt.legend(loc='best')

    # Evaluate the profile at each voxel's TOF; outside the fit window -> 0.
    ftof = interp1d(tofxx, tofyy, bounds_error=False, fill_value=0.0)
    XTOF = boxToTOFThetaPhi(box, peak)[:, :, :, 0]
    YTOF = ftof(XTOF)
    return YTOF, fICC, [tofWS.readX(0).min(), tofWS.readX(0).max()]
def addCountsNoFit(df):
    """Add raw (unfitted) peak counts and uncertainties to *df* in place.

    For each row with a valid fit, integrates the data minus a linear
    background over the integration window returned by ICCFT.integratePeak,
    and stores the result in new 'IntensCts' / 'SigCts' columns. Also writes
    the sigma(I) back onto the corresponding peak in the global ``peaks_ws``.

    Rows whose fit contains NaNs get counts 0 and sigma 1.
    """
    ctsList = []
    sigList = []
    for idx, row in df.iterrows():
        x = row['t']
        yF = row['yFit']
        yD = row['yData']
        if np.sum(np.isnan(yF)) == 0:
            # Linear background from the stored fit coefficients.
            bgCoeff = np.array([row['bg_linear'], row['bg_const']])
            bgY = np.polyval(bgCoeff, x)
            t = ICCFT.integratePeak(x, yF, yD, bgY)
            peaks_ws.getPeak(int(idx)).setSigmaIntensity(t[1])
            # BUGFIX: rows yielded by iterrows() are copies, so the previous
            # ``row['SigInt'] = t[1]`` never reached the DataFrame.
            df.at[idx, 'SigInt'] = t[1]
            # Sum background-subtracted data over the integration window.
            sumIDX = np.logical_and(x > t[2], x < t[3])
            sumCts = yD - bgY
            sumCts = np.sum(sumCts[sumIDX])
            ctsList.append(sumCts)
            # Poisson-style uncertainty: counts plus background.
            sig = np.sqrt(np.sum(yD[sumIDX]) + np.sum(bgY[sumIDX]))
            sigList.append(sig)
        else:
            ctsList.append(0)
            sigList.append(1)
    df['IntensCts'] = ctsList
    df['SigCts'] = sigList
def makeFigure(peak, qList, surf, UBMatrix, fracHKL=0.8, figNumber=2, showQBoxOnly=False):
    """Plot a 3D scatter of the measured q points (blue) and the fitted
    surface values (green) for a peak.

    NOTE(review): plt.clf() runs *before* plt.figure(figNumber), so it clears
    whatever figure is currently active, not necessarily figNumber — confirm
    this ordering is intentional.
    """
    plt.clf()
    fig = plt.figure(figNumber)
    ax = Axes3D(fig)
    ax.scatter3D(qList[:, 0], qList[:, 1], qList[:, 2], color='b', linewidth=0)
    ax.scatter3D(qList[:, 0], qList[:, 1], surf, color='g', linewidth=0)
    ax.set_xlabel('qx')
    ax.set_ylabel('qy')
    ax.set_zlabel('qz')
    if showQBoxOnly:
        # Restrict the axes bounds to the fracHKL q-box around the peak.
        qS = peak.getQSampleFrame()
        dQ = np.abs(ICCFT.getDQFracHKL(peak, UBMatrix, frac=fracHKL))
        # Points outside the box in any dimension.
        badIDX = (np.sum(qList < qS - dQ[:, 0], axis=1) + np.sum(qList > qS + dQ[:, 1], axis=1) > 0) > 0
        # NOTE(review): the filtered qList is never re-plotted after this
        # point, so this assignment currently has no visible effect.
        qList = qList[~badIDX, :]
        ax.set_xbound(qS[0] - dQ[0, 0], qS[0] + dQ[0, 1])
        ax.set_ybound(qS[1] - dQ[1, 0], qS[1] + dQ[1, 1])
        ax.set_zbound(qS[2] - dQ[2, 0], qS[2] + dQ[2, 1])
def fitScaling(n_events, box, YTOF, YBVG, goodIDX=None, neigh_length_m=3):
    """Fit a linear scaling (slope A1, offset A0) of the joint TOF x BVG model
    to the observed event counts.

    Returns (scaled model, reduced chi^2 of the scaling fit, slope A1).
    """
    # Joint model, normalized to a maximum of 1.
    YJOINT = 1.0 * YTOF * YBVG
    YJOINT /= 1.0 * YJOINT.max()

    # Local-mean filter used to reject voxels with no neighboring events.
    convBox = 1.0 * \
        np.ones([neigh_length_m, neigh_length_m, neigh_length_m]) / \
        neigh_length_m**3
    conv_n_events = convolve(n_events, convBox)
    # NOTE: an unused QX, QY, QZ = ICCFT.getQXQYQZ(box) computation was removed.

    dP = 8  # half-width (in voxels) of the window around the model maximum
    fitMaxIDX = tuple(
        np.array(np.unravel_index(YJOINT.argmax(), YJOINT.shape)))
    if goodIDX is None:
        # np.bool was removed in NumPy >= 1.24; builtin bool is equivalent.
        goodIDX = np.zeros_like(YJOINT).astype(bool)
        goodIDX[max(fitMaxIDX[0] - dP, 0):min(fitMaxIDX[0] + dP, goodIDX.shape[0]),
                max(fitMaxIDX[1] - dP, 0):min(fitMaxIDX[1] + dP, goodIDX.shape[1]),
                max(fitMaxIDX[2] - dP, 0):min(fitMaxIDX[2] + dP, goodIDX.shape[2])] = True
    goodIDX = np.logical_and(goodIDX, conv_n_events > 0)

    # Linear fit model counts -> observed counts, constrained to positive slope.
    scaleLinear = Polynomial(n=1)
    scaleLinear.constrain("A1>0")
    scaleX = YJOINT[goodIDX]
    scaleY = n_events[goodIDX]
    CreateWorkspace(OutputWorkspace='__scaleWS', dataX=scaleX, dataY=scaleY)
    fitResultsScaling = Fit(Function=scaleLinear, InputWorkspace='__scaleWS',
                            Output='__scalefit', CostFunction='Unweighted least squares')
    A0 = fitResultsScaling[3].row(0)['Value']
    A1 = fitResultsScaling[3].row(1)['Value']
    YRET = A1 * YJOINT + A0
    chiSqRed = fitResultsScaling[1]
    return YRET, chiSqRed, A1
def fitScaling(n_events, box, YTOF, YBVG, goodIDX=None, neigh_length_m=3):
    """Fit a linear scaling (slope A1, offset A0) of the joint TOF x BVG model
    to the observed event counts.

    Returns (scaled model, reduced chi^2 of the scaling fit, slope A1).
    """
    # Joint model, normalized to a maximum of 1.
    YJOINT = 1.0 * YTOF * YBVG
    YJOINT /= 1.0 * YJOINT.max()

    # Local-mean filter used to reject voxels with no neighboring events.
    convBox = 1.0 * \
        np.ones([neigh_length_m, neigh_length_m, neigh_length_m]) / \
        neigh_length_m**3
    conv_n_events = convolve(n_events, convBox)
    QX, QY, QZ = ICCFT.getQXQYQZ(box)

    dP = 8  # half-width (in voxels) of the window around the model maximum
    fitMaxIDX = tuple(
        np.array(np.unravel_index(YJOINT.argmax(), YJOINT.shape)))
    if goodIDX is None:
        # np.bool was removed in NumPy >= 1.24; builtin bool is equivalent.
        goodIDX = np.zeros_like(YJOINT).astype(bool)
        goodIDX[max(fitMaxIDX[0] - dP, 0):min(fitMaxIDX[0] + dP, goodIDX.shape[0]),
                max(fitMaxIDX[1] - dP, 0):min(fitMaxIDX[1] + dP, goodIDX.shape[1]),
                max(fitMaxIDX[2] - dP, 0):min(fitMaxIDX[2] + dP, goodIDX.shape[2])] = True
    goodIDX = np.logical_and(goodIDX, conv_n_events > 0)

    # A1 = slope, A0 = offset; slope constrained positive.
    scaleLinear = Polynomial(n=1)
    scaleLinear.constrain("A1>0")
    scaleX = YJOINT[goodIDX]
    scaleY = n_events[goodIDX]
    scaleWS = CreateWorkspace(
        OutputWorkspace='scaleWS', dataX=scaleX, dataY=scaleY)
    fitResultsScaling = Fit(Function=scaleLinear, InputWorkspace=scaleWS,
                            Output='scalefit', CostFunction='Unweighted least squares')
    A0 = fitResultsScaling[3].row(0)['Value']
    A1 = fitResultsScaling[3].row(1)['Value']
    YRET = A1 * YJOINT + A0
    chiSqRed = fitResultsScaling[1]
    return YRET, chiSqRed, A1
def getAngularHistogram(box, useIDX=None, nTheta=200, nPhi=200, zBG=1.96,
                        neigh_length_m=3, fracBoxToHistogram=1.0):
    """Histogram the events of *box* on a (theta, phi) grid.

    If *useIDX* is None, voxels are selected by background removal (zBG >= 0)
    or simply by having any events. *fracBoxToHistogram* shrinks the angular
    window symmetrically about its midpoint.

    Returns (histogram, theta bin edges, phi bin edges).
    """
    n_events = box.getNumEventsArray()
    hasEventsIDX = n_events > 0
    if useIDX is None:
        if zBG >= 0:
            goodIDX, pp_lambda = ICCFT.getBGRemovedIndices(n_events)
        else:
            goodIDX = hasEventsIDX
        useIDX = goodIDX

    # Spherical coordinates for every voxel.
    QX, QY, QZ = ICCFT.getQXQYQZ(box)
    R, THETA, PHI = ICCFT.cart2sph(QX, QY, QZ)

    def _clipped_window(angles):
        # Shrink [lo, hi] about its midpoint by fracBoxToHistogram.
        lo, hi = np.min(angles), np.max(angles)
        span = hi - lo
        mid = 0.5 * (lo + hi)
        lo = max(lo, mid - span * fracBoxToHistogram / 2.0)
        hi = min(hi, mid + span * fracBoxToHistogram / 2.0)
        return lo, hi

    thetaMin, thetaMax = _clipped_window(THETA)
    phiMin, phiMax = _clipped_window(PHI)
    thetaBins = np.linspace(thetaMin, thetaMax, nTheta)
    phiBins = np.linspace(phiMin, phiMax, nPhi)

    # Weighted 2D histogram over the selected voxels.
    h, thBins, phBins = np.histogram2d(THETA[useIDX], PHI[useIDX],
                                       weights=n_events[useIDX],
                                       bins=[thetaBins, phiBins])
    return h, thBins, phBins
def getAngularHistogram(box, useIDX=None, nTheta=200, nPhi=200, zBG=1.96,
                        neigh_length_m=3, fracBoxToHistogram=1.0):
    """Histogram the events of *box* on a (theta, phi) grid.

    If *useIDX* is None, voxels are selected by background removal (zBG >= 0)
    or simply by having any events. *fracBoxToHistogram* shrinks the angular
    window symmetrically about its midpoint.

    Returns (histogram, theta bin edges, phi bin edges).
    """
    n_events = box.getNumEventsArray()
    hasEventsIDX = n_events > 0
    if useIDX is None:
        if zBG >= 0:
            goodIDX, pp_lambda = ICCFT.getBGRemovedIndices(n_events)
        else:
            goodIDX = hasEventsIDX
        useIDX = goodIDX

    # Voxel coordinates in spherical form.
    QX, QY, QZ = ICCFT.getQXQYQZ(box)
    R, THETA, PHI = ICCFT.cart2sph(QX, QY, QZ)

    # Theta window, shrunk about the midpoint by fracBoxToHistogram.
    thetaMin = np.min(THETA)
    thetaMax = np.max(THETA)
    dTheta = thetaMax - thetaMin
    thetaMid = 0.5 * (thetaMin + thetaMax)
    thetaMin = max(thetaMin, thetaMid - dTheta * fracBoxToHistogram / 2.0)
    thetaMax = min(thetaMax, thetaMid + dTheta * fracBoxToHistogram / 2.0)

    # Phi window, same treatment.
    phiMin = np.min(PHI)
    phiMax = np.max(PHI)
    dPhi = phiMax - phiMin
    phiMid = 0.5 * (phiMin + phiMax)
    phiMin = max(phiMin, phiMid - dPhi * fracBoxToHistogram / 2.0)
    phiMax = min(phiMax, phiMid + dPhi * fracBoxToHistogram / 2.0)

    thetaEdges = np.linspace(thetaMin, thetaMax, nTheta)
    phiEdges = np.linspace(phiMin, phiMax, nPhi)

    selTheta = THETA[useIDX]
    selPhi = PHI[useIDX]
    selWeights = n_events[useIDX]

    # Weighted 2D histogram over the selected voxels.
    h, thBins, phBins = np.histogram2d(selTheta, selPhi, weights=selWeights,
                                       bins=[thetaEdges, phiEdges])
    return h, thBins, phBins
def getMDData(peak, loadDir, DetCalFile):
    """Return the MD event workspace for the peak's run, loading it if needed.

    Previously loaded runs are recognized by the 'BSGETBOX<run>' comment set
    on the workspace, so repeated calls for the same run reuse the workspace.
    """
    importFlag = True
    for ws in mtd.getObjectNames():
        if mtd[ws].getComment() == 'BSGETBOX%i' % peak.getRunNumber():
            # Python-3 compatible print (was a Python-2 print statement).
            print(' Using already loaded MDdata')
            MDdata = mtd[ws]
            importFlag = False
            break
    if importFlag:
        nxsTemplate = loadDir + 'TOPAZ_%i_event.nxs'
        fileName = nxsTemplate % peak.getRunNumber()
        MDdata = ICCFT.getSample(peak.getRunNumber(), DetCalFile, '', fileName)
        # Tag the workspace so the next call can find it.
        MDdata.setComment('BSGETBOX%i' % peak.getRunNumber())
    return MDdata
def getXTOF(box, peak):
    """Return the TOF (in microseconds) of every voxel in *box*.

    Temporarily moves *peak* to each voxel's Q to query the instrument
    geometry (L1 + L2 and scattering angle), then restores the original Q.
    """
    from mantid.kernel import V3D
    QX, QY, QZ = ICCFT.getQXQYQZ(box)
    origQS = peak.getQSampleFrame()
    tList = np.zeros_like(QX)
    # xrange is Python-2 only; range is the Python-3 equivalent.
    for i in range(QX.shape[0]):
        for j in range(QX.shape[1]):
            for k in range(QX.shape[2]):
                newQ = V3D(QX[i, j, k], QY[i, j, k], QZ[i, j, k])
                peak.setQSampleFrame(newQ)
                flightPath = peak.getL1() + peak.getL2()
                scatteringHalfAngle = 0.5 * peak.getScattering()
                # convert to microseconds
                tList[i, j, k] = 3176.507 * flightPath * \
                    np.sin(scatteringHalfAngle) / np.linalg.norm(newQ)
    # Restore the peak's original position.
    peak.setQSampleFrame(origQS)
    return tList
def getMDData(peak, nxsTemplate, DetCalFile, workDir, q_frame):
    """Return the MD event workspace for the peak's run, loading it if needed.

    Previously loaded runs are recognized by the 'BSGETBOX<run>' comment set
    on the workspace, so repeated calls for the same run reuse the workspace.
    """
    importFlag = True
    for ws in mtd.getObjectNames():
        if mtd[ws].getComment() == 'BSGETBOX%i' % peak.getRunNumber():
            # Python-3 compatible print (was a Python-2 print statement).
            print(' Using already loaded MDdata')
            MDdata = mtd[ws]
            importFlag = False
            break
    if importFlag:
        fileName = nxsTemplate % peak.getRunNumber()
        MDdata = ICCFT.getSample(peak.getRunNumber(), DetCalFile, workDir,
                                 fileName, q_frame=q_frame)
        # Tag the workspace so the next call can find it.
        MDdata.setComment('BSGETBOX%i' % peak.getRunNumber())
    return MDdata
# Some parameters
importPeaks = True
print('Loading peaks_ws')
# Reuse an already loaded peaks workspace if one is tagged with this file name.
for ws in mtd.getObjectNames():
    if mtd[ws].getComment() == '%s' % peaksFile:
        # Python-3 compatible print (was a Python-2 print statement).
        print(' using already loaded peaks file')
        importPeaks = False
        peaks_ws = mtd[ws]
if importPeaks:
    peaks_ws = LoadIsawPeaks(Filename=peaksFile)
    peaks_ws.setComment(peaksFile)
LoadIsawUB(InputWorkspace=peaks_ws, FileName=UBFile)
UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
df = pd.DataFrame(peaks_ws.toDict())
df = df[df['DSpacing'] > 1.6]


def getReflectionFamily(row):
    """Compute the reflection families of (h,k,l) and (-h,-k,-l) for space
    group P 32 2 1.

    NOTE(review): this function has no return statement, so it currently
    returns None — the source appears truncated; confirm the intended return
    (presumably refFamPlus / refFamMinus) against the original script.
    """
    sg = SpaceGroupFactory.createSpaceGroup('P 32 2 1')
    pg = sg.getPointGroup()
    h = float(row['h'])
    k = float(row['k'])
    l = float(row['l'])
    hklPlus = np.array([h, k, l])
    hklMinus = np.array([-h, -k, -l])
    refFamPlus = pg.getReflectionFamily(hklPlus)
    refFamMinus = pg.getReflectionFamily(hklMinus)
def get3DPeak(peak, box, padeCoefficients, qMask, nTheta=150, nPhi=150, fracBoxToHistogram=1.0,
              plotResults=False, zBG=1.96, bgPolyOrder=1, fICCParams=None, oldICCFit=None,
              strongPeakParams=None, forceCutoff=250, edgeCutoff=15, predCoefficients=None,
              neigh_length_m=3, q_frame='sample', dtSpread=0.03, pplmin_frac=0.8,
              pplmax_frac=1.5, mindtBinWidth=1, figureNumber=2):
    """Fit a full 3D peak model: a 1D TOF profile (Ikeda-Carpenter convoluted)
    times a bivariate Gaussian (BVG) in (theta, phi), scaled linearly to the
    measured events.

    Returns (Y2, goodIDX, pp_lambda, retParams) where Y2 is the scaled model
    over the box, goodIDX the voxel mask used, and retParams a dict of fitted
    parameters plus diagnostics.
    """
    n_events = box.getNumEventsArray()
    # Choose which Q frame the peak center q0 is expressed in.
    if q_frame == 'lab':
        q0 = peak.getQLabFrame()
    elif q_frame == 'sample':
        q0 = peak.getQSampleFrame()
    else:
        raise ValueError(
            'BVGFT:get3DPeak - q_frame must be either \'lab\' or \'sample\'; %s was provided' % q_frame)

    if fICCParams is None:
        # No previous TOF fit available: do background removal and fit the
        # TOF profile from scratch.
        goodIDX, pp_lambda = ICCFT.getBGRemovedIndices(
            n_events, peak=peak, box=box, qMask=qMask, calc_pp_lambda=True,
            padeCoefficients=padeCoefficients, predCoefficients=predCoefficients,
            neigh_length_m=neigh_length_m, pp_lambda=None, pplmin_frac=pplmin_frac,
            pplmax_frac=pplmax_frac, mindtBinWidth=mindtBinWidth)
        YTOF, fICC, x_lims = fitTOFCoordinate(
            box, peak, padeCoefficients, dtSpread=dtSpread, qMask=qMask,
            bgPolyOrder=bgPolyOrder, zBG=zBG, plotResults=plotResults, pp_lambda=pp_lambda,
            neigh_length_m=neigh_length_m, pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
            mindtBinWidth=mindtBinWidth)
    else:  # we already did I-C profile, so we'll just read the parameters
        pp_lambda = fICCParams[-1]
        fICC = ICC.IkedaCarpenterConvoluted()
        fICC.init()
        fICC['A'] = fICCParams[5]
        fICC['B'] = fICCParams[6]
        fICC['R'] = fICCParams[7]
        fICC['T0'] = fICCParams[8]
        fICC['scale'] = fICCParams[9]
        fICC['hatWidth'] = fICCParams[10]
        fICC['k_conv'] = fICCParams[11]
        goodIDX, _ = ICCFT.getBGRemovedIndices(
            n_events, pp_lambda=pp_lambda, qMask=qMask)
        # Get the 3D TOF component, YTOF, by re-evaluating the stored profile.
        if oldICCFit is not None:
            x_lims = [np.min(oldICCFit[0]), np.max(oldICCFit[0])]
            tofxx = oldICCFit[0]
            tofyy = oldICCFit[2]
        else:
            dtSpread = 0.03
            x_lims = [(1 - dtSpread) * peak.getTOF(), (1 + dtSpread) * peak.getTOF()]
            tofxx = np.arange(x_lims[0], x_lims[1], 5)
            tofyy = fICC.function1D(tofxx)
        # Outside the fit window the profile is zero.
        ftof = interp1d(tofxx, tofyy, bounds_error=False, fill_value=0.0)
        XTOF = boxToTOFThetaPhi(box, peak)[:, :, :, 0]
        YTOF = ftof(XTOF)

    # Get YBVG - the detector component
    if goodIDX is not None:
        goodIDX *= qMask  # in-place AND with the q-space mask
    X = boxToTOFThetaPhi(box, peak)
    dEdge = edgeCutoff
    # Force strong-peak parameters for weak or edge peaks.
    # NOTE(review): detector size 255 is hard-coded here (the newer version of
    # this function reads it from the instrument parameters).
    useForceParams = peak.getIntensity() < forceCutoff or peak.getRow() <= dEdge or peak.getRow(
    ) >= 255 - dEdge or peak.getCol() <= dEdge or peak.getCol() >= 255 - dEdge
    if strongPeakParams is not None and useForceParams:  # We will force parameters on this fit
        # Nearest strong peak in (phi, theta) supplies the BVG shape.
        ph = np.arctan2(q0[1], q0[0])
        th = np.arctan2(q0[2], np.hypot(q0[0], q0[1]))
        phthPeak = np.array([ph, th])
        tmp = strongPeakParams[:, :2] - phthPeak
        distSq = tmp[:, 0]**2 + tmp[:, 1]**2
        nnIDX = np.argmin(distSq)
        params, h, t, p = doBVGFit(box, nTheta=nTheta, nPhi=nPhi,
                                   fracBoxToHistogram=fracBoxToHistogram, goodIDX=goodIDX,
                                   forceParams=strongPeakParams[nnIDX])
    else:  # Just do the fit - no nearest neighbor assumptions
        params, h, t, p = doBVGFit(
            box, nTheta=nTheta, nPhi=nPhi, fracBoxToHistogram=fracBoxToHistogram,
            goodIDX=goodIDX)
    if plotResults:
        compareBVGFitData(
            box, params[0], nTheta, nPhi, fracBoxToHistogram=fracBoxToHistogram,
            useIDX=goodIDX, figNumber=figureNumber)

    # set up the BVG from the fitted parameters
    # A = params[0][0]  # never used
    mu0 = params[0][1]
    mu1 = params[0][2]
    sigX = params[0][3]
    sigY = params[0][4]
    p = params[0][5]
    bgBVG = params[0][6]
    # 2x2 covariance matrix with correlation p.
    sigma = np.array([[sigX**2, p * sigX * sigY], [p * sigX * sigY, sigY**2]])
    mu = np.array([mu0, mu1])
    XTOF = X[:, :, :, 0]
    XTHETA = X[:, :, :, 1]
    XPHI = X[:, :, :, 2]
    YBVG = bvg(1.0, mu, sigma, XTHETA, XPHI, 0)

    # Do scaling to the data
    Y, redChiSq, scaleFactor = fitScaling(n_events, box, YTOF, YBVG)
    # Rebuild the model from the analytic components and apply the fitted scale.
    YBVG2 = bvg(1.0, mu, sigma, XTHETA, XPHI, 0)
    YTOF2 = getYTOF(fICC, XTOF, x_lims)
    Y2 = YTOF2 * YBVG2
    Y2 = scaleFactor * Y2 / Y2.max()
    QX, QY, QZ = ICCFT.getQXQYQZ(box)
    fitMaxIDX = tuple(np.array(np.unravel_index(Y2.argmax(), Y2.shape)))
    newCenter = np.array([QX[fitMaxIDX], QY[fitMaxIDX], QZ[fitMaxIDX]])

    # Set a dictionary with the parameters to return
    retParams = {}
    retParams['Alpha'] = fICC['A']
    retParams['Beta'] = fICC['B']
    retParams['R'] = fICC['R']
    retParams['T0'] = fICC['T0']
    retParams['scale'] = fICC['scale']
    retParams['k_conv'] = fICC['k_conv']
    retParams['muTH'] = mu0
    retParams['muPH'] = mu1
    retParams['sigX'] = sigX
    retParams['sigY'] = sigY
    retParams['sigP'] = p
    retParams['bgBVG'] = bgBVG
    retParams['scale3d'] = scaleFactor
    retParams['chiSq3d'] = redChiSq
    retParams['dQ'] = np.linalg.norm(newCenter - q0)
    retParams['newQ'] = newCenter
    return Y2, goodIDX, pp_lambda, retParams
# Load the event data and convert to an MD workspace in Q space.
event_ws = Load(Filename=eventFileName, OutputWorkspace='event_ws')
MDdata = ConvertToMD(InputWorkspace='event_ws', QDimensions='Q3D', dEAnalysisMode='Elastic',
                     Q3DFrames=Q3DFrame, QConversionScales='Q in A^-1',
                     MinValues='%f, %f, %f' % (qLow, qLow, qLow),
                     # BUGFIX: the Mantid property is 'MaxValues' (was 'Maxvalues').
                     MaxValues='%f, %f, %f' % (qHigh, qHigh, qHigh),
                     MaxRecursionDepth=10, LorentzCorrection=False)
MDdata.setComment('MD_%i' % peaks_ws.getPeak(peakNumber).getRunNumber())
addInstrumentParameters(peaks_ws)
LoadIsawUB(InputWorkspace=peaks_ws, Filename=UBFile)
UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
dQ[dQ > DQMax] = DQMax
if strongPeakParamsFile is None:
    strongPeakParams = None
else:
    # Use a context manager so the file handle is closed
    # (pickle.load(open(...)) leaked it).
    with open(strongPeakParamsFile, 'rb') as f:
        strongPeakParams = pickle.load(f)
padeCoefficients = ICCFT.getModeratorCoefficients(moderatorCoefficientsFile)
# Instrument-specific fitting parameters.
NTheta = peaks_ws.getInstrument().getIntParameter("numBinsTheta")[0]
NPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
MindtBinWidth = peaks_ws.getInstrument().getNumberParameter("minDTBinWidth")[0]
MaxdtBinWidth = peaks_ws.getInstrument().getNumberParameter("maxDTBinWidth")[0]
FracHKL = 0.4  # Fraction of HKL to consider for profile fitting.
DQPixel = peaks_ws.getInstrument().getNumberParameter("DQPixel")[0]
peakMaskSize = peaks_ws.getInstrument().getIntParameter("peakMaskSize")[0]
def get3DPeak(peak, peaks_ws, box, padeCoefficients, qMask, nTheta=150, nPhi=150,
              fracBoxToHistogram=1.0, plotResults=False, zBG=1.96, bgPolyOrder=1,
              fICCParams=None, oldICCFit=None, strongPeakParams=None, forceCutoff=250,
              edgeCutoff=15, neigh_length_m=3, q_frame='sample', dtSpread=0.03,
              pplmin_frac=0.8, pplmax_frac=1.5, mindtBinWidth=1, maxdtBinWidth=50,
              figureNumber=2, peakMaskSize=5, iccFitDict=None):
    """Fit a full 3D peak model: a 1D TOF profile (Ikeda-Carpenter convoluted)
    times a bivariate Gaussian (BVG) in (theta, phi), scaled linearly to the
    measured events.

    Returns (Y2, goodIDX, pp_lambda, retParams) where Y2 is the scaled model
    over the box, goodIDX the voxel mask used, and retParams a dict of fitted
    parameters plus diagnostics (including the TOF fit chi^2).
    """
    n_events = box.getNumEventsArray()
    # Choose which Q frame the peak center q0 is expressed in.
    if q_frame == 'lab':
        q0 = peak.getQLabFrame()
    elif q_frame == 'sample':
        q0 = peak.getQSampleFrame()
    else:
        raise ValueError(
            'BVGFT:get3DPeak - q_frame must be either \'lab\' or \'sample\'; %s was provided' % q_frame)

    if fICCParams is None:
        # No previous TOF fit available: do background removal and fit the
        # TOF profile from scratch.
        goodIDX, pp_lambda = ICCFT.getBGRemovedIndices(
            n_events, peak=peak, box=box, qMask=qMask, calc_pp_lambda=True,
            padeCoefficients=padeCoefficients, neigh_length_m=neigh_length_m, pp_lambda=None,
            pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac, mindtBinWidth=mindtBinWidth,
            maxdtBinWidth=maxdtBinWidth, peakMaskSize=peakMaskSize, iccFitDict=iccFitDict)
        YTOF, fICC, x_lims = fitTOFCoordinate(
            box, peak, padeCoefficients, dtSpread=dtSpread, qMask=qMask,
            bgPolyOrder=bgPolyOrder, zBG=zBG, plotResults=plotResults, pp_lambda=pp_lambda,
            neigh_length_m=neigh_length_m, pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
            mindtBinWidth=mindtBinWidth, maxdtBinWidth=maxdtBinWidth,
            peakMaskSize=peakMaskSize, iccFitDict=iccFitDict)
        # Reduced chi^2 of the TOF fit is the last entry of the parameter table.
        chiSqTOF = mtd['fit_Parameters'].column(1)[-1]
    else:  # we already did I-C profile, so we'll just read the parameters
        pp_lambda = fICCParams[-1]
        fICC = ICC.IkedaCarpenterConvoluted()
        fICC.init()
        fICC['A'] = fICCParams[5]
        fICC['B'] = fICCParams[6]
        fICC['R'] = fICCParams[7]
        fICC['T0'] = fICCParams[8]
        fICC['Scale'] = fICCParams[9]
        fICC['HatWidth'] = fICCParams[10]
        fICC['KConv'] = fICCParams[11]
        goodIDX, _ = ICCFT.getBGRemovedIndices(
            n_events, pp_lambda=pp_lambda, qMask=qMask, peakMaskSize=peakMaskSize,
            iccFitDict=iccFitDict)
        chiSqTOF = fICCParams[4]  # Last entry
        # Get the 3D TOF component, YTOF, by re-evaluating the stored profile.
        if oldICCFit is not None:
            x_lims = [np.min(oldICCFit[0]), np.max(oldICCFit[0])]
            tofxx = oldICCFit[0]
            tofyy = oldICCFit[2]
        else:
            dtSpread = 0.03
            x_lims = [(1 - dtSpread) * peak.getTOF(), (1 + dtSpread) * peak.getTOF()]
            tofxx = np.arange(x_lims[0], x_lims[1], 5)
            tofyy = fICC.function1D(tofxx)
        # Outside the fit window the profile is zero.
        ftof = interp1d(tofxx, tofyy, bounds_error=False, fill_value=0.0)
        XTOF = boxToTOFThetaPhi(box, peak)[:, :, :, 0]
        YTOF = ftof(XTOF)

    # Get YBVG - the detector component
    if goodIDX is not None:
        goodIDX *= qMask  # in-place AND with the q-space mask
    X = boxToTOFThetaPhi(box, peak)
    dEdge = edgeCutoff
    # This section defines detector size to determine if a peak is too
    # close to the edge.  Order is [NROWS, NCOLS].
    # NOTE(review): bare 'except:' also swallows KeyboardInterrupt/SystemExit;
    # 'except Exception:' would be safer.  Also, UserWarning(...) only
    # constructs a warning object — it is never emitted; warnings.warn(...)
    # was presumably intended.
    try:
        numDetRows = peaks_ws.getInstrument().getIntParameter("numDetRows")[0]
        numDetCols = peaks_ws.getInstrument().getIntParameter("numDetCols")[0]
        nPixels = [numDetRows, numDetCols]
    except:
        UserWarning('Detector size not found in instrument parameters file. Assuming a 255*255 detector!')
        nPixels = [255,255]
    # Force strong-peak parameters for weak or edge peaks.
    useForceParams = peak.getIntensity() < forceCutoff or peak.getRow() <= dEdge or peak.getRow(
    ) >= nPixels[0] - dEdge or peak.getCol() <= dEdge or peak.getCol() >= nPixels[1] - dEdge

    # Here we retrieve some instrument specific parameters (best-effort with
    # defaults if the instrument parameter file does not define them).
    try:
        doPeakConvolution = peaks_ws.getInstrument().getBoolParameter("fitConvolvedPeak")[0]
    except:
        doPeakConvolution = False
    try:
        sigX0Scale = peaks_ws.getInstrument().getNumberParameter("sigX0Scale")[0]
    except:
        sigX0Scale = 1.0
    try:
        sigY0Scale = peaks_ws.getInstrument().getNumberParameter("sigY0Scale")[0]
    except:
        sigY0Scale = 1.0

    if strongPeakParams is not None and useForceParams:  # We will force parameters on this fit
        # Nearest strong peak in (phi, theta) supplies the BVG shape.
        ph = np.arctan2(q0[1], q0[0])
        th = np.arctan2(q0[2], np.hypot(q0[0], q0[1]))
        phthPeak = np.array([ph, th])
        tmp = strongPeakParams[:, :2] - phthPeak
        distSq = tmp[:, 0]**2 + tmp[:, 1]**2
        nnIDX = np.argmin(distSq)
        params, h, t, p = doBVGFit(box, nTheta=nTheta, nPhi=nPhi,
                                   fracBoxToHistogram=fracBoxToHistogram, goodIDX=goodIDX,
                                   forceParams=strongPeakParams[nnIDX],
                                   doPeakConvolution=doPeakConvolution,
                                   sigX0Scale=sigX0Scale, sigY0Scale=sigY0Scale)
    else:  # Just do the fit - no nearest neighbor assumptions
        params, h, t, p = doBVGFit(
            box, nTheta=nTheta, nPhi=nPhi, fracBoxToHistogram=fracBoxToHistogram,
            goodIDX=goodIDX, doPeakConvolution=doPeakConvolution, sigX0Scale=sigX0Scale,
            sigY0Scale=sigY0Scale)
    if plotResults:
        compareBVGFitData(
            box, params[0], nTheta, nPhi, fracBoxToHistogram=fracBoxToHistogram,
            useIDX=goodIDX, figNumber=figureNumber)

    # set up the BVG from the fitted parameters
    # A = params[0][0]  # never used
    mu0 = params[0][1]
    mu1 = params[0][2]
    sigX = params[0][3]
    sigY = params[0][4]
    p = params[0][5]
    bgBVG = params[0][6]
    # 2x2 covariance matrix with correlation p.
    sigma = np.array([[sigX**2, p * sigX * sigY], [p * sigX * sigY, sigY**2]])
    mu = np.array([mu0, mu1])
    XTOF = X[:, :, :, 0]
    XTHETA = X[:, :, :, 1]
    XPHI = X[:, :, :, 2]
    YBVG = bvg(1.0, mu, sigma, XTHETA, XPHI, 0)

    # Do scaling to the data
    if doPeakConvolution:  # This means peaks will have gaps, so we only use good data to scale
        Y, redChiSq, scaleFactor = fitScaling(n_events, box, YTOF, YBVG, goodIDX=goodIDX)
    else:
        Y, redChiSq, scaleFactor = fitScaling(n_events, box, YTOF, YBVG)
    # Rebuild the model from the analytic components and apply the fitted scale.
    YBVG2 = bvg(1.0, mu, sigma, XTHETA, XPHI, 0)
    YTOF2 = getYTOF(fICC, XTOF, x_lims)
    Y2 = YTOF2 * YBVG2
    Y2 = scaleFactor * Y2 / Y2.max()
    QX, QY, QZ = ICCFT.getQXQYQZ(box)
    fitMaxIDX = tuple(np.array(np.unravel_index(Y2.argmax(), Y2.shape)))
    newCenter = np.array([QX[fitMaxIDX], QY[fitMaxIDX], QZ[fitMaxIDX]])

    # Set a dictionary with the parameters to return
    retParams = {}
    retParams['Alpha'] = fICC['A']
    retParams['Beta'] = fICC['B']
    retParams['R'] = fICC['R']
    retParams['T0'] = fICC['T0']
    retParams['Scale'] = fICC['Scale']
    retParams['KConv'] = fICC['KConv']
    retParams['MuTH'] = mu0
    retParams['MuPH'] = mu1
    retParams['SigX'] = sigX
    retParams['SigY'] = sigY
    retParams['SigP'] = p
    retParams['bgBVG'] = bgBVG
    retParams['scale3d'] = scaleFactor
    retParams['chiSq3d'] = redChiSq
    retParams['chiSq'] = chiSqTOF
    retParams['dQ'] = np.linalg.norm(newCenter - q0)
    retParams['newQ'] = newCenter
    return Y2, goodIDX, pp_lambda, retParams
# File locations and fitting parameters for this crystal/run.
peaksFile = '/home/ntv/mandi_preprocessing/beta_lac_july2018/beta_lac_july2018_secondxtal.integrate'
UBFile = '/home/ntv/mandi_preprocessing/beta_lac_july2018/beta_lac_july2018_secondxtal.mat'
DetCalFile = '/home/ntv/mandi_preprocessing/MANDI_June2018.DetCal'
workDir = '/SNS/users/ntv/dropbox/'  #End with '/'
nxsTemplate = '/SNS/MANDI/IPTS-8776/nexus/MANDI_%i.nxs.h5'
dQPixel = 0.003
q_frame = 'lab'
pplmin_frac = 0.9
pplmax_frac = 1.1
mindtBinWidth = 15
maxdtBinWidth = 50
moderatorFile = '/home/ntv/integrate/bl11_moderatorCoefficients_2018.dat'

# Some parameters
peakToGet = 3  #Arbitrary - just has to be less than the number of peaks
padeCoefficients = ICCFT.getModeratorCoefficients(moderatorFile)
removeEdges = False
importPeaks = True
print('Loading peaks_ws')
# Reuse an already loaded peaks workspace if one is tagged with this file name.
for ws in mtd.getObjectNames():
    if mtd[ws].getComment() == '%s' % peaksFile:
        # Python-3 compatible print (was a Python-2 print statement).
        print(' using already loaded peaks file')
        importPeaks = False
        peaks_ws = mtd[ws]
if importPeaks:
    peaks_ws = LoadIsawPeaks(Filename=peaksFile)
    peaks_ws.setComment(peaksFile)
LoadIsawUB(InputWorkspace=peaks_ws, FileName=UBFile)
UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
def showPeakFit(peakNumber, peaks_ws, MDData, UBMatrix, dQ, padeCoefficients, predpplCoefficients,
                mindtBinWidth=15, dQPixel=0.003, fracHKL=0.5, q_frame='lab', neigh_length_m=3,
                dtS=0.015, pplmin_frac=0.4, pplmax_frac=1.5, nTheta=50, nPhi=50,
                intensityCutoff=250, edgeCutoff=3, fracStop=0.05, plotResults=False,
                strongPeakParams=None):
    """Fit one peak two ways (predicted pp_lambda coefficients vs. a fixed
    pp_lambda from the local mean background), print both integrated
    intensities, and return an ipywidgets slider plus data for interactive
    slice viewing.

    Returns (slider, n_events, Y3D1, Y3D2).
    """
    # Get some peak variables
    peak = peaks_ws.getPeak(peakNumber)
    wavelength = peak.getWavelength()  #in Angstrom
    energy = 81.804 / wavelength**2 / 1000.0  #in eV
    flightPath = peak.getL1() + peak.getL2()  #in m
    scatteringHalfAngle = 0.5 * peak.getScattering()
    # NOTE(review): fracHKL is hard-coded to 0.5 here even though it is a
    # parameter — confirm whether the argument should be passed through.
    Box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDData, UBMatrix, peakNumber, dQ, fracHKL=0.5,
                              dQPixel=dQPixel, q_frame=q_frame)
    box = Box
    # Set up our filters
    qMask = ICCFT.getHKLMask(UBMatrix, frac=fracHKL, dQPixel=dQPixel, dQ=dQ)
    n_events = Box.getNumEventsArray()
    nX, nY, nZ = n_events.shape
    cX = nX // 2
    cY = nY // 2
    cZ = nZ // 2
    dP = 5
    # Mask out the central (peak) region so the mean below is background only.
    qMask[cX - dP:cX + dP, cY - dP:cY + dP, cZ - dP:cZ + dP] = 0
    neigh_length_m = 3
    convBox = 1.0 * \
        np.ones([neigh_length_m, neigh_length_m, neigh_length_m]) / neigh_length_m**3
    conv_n_events = convolve(n_events, convBox)
    bgMask = np.logical_and(conv_n_events > 0, qMask > 0)
    meanBG = np.mean(n_events[bgMask])
    #predppl = np.polyval(f,meanBG)*1.96
    predppl = np.polyval([1, 0], meanBG) * 1.96
    # Restore the full (unmasked) qMask for the actual fits.
    qMask = ICCFT.getHKLMask(UBMatrix, frac=0.5, dQPixel=dQPixel, dQ=dQ)
    # Calculate the fit with the supplied pp_lambda prediction coefficients.
    Y3D1, gIDX1, pp_lambda, params1 = BVGFT.get3DPeak(
        peak, box, padeCoefficients, qMask, nTheta=nTheta, nPhi=nPhi, plotResults=plotResults,
        zBG=1.96, fracBoxToHistogram=1.0, bgPolyOrder=1, strongPeakParams=strongPeakParams,
        predCoefficients=predpplCoefficients, q_frame=q_frame, mindtBinWidth=mindtBinWidth,
        pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac, forceCutoff=intensityCutoff,
        edgeCutoff=edgeCutoff)
    given_ppl = predppl
    # Second fit: pin pp_lambda to the background-derived prediction by using
    # constant coefficients and a near-degenerate [pplmin, pplmax] window.
    predpplCoefficients2 = [0, 0, predppl]
    Y3D2, gIDX2, pp_lambda2, params2 = BVGFT.get3DPeak(
        peak, box, padeCoefficients, qMask, nTheta=nTheta, nPhi=nPhi, plotResults=False,
        zBG=1.96, fracBoxToHistogram=1.0, bgPolyOrder=1, strongPeakParams=strongPeakParams,
        predCoefficients=predpplCoefficients2, q_frame=q_frame, mindtBinWidth=mindtBinWidth,
        pplmin_frac=0.99999, pplmax_frac=1.0001, forceCutoff=intensityCutoff,
        edgeCutoff=edgeCutoff, figureNumber=3)
    # Integrated intensities above the fracStop threshold for both fits.
    I1 = np.sum(Y3D1[Y3D1 / Y3D1.max() > fracStop])
    I2 = np.sum(Y3D2[Y3D2 / Y3D2.max() > fracStop])
    print('Peak %i: Old: %i; New: %i; Ell: %i' % (peakNumber, I1, I2, peak.getIntensity()))
    # Slider over z slices for interactive inspection in a notebook.
    slider = widgets.IntSlider(value=Y3D1.shape[1] // 2, min=0, max=Y3D1.shape[2] - 1, step=1,
                               description='z Slice:', disabled=False, continuous_update=False,
                               orientation='horizontal', readout=True, readout_format='d')
    return slider, n_events, Y3D1, Y3D2
def PyExec(self):
    """
    Execute the algorithm: profile-fit every requested peak in the peaks
    workspace against the MD event data, replacing each peak's intensity and
    sigma with the 3D profile-fitted values, and emit a table workspace of
    the fit parameters.

    Reads algorithm properties (workspaces, files, cutoffs), builds the HKL
    mask, then loops over the selected peaks calling BVGFT.get3DPeak.  Peaks
    whose fit fails are given intensity 0 +/- 1.  Outputs are set on
    'OutputPeaksWorkspace' and 'OutputParamsWorkspace'.
    """
    import ICCFitTools as ICCFT
    import BVGFitTools as BVGFT
    from mantid.simpleapi import LoadIsawUB
    import pickle
    from scipy.ndimage.filters import convolve
    # Pull all algorithm properties up front.
    MDdata = self.getProperty('InputWorkspace').value
    peaks_ws = self.getProperty('PeaksWorkspace').value
    fracHKL = self.getProperty('FracHKL').value
    fracStop = self.getProperty('FracStop').value
    dQMax = self.getProperty('DQMax').value
    UBFile = self.getProperty('UBFile').value
    padeFile = self.getProperty('ModeratorCoefficientsFile').value
    strongPeaksParamsFile = self.getProperty('StrongPeakParamsFile').value
    forceCutoff = self.getProperty('IntensityCutoff').value
    edgeCutoff = self.getProperty('EdgeCutoff').value
    peakNumberToFit = self.getProperty('PeakNumber').value
    LoadIsawUB(InputWorkspace=peaks_ws, FileName=UBFile)
    UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
    # Box half-widths in Q, clipped to dQMax per axis.
    dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
    dQ[dQ > dQMax] = dQMax
    dQPixel = self.getProperty('DQPixel').value
    q_frame = 'lab'
    mtd['MDdata'] = MDdata
    padeCoefficients = ICCFT.getModeratorCoefficients(padeFile)
    # strongPeakParams was pickled under Python 2; latin1 is needed on Python 3
    # to decode the embedded numpy arrays.
    if sys.version_info[0] == 3:
        strongPeakParams = pickle.load(open(strongPeaksParamsFile, 'rb'), encoding='latin1')
    else:
        strongPeakParams = pickle.load(open(strongPeaksParamsFile, 'rb'))
    predpplCoefficients = self.getProperty('PredPplCoefficients').value
    nTheta = self.getProperty('NTheta').value
    nPhi = self.getProperty('NPhi').value
    zBG = 1.96
    mindtBinWidth = self.getProperty('MindtBinWidth').value
    pplmin_frac = self.getProperty('MinpplFrac').value
    pplmax_frac = self.getProperty('MaxpplFrac').value
    sampleRun = self.getProperty('RunNumber').value
    neigh_length_m = 3
    qMask = ICCFT.getHKLMask(UBMatrix, frac=fracHKL, dQPixel=dQPixel, dQ=dQ)
    numgood = 0
    numerrors = 0
    # Create the parameters workspace
    keys = [
        'peakNumber', 'Alpha', 'Beta', 'R', 'T0', 'bgBVG', 'chiSq3d', 'dQ', 'KConv', 'MuPH',
        'MuTH', 'newQ', 'Scale', 'scale3d', 'SigP', 'SigX', 'SigY', 'Intens3d', 'SigInt3d'
    ]
    datatypes = ['float'] * len(keys)
    # 'newQ' is a 3-vector; every other column is a scalar float.
    datatypes[np.where(np.array(keys) == 'newQ')[0][0]] = 'V3D'
    params_ws = CreateEmptyTableWorkspace()
    for key, datatype in zip(keys, datatypes):
        params_ws.addColumn(datatype, key)
    # Set the peak numbers we're fitting
    if peakNumberToFit < 0:
        peaksToFit = range(peaks_ws.getNumberPeaks())
    else:
        peaksToFit = [peakNumberToFit]
    # And we're off!
    peaks_ws_out = peaks_ws.clone()
    np.warnings.filterwarnings(
        'ignore'
    )  # There can be a lot of warnings for bad solutions that get rejected.
    for peakNumber in peaksToFit:  #range(peaks_ws.getNumberPeaks()):
        peak = peaks_ws_out.getPeak(peakNumber)
        try:
            if peak.getRunNumber() == sampleRun:
                # NOTE(review): fracHKL property is read above but the box is
                # extracted with a hard-coded fracHKL=0.5 — confirm intended.
                box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDdata, UBMatrix, peakNumber, dQ,
                                          fracHKL=0.5, dQPixel=dQPixel, q_frame=q_frame)
                # Will force weak peaks to be fit using a neighboring peak profile
                Y3D, goodIDX, pp_lambda, params = BVGFT.get3DPeak(
                    peak, box, padeCoefficients, qMask, nTheta=nTheta, nPhi=nPhi,
                    plotResults=False, zBG=zBG, fracBoxToHistogram=1.0, bgPolyOrder=1,
                    strongPeakParams=strongPeakParams, predCoefficients=predpplCoefficients,
                    q_frame=q_frame, mindtBinWidth=mindtBinWidth, pplmin_frac=pplmin_frac,
                    pplmax_frac=pplmax_frac, forceCutoff=forceCutoff, edgeCutoff=edgeCutoff)
                # First we get the peak intensity: sum of the fitted profile over
                # voxels above the fracStop threshold.
                peakIDX = Y3D / Y3D.max() > fracStop
                intensity = np.sum(Y3D[peakIDX])
                # Now the number of background counts under the peak assuming a constant bg across the box
                n_events = box.getNumEventsArray()
                convBox = 1.0 * np.ones([
                    neigh_length_m, neigh_length_m, neigh_length_m
                ]) / neigh_length_m**3
                conv_n_events = convolve(n_events, convBox)
                bgIDX = reduce(np.logical_and,
                               [~goodIDX, qMask, conv_n_events > 0])
                bgEvents = np.mean(n_events[bgIDX]) * np.sum(peakIDX)
                # Now we consider the variation of the fit. These are done as three independent fits. So we need to consider
                # the variance within our fit sig^2 = sum(N*(yFit-yData)) / sum(N) and scale by the number of parameters that go into
                # the fit. In total: 10 (removing scale variables)
                # TODO: It's not clear to me if we should be normalizing by #params - so we'll leave it for now.
                w_events = n_events.copy()
                w_events[w_events == 0] = 1  # avoid zero weights in the average
                varFit = np.average((n_events[peakIDX] - Y3D[peakIDX]) *
                                    (n_events[peakIDX] - Y3D[peakIDX]),
                                    weights=(w_events[peakIDX]))
                sigma = np.sqrt(intensity + bgEvents + varFit)
                compStr = 'peak {:d}; original: {:4.2f} +- {:4.2f}; new: {:4.2f} +- {:4.2f}'.format(
                    peakNumber, peak.getIntensity(), peak.getSigmaIntensity(), intensity, sigma)
                logger.information(compStr)
                # Save the results
                params['peakNumber'] = peakNumber
                params['Intens3d'] = intensity
                params['SigInt3d'] = sigma
                params['newQ'] = V3D(params['newQ'][0], params['newQ'][1], params['newQ'][2])
                params_ws.addRow(params)
                peak.setIntensity(intensity)
                peak.setSigmaIntensity(sigma)
                numgood += 1
        except KeyboardInterrupt:
            np.warnings.filterwarnings('default')  # Re-enable on exit
            raise
        except:
            #raise
            # Any other failure: mark the peak as unfit (0 +/- 1) and move on.
            numerrors += 1
            peak.setIntensity(0.0)
            peak.setSigmaIntensity(1.0)
    # Cleanup: remove the scratch workspaces left behind by the per-peak fits.
    for wsName in mtd.getObjectNames():
        if 'fit_' in wsName or 'bvgWS' in wsName or 'tofWS' in wsName or 'scaleWS' in wsName:
            mtd.remove(wsName)
    np.warnings.filterwarnings('default')  # Re-enable on exit
    # Set the output
    self.setProperty('OutputPeaksWorkspace', peaks_ws_out)
    self.setProperty('OutputParamsWorkspace', params_ws)
return isOutlier else: return 0.0 checkIDX = (df['Intens'] > 0) & (df['chiSq']<50.0) df['isOutlier'] = 0.0 #df.loc[checkIDX,'isOutlier'] = df[checkIDX].groupby('hklFam')['Intens'].transform(isOutlier) #df.loc[checkIDX,'isOutlier'] = df[checkIDX].groupby('hklFam')['lorentzInt'].transform(isOutlier) df.loc[checkIDX,'isOutlier'] = df[checkIDX].groupby('hklFam')['scaledIntens'].transform(isOutlier) df['isOutlier'] = df['isOutlier'].astype(bool) df['notOutlier'] = ~df['isOutlier'] df.loc[checkIDX,'isOutlierEll'] = df[checkIDX].groupby('hklFam')['IntensEll'].transform(isOutlier) df['isOutlierEll'] = df['isOutlierEll'].astype(bool) df['notOutlierEll'] = ~df['isOutlierEll'] df['predppl'] = ICCFT.oldScatFun(df['Scattering']/df['Wavelength'],5.24730283, 7.23719321, 0.27449887) #----------------Set the output conditions and make files goodIDX = (df['chiSq'] < 50.0) & (df['Intens'] > 0) & (df['notOutlier']) & (df['Intens']<3.0e7) tooFarIDX = (df['Intens'] > 100) & ((np.abs(df['Intens']-df['IntensEll']) > 2.0*df['IntensEll']) | (np.abs(df['Intens']-df['IntensEll']) > 2*df['Intens'])) goodIDX = goodIDX & ~tooFarIDX dEdge = 3 edgeIDX = (df['Row'] <= dEdge) | (df['Row'] >= 255-dEdge) | (df['Col'] <= dEdge) | (df['Col'] >= 255-dEdge) goodIDX = goodIDX & ~edgeIDX plt.figure(2); plt.clf(); plt.plot(df[goodIDX]['Intens'], df[goodIDX]['IntensEll'],'.',ms=3) #plt.plot(df[~goodIDX]['IntensEll'], 1/0.7*df[~goodIDX]['IntensEll'],'.',ms=3) laueOutput = (df['DSpacing'] > 2.0) & (df['Wavelength'] > 2.0) & (df['Wavelength']<4.0) & (df['Intens']/df['SigInt'] > 1.0)
# Hard-coded input locations for this analysis (beta-lactamase, MaNDi, July 2018).
UBFile = '/home/ntv/Desktop/beta_lac_firstxtal_july18beamtime_pf.mat'
DetCalFile = '/home/ntv/mandi_preprocessing/MANDI_June2018.DetCal'
nxsTemplate = '/SNS/MANDI/IPTS-21326/nexus/MANDI_%i.nxs.h5'
dQPixel=0.003
q_frame = 'lab'
baseDirectory = '/data/ml_peak_sets/beta_lac_secondcrystal_0p4qMask/'

# Some parameters
# Prompt for the peak to inspect; on the first run peakToGet is not yet
# defined, so only a NameError means "no current peak".
try:
    print('Which peak? (Current is %i)'%peakToGet)
except NameError:
    print('Which peak?')
peakToGet = int(input())

peaks_ws = mltools.getPeaksWorkspace(peaksFile, UBFile)
UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
# Box half-widths in Q: fixed first axis, all axes clipped to 0.2.
# (A duplicated compute-and-clip of dQ that was immediately overwritten has
# been removed — it had no effect.)
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
dQ[0,:] = 0.05088
dQ[dQ>0.2]=0.2
nX = 32; nY = 32; nZ = 32
#qMask = pickle.load(open('/data/ml_peak_sets/peaks_tf_mltoolstest_limitedNoise_0p025_cutoff_0p5MaxNoise/qMask.pkl', 'rb'))
qMask = pickle.load(open(baseDirectory+'qMask.pkl', 'rb'))
# Central nX x nY x nZ crop of the mask to match the simulated box size.
cX, cY, cZ = np.array(qMask.shape)//2
dX, dY, dZ = nX//2, nY//2, nZ//2
qMaskSimulated = qMask[cX-dX:cX+dX, cY-dY:cY+dY, cZ-dZ:cZ+dZ]
peak = peaks_ws.getPeak(peakToGet)
# Some parameters
# Reuse an already-loaded peaks workspace when one carries this file path in
# its comment; otherwise load it fresh and tag it for reuse next time.
importPeaks = True
print('Loading peaks_ws')
for ws in mtd.getObjectNames():
    if mtd[ws].getComment() == '%s' % peaksFile:
        # BUGFIX: was a Python 2 print statement (SyntaxError on Python 3).
        print(' using already loaded peaks file')
        importPeaks = False
        peaks_ws = mtd[ws]
if importPeaks:
    peaks_ws = LoadIsawPeaks(Filename=peaksFile)
    peaks_ws.setComment(peaksFile)
LoadIsawUB(InputWorkspace=peaks_ws, FileName=UBFile)
UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
# One DataFrame row per peak; ML-integration columns start zeroed.
df = pd.DataFrame(peaks_ws.toDict())
df['IntensML'] = np.zeros(len(df))
df['SigIntML'] = np.ones(len(df), dtype=float)
df['meanBG'] = np.zeros(len(df))
df['numVoxelsInPeak'] = np.zeros(len(df))
# Run numbers come from the command line if given, else from the peaks table.
if len(sys.argv) == 1:
    runNumbers = df['RunNumber'].unique()
else:
    # BUGFIX: map() is lazy on Python 3 — materialize so the print below shows
    # the values and the sequence can be iterated afterwards.
    runNumbers = list(map(int, sys.argv[1:]))
print('Integrating run numbers:', runNumbers)
qMask = pickle.load(
    open('/data/ml_peak_sets/beta_lac_firstxtal/qMask.pkl', 'rb'))
cX, cY, cZ = np.array(qMask.shape) // 2
def doBVGFit(box, nTheta=200, nPhi=200, zBG=1.96, fracBoxToHistogram=1.0, goodIDX=None, forceParams=None,
             forceTolerance=0.1, dth=10, dph=10, doPeakConvolution=False, sigX0Scale=1., sigY0Scale=1.):
    """
    doBVGFit takes a binned MDbox and returns the fit of the peak shape along the non-TOF direction.
    This is done in one of two ways:
        1) Standard least squares fit of the 2D histogram.
        2) Forcing a set of parameters.  Under this, parameters are tightly constrained.  The peak
           center may move by (dth, dph) from predicted position (in units of histogram pixels) and
           sigma parameters can change by a factor of forceTolerance.
    Input:
        box: a binned 'MDbox'.
        nTheta, nPhi: integer, number of bins to use when creating 2D BVG histogram
        zBG: Z score at which we consider events to be above BG
             #TODO: I think this can be removed since we pass in goodIDX?
        fracBoxToHistrogram: Leave at 1.0 to histogram whole box.  Any values lower will remove the
            edges of box before histogramming.
        goodIDX: a numpy array of shape box.getNumEventsArray().shape.  True for voxels we will
            histogram (i.e. False if the events in this voxel are background.)
        forceParams: set of parameters to force.  These are the same format as a row in
            strongPeaksParams
        forceTolerance: the factor we allow sigX, sigY, sigP to change when forcing peaks.  Not
            used if forceParams is None.
        dth, dph: The peak center may move by (dth, dph) from predicted position (in units of
            histogram pixels).
        doPeakConvolution: boolean stating whether we should fit a convolved (smoothed) peak.  This
            is useful for filling in gaps for 3He detector tube packs.
        sigX0Scale, sigY0Scale: multiplicative scale factors applied to the initial sigX/sigY
            guesses (unforced fits only).
    Returns:
        params: [[A, MuX, MuY, SigX, SigY, SigP, Bg], chiSq] recovered from the Mantid fit.
        h: the 2D (theta, phi) histogram that was fit.
        thBins, phBins: the histogram bin edges.
    """
    # Histogram the box onto a (theta, phi) grid using only the voxels in goodIDX.
    h, thBins, phBins = getAngularHistogram(
        box, nTheta=nTheta, nPhi=nPhi, zBG=zBG, fracBoxToHistogram=fracBoxToHistogram, useIDX=goodIDX)
    thCenters = 0.5 * (thBins[1:] + thBins[:-1])
    phCenters = 0.5 * (phBins[1:] + phBins[:-1])
    TH, PH = np.meshgrid(thCenters, phCenters, indexing='ij', copy=False)
    # Poisson-style weights, floored at 1 so empty bins still contribute.
    weights = np.sqrt(h)
    weights[weights < 1] = 1
    # pos/H are the flattened (x, y) and duplicated-data layout expected by the
    # Mantid BivariateGaussian fit function.
    pos = np.empty(TH.shape + (2, ))
    pos[:, :, 0] = TH
    pos[:, :, 1] = PH
    H = np.empty(h.shape + (2, ))
    H[:, :, 0] = h
    H[:, :, 1] = h

    def fSigP(x, a, k, phi, b):
        # Empirical model for sigP as a function of theta.
        return a * np.sin((k * x) - phi) + b * x

    if forceParams is None:
        meanTH = TH.mean()
        meanPH = PH.mean()
        # sigX0 = 0.0018
        # sigX0 = 0.002#ICCFT.oldScatFun(meanPH, 1.71151521e-02, 6.37218400e+00, 3.39439675e-03)
        # NOTE(review): the constants below are per-sample calibrations; the
        # commented alternatives are retained from earlier datasets.
        sigX0 = ICCFT.oldScatFun(
            #meanPH, 1.71151521e-02, 6.37218400e+00, 3.39439675e-03)  #beta lac mutant
            #meanPH, 1.84007779e-02, 1.01624062e+01, 3.64142682e-03)  #beta lac WT??
            #meanPH, 3.10309118e-02, 1.24508256e+01, 3.21767842e-03)  #pth
            #meanPH, 1.59536758e-02, 6.88234629e+00, 3.08787261e-03)  #psbo
            #meanPH, 1.73513725e-02, 7.75527609e+00, 4.20886173e-03)  #NaK
            meanPH, 1.85095184e-02, 8.00966015e+00, 3.42932014e-03)  #second xtal
        sigX0 = sigX0 / 2.  #beta lac july2018 second xtal
        #sigY0 = 0.0025
        sigY0 = 0.00185  #Second xtal
        #sigY0 = 0.0029 #NaK
        #sigY0 = 0.0023
        #sigY0 = 0.0022
        sigP0 = fSigP(meanTH, 0.1460775, 1.85816592, 0.26850086, -0.00725352)
        # Set some constraints
        boundsDict = {}
        boundsDict['A'] = [0.0, np.inf]
        # Center allowed to move at most dth/dph histogram pixels from the middle.
        boundsDict['MuX'] = [
            thBins[thBins.size // 2 - dth], thBins[thBins.size // 2 + dth]
        ]
        boundsDict['MuY'] = [
            phBins[phBins.size // 2 - dph], phBins[phBins.size // 2 + dph]
        ]
        # boundsDict['sigX'] = [0.7*sigX0, 1.3*sigX0]
        boundsDict['SigX'] = [0., 0.02]
        boundsDict['SigY'] = [0., 0.02]
        boundsDict['SigP'] = [-1., 1.]
        boundsDict['Bg'] = [0, np.inf]
        # Here we can make instrument-specific changes to our initial guesses and boundaries
        sigX0 = sigX0 * sigX0Scale
        sigY0 = sigY0 * sigY0Scale
        if doPeakConvolution:
            # Smooth the histogram to bridge gaps between detector tubes.
            neigh_length_m = 5
            convBox = 1.0 * np.ones([neigh_length_m, neigh_length_m
                                     ]) / neigh_length_m**2
            conv_h = convolve(h, convBox)
            H[:, :, 0] = conv_h
            H[:, :, 1] = conv_h
        # Set our initial guess
        m = BivariateGaussian.BivariateGaussian()
        m.init()
        m['A'] = 1.
        #m['MuX'] = meanTH
        #m['MuY'] = meanPH
        # Start the center on the histogram's maximum bin.
        m['MuX'] = TH[np.unravel_index(h.argmax(), h.shape)]
        m['MuY'] = PH[np.unravel_index(h.argmax(), h.shape)]
        m['SigX'] = sigX0
        m['SigY'] = sigY0
        m['SigP'] = sigP0
        m.setAttributeValue('nX', h.shape[0])
        m.setAttributeValue('nY', h.shape[1])
        m.setConstraints(boundsDict)
        # Do the fit
        #bvgWS = CreateWorkspace(OutputWorkspace='bvgWS', DataX=pos.ravel(
        #), DataY=H.ravel(), DataE=np.sqrt(H.ravel()))
        bvgWS = CreateWorkspace(OutputWorkspace='bvgWS', DataX=pos.ravel(),
                                DataY=H.ravel(), DataE=np.sqrt(H.ravel()))
        fitResults = Fit(Function=m, InputWorkspace='bvgWS', Output='bvgfit',
                         Minimizer='Levenberg-MarquardtMD')
    elif forceParams is not None:  # NOTE(review): equivalent to a plain 'else'
        # Initial guess seeded from the strong-peak parameters.
        p0 = np.zeros(7)
        p0[0] = np.max(h)
        p0[1] = TH.mean()
        p0[2] = PH.mean()
        p0[3] = forceParams[5]
        p0[4] = forceParams[6]
        p0[5] = forceParams[7]
        # Set some constraints
        # Bounds: +/- forceTolerance fraction around p0 (sign-aware), except the
        # amplitude and center which get their own limits below.
        isPos = np.sign(p0)
        bounds = ((1.0 - isPos * forceTolerance) * p0,
                  (1.0 + isPos * forceTolerance) * p0)
        bounds[0][0] = 0.0
        bounds[1][0] = np.inf  # Amplitude
        bounds[0][1] = min(thBins[thBins.size // 2 - dth], thBins[thBins.size // 2 + dth])
        bounds[1][1] = max(thBins[thBins.size // 2 - dth], thBins[thBins.size // 2 + dth])
        bounds[0][2] = min(phBins[phBins.size // 2 - dph], phBins[phBins.size // 2 + dph])
        bounds[1][2] = max(phBins[phBins.size // 2 - dph], phBins[phBins.size // 2 + dph])
        bounds[1][-1] = np.inf
        boundsDict = {}
        boundsDict['A'] = [0.0, np.inf]
        boundsDict['MuX'] = [
            thBins[thBins.size // 2 - dth], thBins[thBins.size // 2 + dth]
        ]
        boundsDict['MuY'] = [
            phBins[phBins.size // 2 - dph], phBins[phBins.size // 2 + dph]
        ]
        boundsDict['SigX'] = [bounds[0][3], bounds[1][3]]
        boundsDict['SigY'] = [bounds[0][4], bounds[1][4]]
        boundsDict['SigP'] = [bounds[0][5], bounds[1][5]]
        # Here we can make instrument-specific changes to our initial guesses and boundaries
        if doPeakConvolution:
            neigh_length_m = 5
            convBox = 1.0 * np.ones([neigh_length_m, neigh_length_m
                                     ]) / neigh_length_m**2
            conv_h = convolve(h, convBox)
            H[:, :, 0] = conv_h
            H[:, :, 1] = conv_h
        # Set our initial guess
        m = BivariateGaussian.BivariateGaussian()
        m.init()
        m['A'] = 0.1
        #m['muX'] = np.average(thCenters,weights=np.sum(h,axis=1))
        #m['muY'] = np.average(phCenters,weights=np.sum(h,axis=0))
        #m['muX'] = TH.mean()
        #m['muY'] = PH.mean()
        m['MuX'] = TH[np.unravel_index(h.argmax(), h.shape)]
        m['MuY'] = PH[np.unravel_index(h.argmax(), h.shape)]
        m['SigX'] = forceParams[5]
        m['SigY'] = forceParams[6]
        m['SigP'] = forceParams[7]
        m.setAttributeValue('nX', h.shape[0])
        m.setAttributeValue('nY', h.shape[1])
        m.setConstraints(boundsDict)
        # Do the fit
        #plt.figure(18); plt.clf(); plt.imshow(m.function2D(pos)); plt.title('BVG Initial guess')
        bvgWS = CreateWorkspace(OutputWorkspace='bvgWS', DataX=pos.ravel(),
                                DataY=H.ravel(), DataE=np.sqrt(H.ravel()))
        fitFun = m
        fitResults = Fit(Function=fitFun, InputWorkspace=bvgWS, Output='bvgfit',
                         Minimizer='Levenberg-MarquardtMD')
    # Recover the result from the 'bvgfit_Parameters' table the Fit produced.
    m = BivariateGaussian.BivariateGaussian()
    m.init()
    m['A'] = mtd['bvgfit_Parameters'].row(0)['Value']
    m['MuX'] = mtd['bvgfit_Parameters'].row(1)['Value']
    m['MuY'] = mtd['bvgfit_Parameters'].row(2)['Value']
    m['SigX'] = mtd['bvgfit_Parameters'].row(3)['Value']
    m['SigY'] = mtd['bvgfit_Parameters'].row(4)['Value']
    m['SigP'] = mtd['bvgfit_Parameters'].row(5)['Value']
    m['Bg'] = mtd['bvgfit_Parameters'].row(6)['Value']
    m.setAttributeValue('nX', h.shape[0])
    m.setAttributeValue('nY', h.shape[1])
    chiSq = fitResults[1]
    params = [[
        m['A'], m['MuX'], m['MuY'], m['SigX'], m['SigY'], m['SigP'], m['Bg']
    ], chiSq]
    return params, h, thBins, phBins
def doBVGFit(box, nTheta=200, nPhi=200, zBG=1.96, fracBoxToHistogram=1.0, goodIDX=None, forceParams=None,
             forceTolerance=0.1, dth=10, dph=10):
    """
    doBVGFit takes a binned MDbox and returns the fit of the peak shape along the non-TOF direction.
    This is done in one of two ways:
        1) Standard least squares fit of the 2D histogram.
        2) Forcing a set of parameters.  Under this, parameters are tightly constrained.  The peak
           center may move by (dth, dph) from predicted position (in units of histogram pixels) and
           sigma parameters can change by a factor of forceTolerance.
    Input:
        box: a binned 'MDbox'.
        nTheta, nPhi: integer, number of bins to use when creating 2D BVG histogram
        zBG: Z score at which we consider events to be above BG
             #TODO: I think this can be removed since we pass in goodIDX?
        fracBoxToHistrogram: Leave at 1.0 to histogram whole box.  Any values lower will remove the
            edges of box before histogramming.
        goodIDX: a numpy array of shape box.getNumEventsArray().shape.  True for voxels we will
            histogram (i.e. False if the events in this voxel are background.)
        forceParams: set of parameters to force.  These are the same format as a row in
            strongPeaksParams
        forceTolerance: the factor we allow sigX, sigY, sigP to change when forcing peaks.  Not
            used if forceParams is None.
        dth, dph: The peak center may move by (dth, dph) from predicted position (in units of
            histogram pixels).
    Returns:
        params: [[A, muX, muY, sigX, sigY, sigP, bg], chiSq] recovered from the Mantid fit.
        h: the 2D (theta, phi) histogram that was fit.
        thBins, phBins: the histogram bin edges.
    """
    # Histogram the box onto a (theta, phi) grid using only the voxels in goodIDX.
    h, thBins, phBins = getAngularHistogram(
        box, nTheta=nTheta, nPhi=nPhi, zBG=zBG, fracBoxToHistogram=fracBoxToHistogram, useIDX=goodIDX)
    thCenters = 0.5 * (thBins[1:] + thBins[:-1])
    phCenters = 0.5 * (phBins[1:] + phBins[:-1])
    TH, PH = np.meshgrid(thCenters, phCenters, indexing='ij', copy=False)
    # Poisson-style weights, floored at 1 so empty bins still contribute.
    weights = np.sqrt(h)
    weights[weights < 1] = 1
    # pos/H are the flattened (x, y) and duplicated-data layout expected by the
    # Mantid BivariateGaussian fit function.
    pos = np.empty(TH.shape + (2,))
    pos[:, :, 0] = TH
    pos[:, :, 1] = PH
    H = np.empty(h.shape + (2,))
    H[:, :, 0] = h
    H[:, :, 1] = h

    def fSigP(x, a, k, phi, b):
        # Empirical model for sigP as a function of theta.
        return a * np.sin((k * x) - phi) + b * x

    if forceParams is None:
        meanTH = TH.mean()
        meanPH = PH.mean()
        # sigX0 = 0.0018
        # sigX0 = 0.002#ICCFT.oldScatFun(meanPH, 1.71151521e-02, 6.37218400e+00, 3.39439675e-03)
        # NOTE(review): sigX0/sigY0 constants are instrument/sample calibrations.
        sigX0 = ICCFT.oldScatFun(
            meanPH, 1.71151521e-02, 6.37218400e+00, 3.39439675e-03)
        sigY0 = 0.0025
        sigP0 = fSigP(meanTH, 0.1460775, 1.85816592, 0.26850086, -0.00725352)
        # Set some constraints
        boundsDict = {}
        boundsDict['A'] = [0.0, np.inf]
        # Center allowed to move at most dth/dph histogram pixels from the middle.
        boundsDict['muX'] = [thBins[thBins.size // 2 - dth],
                             thBins[thBins.size // 2 + dth]]
        boundsDict['muY'] = [phBins[phBins.size // 2 - dph],
                             phBins[phBins.size // 2 + dph]]
        # boundsDict['sigX'] = [0.7*sigX0, 1.3*sigX0]
        boundsDict['sigX'] = [0., 0.02]
        boundsDict['sigY'] = [0., 0.02]
        boundsDict['sigP'] = [-1., 1.]
        boundsDict['bg'] = [0, np.inf]
        # Set our initial guess
        m = BivariateGaussian.BivariateGaussian()
        m.init()
        m['A'] = 1.
        #m['muX'] = meanTH
        #m['muY'] = meanPH
        # Start the center on the histogram's maximum bin.
        m['muX'] = TH[np.unravel_index(h.argmax(), h.shape)]
        m['muY'] = PH[np.unravel_index(h.argmax(), h.shape)]
        m['sigX'] = sigX0
        m['sigY'] = sigY0
        m['sigP'] = sigP0
        m.setAttributeValue('nX', h.shape[0])
        m.setAttributeValue('nY', h.shape[1])
        m.setConstraints(boundsDict)
        # print('before: ')
        # print(m)
        # Do the fit
        bvgWS = CreateWorkspace(OutputWorkspace='bvgWS', DataX=pos.ravel(
        ), DataY=H.ravel(), DataE=np.sqrt(H.ravel()))
        fitResults = Fit(Function=m, InputWorkspace='bvgWS', Output='bvgfit',
                         Minimizer='Levenberg-MarquardtMD')
        #print('after')
        #print(m)
    elif forceParams is not None:  # NOTE(review): equivalent to a plain 'else'
        # Initial guess seeded from the strong-peak parameters.
        p0 = np.zeros(7)
        p0[0] = np.max(h)
        p0[1] = TH.mean()
        p0[2] = PH.mean()
        p0[3] = forceParams[5]
        p0[4] = forceParams[6]
        p0[5] = forceParams[7]
        # Set some constraints
        # Bounds: +/- forceTolerance fraction around p0 (sign-aware), except the
        # amplitude and center which get their own limits below.
        isPos = np.sign(p0)
        bounds = ((1.0 - isPos * forceTolerance) * p0,
                  (1.0 + isPos * forceTolerance) * p0)
        bounds[0][0] = 0.0
        bounds[1][0] = np.inf  # Amplitude
        bounds[0][1] = min(thBins[thBins.size // 2 - dth], thBins[thBins.size // 2 + dth])
        bounds[1][1] = max(thBins[thBins.size // 2 - dth], thBins[thBins.size // 2 + dth])
        bounds[0][2] = min(phBins[phBins.size // 2 - dph], phBins[phBins.size // 2 + dph])
        bounds[1][2] = max(phBins[phBins.size // 2 - dph], phBins[phBins.size // 2 + dph])
        bounds[1][-1] = np.inf
        boundsDict = {}
        boundsDict['A'] = [0.0, np.inf]
        boundsDict['muX'] = [thBins[thBins.size // 2 - dth],
                             thBins[thBins.size // 2 + dth]]
        boundsDict['muY'] = [phBins[phBins.size // 2 - dph],
                             phBins[phBins.size // 2 + dph]]
        boundsDict['sigX'] = [bounds[0][3], bounds[1][3]]
        boundsDict['sigY'] = [bounds[0][4], bounds[1][4]]
        boundsDict['sigP'] = [bounds[0][5], bounds[1][5]]
        # Set our initial guess
        m = BivariateGaussian.BivariateGaussian()
        m.init()
        m['A'] = 0.1
        #m['muX'] = np.average(thCenters,weights=np.sum(h,axis=1))
        #m['muY'] = np.average(phCenters,weights=np.sum(h,axis=0))
        #m['muX'] = TH.mean()
        #m['muY'] = PH.mean()
        m['muX'] = TH[np.unravel_index(h.argmax(), h.shape)]
        m['muY'] = PH[np.unravel_index(h.argmax(), h.shape)]
        m['sigX'] = forceParams[5]
        m['sigY'] = forceParams[6]
        m['sigP'] = forceParams[7]
        m.setAttributeValue('nX', h.shape[0])
        m.setAttributeValue('nY', h.shape[1])
        m.setConstraints(boundsDict)
        #print('before:')
        #print(m)
        # Do the fit
        #plt.figure(18); plt.clf(); plt.imshow(m.function2D(pos)); plt.title('BVG Initial guess')
        bvgWS = CreateWorkspace(OutputWorkspace='bvgWS', DataX=pos.ravel(),
                                DataY=H.ravel(), DataE=np.sqrt(H.ravel()))
        fitFun = m
        fitResults = Fit(Function=fitFun, InputWorkspace=bvgWS, Output='bvgfit',
                         Minimizer='Levenberg-MarquardtMD')
        #print 'after:'
        #print m
    # Recover the result from the 'bvgfit_Parameters' table the Fit produced.
    m = BivariateGaussian.BivariateGaussian()
    m.init()
    m['A'] = mtd['bvgfit_Parameters'].row(0)['Value']
    m['muX'] = mtd['bvgfit_Parameters'].row(1)['Value']
    m['muY'] = mtd['bvgfit_Parameters'].row(2)['Value']
    m['sigX'] = mtd['bvgfit_Parameters'].row(3)['Value']
    m['sigY'] = mtd['bvgfit_Parameters'].row(4)['Value']
    m['sigP'] = mtd['bvgfit_Parameters'].row(5)['Value']
    m['bg'] = mtd['bvgfit_Parameters'].row(6)['Value']
    m.setAttributeValue('nX', h.shape[0])
    m.setAttributeValue('nY', h.shape[1])
    chiSq = fitResults[1]
    params = [[m['A'], m['muX'], m['muY'], m['sigX'],
               m['sigY'], m['sigP'], m['bg']], chiSq]
    # print params
    return params, h, thBins, phBins
# TOPAZ mask-visualization scratch script: extract a peak's MD box, build the
# detector-panel mask, and overlay the mask on the central slice of the data.
loadDir = '/SNS/TOPAZ/shared/PeakIntegration/data/'
DetCalFile = '/SNS/TOPAZ/shared/PeakIntegration/calibration/TOPAZ_2016A.DetCal'
peaks_ws, UBMatrix = getPeaksWS(peaksFile, UBFile)
#UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()
instrumentFile = getInstrumentFile(peaks_ws, peaksFile)
#panelDict = pickle.load(open('panelDict_15647.pkl','rb'))
panelDict = getInstrumentDict(instrumentFile, peaks_ws, fitOrder=2)
peak = peaks_ws.getPeak(peakToGet)
MDdata = getMDData(peak, loadDir, DetCalFile)
Box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDdata, UBMatrix,
                          peakToGet, fracHKL=0.5, refineCenter=False)
n_events = Box.getNumEventsArray()
mask = getMask(peak, Box, panelDict)
# Central slice (along axis 1) of the mask and of the event counts.
q = mask[:, mask.shape[1] // 2, :]
r = n_events[:, n_events.shape[1] // 2, :]
plt.clf()
plt.imshow(r)
# BUGFIX: removed plt.hold('on') — pyplot.hold was deprecated in matplotlib 2.0
# and removed in 3.0 (it would raise AttributeError); overlaying subsequent
# artists on the same axes is now the default behavior.
plt.imshow(q, cmap='gray', alpha=0.2)
def get3DPeak(peak, peaks_ws, box, padeCoefficients, qMask, nTheta=150, nPhi=150, fracBoxToHistogram=1.0,
              plotResults=False, zBG=1.96, bgPolyOrder=1, fICCParams=None, oldICCFit=None,
              strongPeakParams=None, forceCutoff=250, edgeCutoff=15, neigh_length_m=3, q_frame='sample',
              dtSpread=0.03, pplmin_frac=0.8, pplmax_frac=1.5, mindtBinWidth=1, maxdtBinWidth=50,
              figureNumber=2, peakMaskSize=5, iccFitDict=None, sigX0Params=None, sigY0=None,
              sigP0Params=None, fitPenalty=None):
    """
    Fit the full 3D profile of a peak as the product of a TOF profile (Ikeda-
    Carpenter convoluted, fit by fitTOFCoordinate or supplied via fICCParams)
    and a bivariate Gaussian in the two angular directions (doBVGFit), then
    scale the model to the data.

    Parameters (selection):
        peak, peaks_ws, box: the peak, its PeaksWorkspace, and its binned MD box.
        padeCoefficients: moderator coefficients for the ICC fit.
        qMask: boolean array selecting voxels inside the HKL mask.
        fICCParams: if given, reuse an existing ICC fit instead of refitting TOF.
        oldICCFit: optional [x, ., y] arrays from a previous ICC fit to build the
            TOF profile from (only used when fICCParams is given).
        strongPeakParams: library of strong-peak BVG parameters; used to force
            the angular fit for weak or edge peaks (see forceCutoff/edgeCutoff).
        Remaining keyword arguments are forwarded to the helpers.

    Returns:
        Y2: the fitted, scaled 3D profile over the box.
        goodIDX: boolean array of voxels considered signal (masked by qMask).
        pp_lambda: the background-level parameter used.
        retParams: dict of all fitted parameter values.

    Raises:
        ValueError: if q_frame is neither 'lab' nor 'sample'.
    """
    n_events = box.getNumEventsArray()

    if q_frame == 'lab':
        q0 = peak.getQLabFrame()
    elif q_frame == 'sample':
        q0 = peak.getQSampleFrame()
    else:
        raise ValueError(
            'BVGFT:get3DPeak - q_frame must be either \'lab\' or \'sample\'; %s was provided' %
            q_frame)

    if fICCParams is None:
        # Fit the TOF profile from scratch.
        goodIDX, pp_lambda = ICCFT.getBGRemovedIndices(
            n_events, peak=peak, box=box, qMask=qMask, calc_pp_lambda=True,
            padeCoefficients=padeCoefficients, neigh_length_m=neigh_length_m, pp_lambda=None,
            pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac, mindtBinWidth=mindtBinWidth,
            maxdtBinWidth=maxdtBinWidth, peakMaskSize=peakMaskSize, iccFitDict=iccFitDict,
            fitPenalty=fitPenalty)
        YTOF, fICC, x_lims = fitTOFCoordinate(
            box, peak, padeCoefficients, dtSpread=dtSpread, qMask=qMask, bgPolyOrder=bgPolyOrder,
            zBG=zBG, plotResults=plotResults, pp_lambda=pp_lambda, neigh_length_m=neigh_length_m,
            pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac, mindtBinWidth=mindtBinWidth,
            maxdtBinWidth=maxdtBinWidth, peakMaskSize=peakMaskSize, iccFitDict=iccFitDict,
            fitPenalty=fitPenalty)
        chiSqTOF = mtd['fit_Parameters'].column(1)[-1]
    else:  # we already did I-C profile, so we'll just read the parameters
        pp_lambda = fICCParams[-1]
        fICC = ICC.IkedaCarpenterConvoluted()
        fICC.init()
        fICC['A'] = fICCParams[5]
        fICC['B'] = fICCParams[6]
        fICC['R'] = fICCParams[7]
        fICC['T0'] = fICCParams[8]
        fICC['Scale'] = fICCParams[9]
        fICC['HatWidth'] = fICCParams[10]
        fICC['KConv'] = fICCParams[11]
        goodIDX, _ = ICCFT.getBGRemovedIndices(
            n_events, pp_lambda=pp_lambda, qMask=qMask, peakMaskSize=peakMaskSize,
            iccFitDict=iccFitDict, fitPenalty=fitPenalty)
        chiSqTOF = fICCParams[4]  #Last entry

        # Get the 3D TOF component, YTOF
        if oldICCFit is not None:
            x_lims = [np.min(oldICCFit[0]), np.max(oldICCFit[0])]
            tofxx = oldICCFit[0]
            tofyy = oldICCFit[2]
        else:
            dtSpread = 0.03
            x_lims = [(1 - dtSpread) * peak.getTOF(), (1 + dtSpread) * peak.getTOF()]
            tofxx = np.arange(x_lims[0], x_lims[1], 5)
            tofyy = fICC.function1D(tofxx)
        ftof = interp1d(tofxx, tofyy, bounds_error=False, fill_value=0.0)
        XTOF = boxToTOFThetaPhi(box, peak)[:, :, :, 0]
        YTOF = ftof(XTOF)

    # Get YBVG - the detector component
    if goodIDX is not None:
        goodIDX *= qMask
    X = boxToTOFThetaPhi(box, peak)
    dEdge = edgeCutoff
    # This section defines detector size to determine if a peak is too
    # close to the edge.  Order is [NROWS, NCOLS].
    try:
        numDetRows = peaks_ws.getInstrument().getIntParameter("numDetRows")[0]
        numDetCols = peaks_ws.getInstrument().getIntParameter("numDetCols")[0]
        nPixels = [numDetRows, numDetCols]
    except Exception:  # BUGFIX: was a bare except (also caught KeyboardInterrupt)
        # BUGFIX: previously this built a UserWarning instance without emitting
        # it, so the message was silently discarded.  Actually warn now.
        import warnings
        warnings.warn('Detector size not found in instrument parameters file. '
                      'Assuming a 255*255 detector!')
        nPixels = [255, 255]
    # Weak or edge peaks get a forced (nearest strong neighbor) angular profile.
    useForceParams = peak.getIntensity() < forceCutoff or peak.getRow() <= dEdge or peak.getRow(
    ) >= nPixels[0] - dEdge or peak.getCol() <= dEdge or peak.getCol() >= nPixels[1] - dEdge

    sigX0Params, sigY0, sigP0Params, doPeakConvolution = getBVGGuesses(
        peaks_ws, sigX0Params, sigY0, sigP0Params)

    if strongPeakParams is not None and useForceParams:  # We will force parameters on this fit
        # Pick the strong peak closest in (phi, theta) to this peak.
        ph = np.arctan2(q0[1], q0[0])
        th = np.arctan2(q0[2], np.hypot(q0[0], q0[1]))
        phthPeak = np.array([ph, th])
        tmp = strongPeakParams[:, :2] - phthPeak
        distSq = tmp[:, 0]**2 + tmp[:, 1]**2
        nnIDX = np.argmin(distSq)
        params, h, t, p = doBVGFit(
            box, nTheta=nTheta, nPhi=nPhi, fracBoxToHistogram=fracBoxToHistogram, goodIDX=goodIDX,
            forceParams=strongPeakParams[nnIDX], doPeakConvolution=doPeakConvolution,
            sigX0Params=sigX0Params, sigY0=sigY0, sigP0Params=sigP0Params, fitPenalty=fitPenalty)
    else:  # Just do the fit - no nearest neighbor assumptions
        params, h, t, p = doBVGFit(
            box, nTheta=nTheta, nPhi=nPhi, fracBoxToHistogram=fracBoxToHistogram, goodIDX=goodIDX,
            doPeakConvolution=doPeakConvolution, sigX0Params=sigX0Params, sigY0=sigY0,
            sigP0Params=sigP0Params, fitPenalty=fitPenalty)

    if plotResults:
        compareBVGFitData(box, params[0], nTheta, nPhi, fracBoxToHistogram=fracBoxToHistogram,
                          useIDX=goodIDX, figNumber=figureNumber)

    # set up the BVG
    # A = params[0][0]  # never used
    mu0 = params[0][1]
    mu1 = params[0][2]
    sigX = params[0][3]
    sigY = params[0][4]
    p = params[0][5]
    bgBVG = params[0][6]
    sigma = np.array([[sigX**2, p * sigX * sigY], [p * sigX * sigY, sigY**2]])
    mu = np.array([mu0, mu1])

    XTOF = X[:, :, :, 0]
    XTHETA = X[:, :, :, 1]
    XPHI = X[:, :, :, 2]

    YBVG = bvg(1.0, mu, sigma, XTHETA, XPHI, 0)

    # Do scaling to the data
    if doPeakConvolution:  # This means peaks will have gaps, so we only use good data to scale
        Y, redChiSq, scaleFactor = fitScaling(n_events, box, YTOF, YBVG, goodIDX=goodIDX)
    else:
        Y, redChiSq, scaleFactor = fitScaling(n_events, box, YTOF, YBVG)
    YBVG2 = bvg(1.0, mu, sigma, XTHETA, XPHI, 0)
    YTOF2 = getYTOF(fICC, XTOF, x_lims)
    Y2 = YTOF2 * YBVG2
    Y2 = scaleFactor * Y2 / Y2.max()

    # Report the Q position of the fitted maximum and its shift from nominal.
    QX, QY, QZ = ICCFT.getQXQYQZ(box)
    fitMaxIDX = tuple(np.array(np.unravel_index(Y2.argmax(), Y2.shape)))
    newCenter = np.array([QX[fitMaxIDX], QY[fitMaxIDX], QZ[fitMaxIDX]])

    # Set a dictionary with the parameters to return
    retParams = {}
    retParams['Alpha'] = fICC['A']
    retParams['Beta'] = fICC['B']
    retParams['R'] = fICC['R']
    retParams['T0'] = fICC['T0']
    retParams['Scale'] = fICC['Scale']
    retParams['KConv'] = fICC['KConv']
    retParams['MuTH'] = mu0
    retParams['MuPH'] = mu1
    retParams['SigX'] = sigX
    retParams['SigY'] = sigY
    retParams['SigP'] = p
    retParams['bgBVG'] = bgBVG
    retParams['scale3d'] = scaleFactor
    retParams['chiSq3d'] = redChiSq
    retParams['chiSq'] = chiSqTOF
    retParams['dQ'] = np.linalg.norm(newCenter - q0)
    retParams['newQ'] = newCenter

    return Y2, goodIDX, pp_lambda, retParams
def PyExec(self):
    """Fit a 3D profile (ICC TOF profile x bivariate-Gaussian angular profile) to each
    requested peak and record the integrated intensities.

    Reads the algorithm properties, fits every peak in ``peaksToFit`` via
    ``BVGFT.get3DPeak``, stores intensity/sigma on the cloned peaks workspace, and
    fills a table workspace with the per-peak fit parameters.  Peaks that fail to
    fit are given intensity 0 +- 1 and skipped.
    """
    import ICCFitTools as ICCFT
    import BVGFitTools as BVGFT
    from scipy.ndimage.filters import convolve

    # --- Read algorithm properties ---
    MDdata = self.getProperty('InputWorkspace').value
    peaks_ws = self.getProperty('PeaksWorkspace').value
    fracStop = self.getProperty('FracStop').value
    dQMax = self.getProperty('DQMax').value
    UBFile = self.getProperty('UBFile').value
    padeFile = self.getProperty('ModeratorCoefficientsFile').value
    strongPeaksParamsFile = self.getProperty('StrongPeakParamsFile').value
    forceCutoff = self.getProperty('IntensityCutoff').value
    edgeCutoff = self.getProperty('EdgeCutoff').value
    peakNumberToFit = self.getProperty('PeakNumber').value
    pplmin_frac = self.getProperty('MinpplFrac').value
    pplmax_frac = self.getProperty('MaxpplFrac').value
    sampleRun = peaks_ws.getPeak(0).getRunNumber()
    q_frame='lab'
    mtd['MDdata'] = MDdata
    zBG = 1.96  # z-score cutoff used when separating background voxels
    neigh_length_m=3  # edge length (voxels) of the box-smoothing kernel below
    iccFitDict = ICCFT.parseConstraints(peaks_ws) #Contains constraints and guesses for ICC Fitting
    padeCoefficients = ICCFT.getModeratorCoefficients(padeFile)
    # There are a few instrument specific parameters that we define here. In some cases,
    # it may improve fitting to tweak these parameters, but for simplicity we define these here.
    # The default values are good for MaNDi - new instruments can be added by adding a different elif
    # statement.
    # If you change these values or add an instrument, documentation should also be changed.
    try:
        numDetRows = peaks_ws.getInstrument().getIntParameter("numDetRows")[0]
        numDetCols = peaks_ws.getInstrument().getIntParameter("numDetCols")[0]
        nPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
        nTheta = peaks_ws.getInstrument().getIntParameter("numBinsTheta")[0]
        # NOTE(review): duplicate of the numBinsPhi read two lines above - harmless but redundant.
        nPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
        mindtBinWidth = peaks_ws.getInstrument().getNumberParameter("mindtBinWidth")[0]
        maxdtBinWidth = peaks_ws.getInstrument().getNumberParameter("maxdtBinWidth")[0]
        fracHKL = peaks_ws.getInstrument().getNumberParameter("fracHKL")[0]
        dQPixel = peaks_ws.getInstrument().getNumberParameter("dQPixel")[0]
        peakMaskSize = peaks_ws.getInstrument().getIntParameter("peakMaskSize")[0]
    except:
        logger.error("Cannot find all parameters in instrument parameters file.")
        raise
    UBMatrix = self.getUBMatrix(peaks_ws, UBFile)
    dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
    dQ[dQ>dQMax] = dQMax  # cap the per-direction box half-width
    qMask = ICCFT.getHKLMask(UBMatrix, frac=fracHKL, dQPixel=dQPixel,dQ=dQ)
    generateStrongPeakParams, strongPeakParams, strongPeakParams_ws, needsForcedProfile, \
        needsForcedProfileIDX, canFitProfileIDX, numPeaksCanFit, peaksToFit = \
        self.initializeStrongPeakSettings(strongPeaksParamsFile, peaks_ws, sampleRun,
                                          forceCutoff, edgeCutoff, numDetRows, numDetCols)
    if peakNumberToFit>-1:
        # A specific peak was requested - only fit that one.
        peaksToFit = [peakNumberToFit]

    # Create the parameters workspace
    keys = ['peakNumber','Alpha', 'Beta', 'R', 'T0', 'bgBVG', 'chiSq3d', 'chiSq', 'dQ', 'KConv',
            'MuPH', 'MuTH', 'newQ', 'Scale', 'scale3d', 'SigP', 'SigX', 'SigY', 'Intens3d', 'SigInt3d']
    datatypes = ['float']*len(keys)
    datatypes[np.where(np.array(keys)=='newQ')[0][0]] = 'V3D'
    params_ws = CreateEmptyTableWorkspace()
    for key, datatype in zip(keys,datatypes):
        params_ws.addColumn(datatype, key)

    # And we're off!
    peaks_ws_out = peaks_ws.clone()
    np.warnings.filterwarnings('ignore')  # There can be a lot of warnings for bad solutions that get rejected.
    progress = Progress(self, 0.0, 1.0, len(peaksToFit))
    sigX0Params, sigY0, sigP0Params = self.getBVGInitialGuesses(peaks_ws, strongPeakParams_ws)
    for fitNumber, peakNumber in enumerate(peaksToFit):#range(peaks_ws.getNumberPeaks()):
        peak = peaks_ws_out.getPeak(peakNumber)
        progress.report(' ')
        # Only fit peaks whose run matches the MD workspace we were given.
        if peak.getRunNumber() != MDdata.getExperimentInfo(0).getRunNumber():
            logger.warning('Peak number %i has run number %i but MDWorkspace is from run number %i. Skipping this peak.'%(
                peakNumber, peak.getRunNumber(), MDdata.getExperimentInfo(0).getRunNumber()))
            continue
        try:
            box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDdata, UBMatrix, peakNumber, dQ,
                                      fracHKL=0.5, dQPixel=dQPixel, q_frame=q_frame)
            if ~needsForcedProfile[peakNumber]:
                strongPeakParamsToSend = None
            else:
                strongPeakParamsToSend = strongPeakParams
            # Will allow forced weak and edge peaks to be fit using a neighboring peak profile
            Y3D, goodIDX, pp_lambda, params = BVGFT.get3DPeak(peak, peaks_ws, box, padeCoefficients, qMask,
                                                              nTheta=nTheta, nPhi=nPhi, plotResults=False,
                                                              zBG=zBG, fracBoxToHistogram=1.0, bgPolyOrder=1,
                                                              strongPeakParams=strongPeakParamsToSend,
                                                              q_frame=q_frame, mindtBinWidth=mindtBinWidth,
                                                              maxdtBinWidth=maxdtBinWidth,
                                                              pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
                                                              forceCutoff=forceCutoff, edgeCutoff=edgeCutoff,
                                                              peakMaskSize=peakMaskSize,
                                                              iccFitDict=iccFitDict, sigX0Params=sigX0Params,
                                                              sigY0=sigY0, sigP0Params=sigP0Params,
                                                              fitPenalty=1.e7)
            # First we get the peak intensity
            peakIDX = Y3D/Y3D.max() > fracStop
            intensity = np.sum(Y3D[peakIDX])
            # Now the number of background counts under the peak assuming a constant bg across the box
            n_events = box.getNumEventsArray()
            convBox = 1.0*np.ones([neigh_length_m, neigh_length_m,neigh_length_m]) / neigh_length_m**3
            conv_n_events = convolve(n_events,convBox)
            bgIDX = np.logical_and.reduce(np.array([~goodIDX, qMask, conv_n_events>0]))
            bgEvents = np.mean(n_events[bgIDX])*np.sum(peakIDX)
            # Now we consider the variation of the fit. These are done as three independent fits, so we need to
            # consider the variance within our fit sig^2 = sum(N*(yFit-yData)) / sum(N) and scale by the number
            # of parameters that go into the fit. In total: 10 (removing scale variables)
            w_events = n_events.copy()
            w_events[w_events==0] = 1  # avoid zero weights in the weighted average
            varFit = np.average((n_events[peakIDX]-Y3D[peakIDX])*(n_events[peakIDX]-Y3D[peakIDX]),
                                weights=(w_events[peakIDX]))
            sigma = np.sqrt(intensity + bgEvents + varFit)
            compStr = 'peak {:d}; original: {:4.2f} +- {:4.2f}; new: {:4.2f} +- {:4.2f}'.format(
                peakNumber, peak.getIntensity(), peak.getSigmaIntensity(), intensity, sigma)
            logger.information(compStr)
            # Save the results
            params['peakNumber'] = peakNumber
            params['Intens3d'] = intensity
            params['SigInt3d'] = sigma
            params['newQ'] = V3D(params['newQ'][0],params['newQ'][1],params['newQ'][2])
            params_ws.addRow(params)
            peak.setIntensity(intensity)
            peak.setSigmaIntensity(sigma)
            if generateStrongPeakParams and ~needsForcedProfile[peakNumber]:
                # This peak was fit without a forced profile - consider recording it as a
                # strong-peak template for forcing weak/edge peaks later.
                qPeak = peak.getQLabFrame()
                theta = np.arctan2(qPeak[2], np.hypot(qPeak[0],qPeak[1])) #2theta
                try:
                    p = mtd['__fitSigX0_Parameters'].column(1)[:-1]
                    tol = 0.2 #We should have a good idea now - only allow 20% variation
                except:
                    p = peaks_ws.getInstrument().getStringParameter("sigSC0Params")
                    p = np.array(str(p).strip('[]\'').split(),dtype=float)
                    tol = 5.0 #High tolerance since we don't know what the answer will be
                predSigX = BVGFT.coshPeakWidthModel(theta, p[0],p[1],p[2],p[3])
                # Only accept the peak if its fitted sigma agrees with the model prediction.
                if np.abs((params['SigX'] - predSigX)/1./predSigX) < tol:
                    strongPeakParams[fitNumber, 0] = np.arctan2(qPeak[1], qPeak[0]) # phi
                    strongPeakParams[fitNumber, 1] = np.arctan2(qPeak[2], np.hypot(qPeak[0],qPeak[1])) #theta
                    strongPeakParams[fitNumber, 2] = params['scale3d']
                    strongPeakParams[fitNumber, 3] = params['MuTH']
                    strongPeakParams[fitNumber, 4] = params['MuPH']
                    strongPeakParams[fitNumber, 5] = params['SigX']
                    strongPeakParams[fitNumber, 6] = params['SigY']
                    strongPeakParams[fitNumber, 7] = params['SigP']
                    strongPeakParams[fitNumber, 8] = peakNumber
                    strongPeakParams_ws.addRow(strongPeakParams[fitNumber])
                    sigX0Params, sigY0, sigP0Params = self.getBVGInitialGuesses(peaks_ws, strongPeakParams_ws)
        except KeyboardInterrupt:
            np.warnings.filterwarnings('default') # Re-enable on exit
            raise
        except:
            #raise
            logger.warning('Error fitting peak number ' + str(peakNumber))
            peak.setIntensity(0.0)
            peak.setSigmaIntensity(1.0)

    # Cleanup
    for wsName in mtd.getObjectNames():
        if 'fit_' in wsName or 'bvgWS' in wsName or 'tofWS' in wsName or 'scaleWS' in wsName:
            mtd.remove(wsName)
    np.warnings.filterwarnings('default') # Re-enable on exit

    # Set the output
    self.setProperty('OutputPeaksWorkspace', peaks_ws_out)
    self.setProperty('OutputParamsWorkspace', params_ws)
# Re-use an MDWorkspace already loaded for this run if one is cached in the ADS;
# otherwise load it from disk via ICCFT.getSample and tag it for later re-use.
importFlag = True
for ws in mtd.getObjectNames():
    if mtd[ws].getComment() == 'BSGETBOX%i' % peak.getRunNumber():
        print(' Using already loaded MDdata')  # was a Python-2 print statement
        MDdata = mtd[ws]
        importFlag = False
        break
if importFlag:
    try:
        # Old-style %-formatted templates (e.g. 'MANDI_%i.nxs.h5').
        fileName = nxsTemplate % peak.getRunNumber()
    except (TypeError, ValueError):
        # str.format-style templates fall through to here.
        fileName = nxsTemplate.format(0, peak.getRunNumber())
    MDdata = ICCFT.getSample(peak.getRunNumber(), DetCalFile, workDir, fileName, q_frame=q_frame)
    MDdata.setComment('BSGETBOX%i' % peak.getRunNumber())

# Parameters for inspecting the TOF profile of this peak.
figNumber = 1
fracHKL = 0.5
#dQPixel = ICCFT.getPixelStep(peak)
dtSpread = 0.015
dtSpreadToPlot = [0.01]
wavelength = peak.getWavelength() #in Angstrom
energy = 81.804 / wavelength**2 / 1000.0 #in eV
flightPath = peak.getL1() + peak.getL2() #in m
scatteringHalfAngle = 0.5 * peak.getScattering()
dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=fracHKL))
def PyExec(self):
    """Integrate peaks by fitting a 3D (ICC x BVG) profile to each peak.

    Same flow as the sibling implementation: read properties, fit each entry of
    ``peaksToFit`` with ``BVGFT.get3DPeak``, write intensities back onto a clone of
    the peaks workspace, and collect fit parameters in a table workspace.
    """
    import ICCFitTools as ICCFT
    import BVGFitTools as BVGFT
    from scipy.ndimage.filters import convolve

    # Algorithm properties.
    MDdata = self.getProperty('InputWorkspace').value
    peaks_ws = self.getProperty('PeaksWorkspace').value
    fracStop = self.getProperty('FracStop').value
    dQMax = self.getProperty('DQMax').value
    UBFile = self.getProperty('UBFile').value
    padeFile = self.getProperty('ModeratorCoefficientsFile').value
    strongPeaksParamsFile = self.getProperty('StrongPeakParamsFile').value
    forceCutoff = self.getProperty('IntensityCutoff').value
    edgeCutoff = self.getProperty('EdgeCutoff').value
    peakNumberToFit = self.getProperty('PeakNumber').value
    pplmin_frac = self.getProperty('MinpplFrac').value
    pplmax_frac = self.getProperty('MaxpplFrac').value
    sampleRun = peaks_ws.getPeak(0).getRunNumber()
    q_frame='lab'
    mtd['MDdata'] = MDdata
    zBG = 1.96  # background z-score cutoff
    neigh_length_m=3  # smoothing-kernel edge length (voxels)
    iccFitDict = ICCFT.parseConstraints(peaks_ws) #Contains constraints and guesses for ICC Fitting
    padeCoefficients = ICCFT.getModeratorCoefficients(padeFile)
    # There are a few instrument specific parameters that we define here. In some cases,
    # it may improve fitting to tweak these parameters, but for simplicity we define these here.
    # The default values are good for MaNDi - new instruments can be added by adding a different elif
    # statement.
    # If you change these values or add an instrument, documentation should also be changed.
    try:
        numDetRows = peaks_ws.getInstrument().getIntParameter("numDetRows")[0]
        numDetCols = peaks_ws.getInstrument().getIntParameter("numDetCols")[0]
        nPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
        nTheta = peaks_ws.getInstrument().getIntParameter("numBinsTheta")[0]
        # NOTE(review): redundant duplicate of the numBinsPhi read above.
        nPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
        mindtBinWidth = peaks_ws.getInstrument().getNumberParameter("mindtBinWidth")[0]
        maxdtBinWidth = peaks_ws.getInstrument().getNumberParameter("maxdtBinWidth")[0]
        fracHKL = peaks_ws.getInstrument().getNumberParameter("fracHKL")[0]
        dQPixel = peaks_ws.getInstrument().getNumberParameter("dQPixel")[0]
        peakMaskSize = peaks_ws.getInstrument().getIntParameter("peakMaskSize")[0]
    except:
        logger.error("Cannot find all parameters in instrument parameters file.")
        raise
    UBMatrix = self.getUBMatrix(peaks_ws, UBFile)
    dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
    dQ[dQ>dQMax] = dQMax  # cap the box half-width
    qMask = ICCFT.getHKLMask(UBMatrix, frac=fracHKL, dQPixel=dQPixel,dQ=dQ)
    generateStrongPeakParams, strongPeakParams, strongPeakParams_ws, needsForcedProfile, \
        needsForcedProfileIDX, canFitProfileIDX, numPeaksCanFit, peaksToFit = \
        self.initializeStrongPeakSettings(strongPeaksParamsFile, peaks_ws, sampleRun,
                                          forceCutoff, edgeCutoff, numDetRows, numDetCols)
    if peakNumberToFit>-1:
        peaksToFit = [peakNumberToFit]

    # Create the parameters workspace
    keys = ['peakNumber','Alpha', 'Beta', 'R', 'T0', 'bgBVG', 'chiSq3d', 'chiSq', 'dQ', 'KConv',
            'MuPH', 'MuTH', 'newQ', 'Scale', 'scale3d', 'SigP', 'SigX', 'SigY', 'Intens3d', 'SigInt3d']
    datatypes = ['float']*len(keys)
    datatypes[np.where(np.array(keys)=='newQ')[0][0]] = 'V3D'
    params_ws = CreateEmptyTableWorkspace()
    for key, datatype in zip(keys,datatypes):
        params_ws.addColumn(datatype, key)

    # And we're off!
    peaks_ws_out = peaks_ws.clone()
    np.warnings.filterwarnings('ignore')  # There can be a lot of warnings for bad solutions that get rejected.
    progress = Progress(self, 0.0, 1.0, len(peaksToFit))
    sigX0Params, sigY0, sigP0Params = self.getBVGInitialGuesses(peaks_ws, strongPeakParams_ws)
    for fitNumber, peakNumber in enumerate(peaksToFit):#range(peaks_ws.getNumberPeaks()):
        peakNumber = int(peakNumber)  # peaksToFit may hold numpy integers
        peak = peaks_ws_out.getPeak(peakNumber)
        progress.report(' ')
        if peak.getRunNumber() != MDdata.getExperimentInfo(0).getRunNumber():
            logger.warning('Peak number %i has run number %i but MDWorkspace is from run number %i. Skipping this peak.'%(
                peakNumber, peak.getRunNumber(), MDdata.getExperimentInfo(0).getRunNumber()))
            continue
        try:
            box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDdata, UBMatrix, peakNumber, dQ,
                                      fracHKL=0.5, dQPixel=dQPixel, q_frame=q_frame)
            if ~needsForcedProfile[peakNumber]:
                strongPeakParamsToSend = None
            else:
                strongPeakParamsToSend = strongPeakParams
            # Will allow forced weak and edge peaks to be fit using a neighboring peak profile
            Y3D, goodIDX, pp_lambda, params = BVGFT.get3DPeak(peak, peaks_ws, box, padeCoefficients, qMask,
                                                              nTheta=nTheta, nPhi=nPhi, plotResults=False,
                                                              zBG=zBG, fracBoxToHistogram=1.0, bgPolyOrder=1,
                                                              strongPeakParams=strongPeakParamsToSend,
                                                              q_frame=q_frame, mindtBinWidth=mindtBinWidth,
                                                              maxdtBinWidth=maxdtBinWidth,
                                                              pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
                                                              forceCutoff=forceCutoff, edgeCutoff=edgeCutoff,
                                                              peakMaskSize=peakMaskSize,
                                                              iccFitDict=iccFitDict, sigX0Params=sigX0Params,
                                                              sigY0=sigY0, sigP0Params=sigP0Params,
                                                              fitPenalty=1.e7)
            # First we get the peak intensity
            peakIDX = Y3D/Y3D.max() > fracStop
            intensity = np.sum(Y3D[peakIDX])
            # Now the number of background counts under the peak assuming a constant bg across the box
            n_events = box.getNumEventsArray()
            convBox = 1.0*np.ones([neigh_length_m, neigh_length_m,neigh_length_m]) / neigh_length_m**3
            conv_n_events = convolve(n_events,convBox)
            bgIDX = np.logical_and.reduce(np.array([~goodIDX, qMask, conv_n_events>0]))
            bgEvents = np.mean(n_events[bgIDX])*np.sum(peakIDX)
            # Now we consider the variation of the fit. These are done as three independent fits, so we need to
            # consider the variance within our fit sig^2 = sum(N*(yFit-yData)) / sum(N) and scale by the number
            # of parameters that go into the fit. In total: 10 (removing scale variables)
            w_events = n_events.copy()
            w_events[w_events==0] = 1  # avoid zero weights
            varFit = np.average((n_events[peakIDX]-Y3D[peakIDX])*(n_events[peakIDX]-Y3D[peakIDX]),
                                weights=(w_events[peakIDX]))
            sigma = np.sqrt(intensity + bgEvents + varFit)
            compStr = 'peak {:d}; original: {:4.2f} +- {:4.2f}; new: {:4.2f} +- {:4.2f}'.format(
                peakNumber, peak.getIntensity(), peak.getSigmaIntensity(), intensity, sigma)
            logger.information(compStr)
            # Save the results
            params['peakNumber'] = peakNumber
            params['Intens3d'] = intensity
            params['SigInt3d'] = sigma
            params['newQ'] = V3D(params['newQ'][0],params['newQ'][1],params['newQ'][2])
            params_ws.addRow(params)
            peak.setIntensity(intensity)
            peak.setSigmaIntensity(sigma)
            if generateStrongPeakParams and ~needsForcedProfile[peakNumber]:
                # Possibly record this freely-fit peak as a template for forcing weak/edge peaks.
                qPeak = peak.getQLabFrame()
                theta = np.arctan2(qPeak[2], np.hypot(qPeak[0],qPeak[1])) #2theta
                try:
                    p = mtd['__fitSigX0_Parameters'].column(1)[:-1]
                    tol = 0.2 #We should have a good idea now - only allow 20% variation
                except:
                    p = peaks_ws.getInstrument().getStringParameter("sigSC0Params")
                    p = np.array(str(p).strip('[]\'').split(),dtype=float)
                    tol = 5.0 #High tolerance since we don't know what the answer will be
                predSigX = BVGFT.coshPeakWidthModel(theta, p[0],p[1],p[2],p[3])
                if np.abs((params['SigX'] - predSigX)/1./predSigX) < tol:
                    strongPeakParams[fitNumber, 0] = np.arctan2(qPeak[1], qPeak[0]) # phi
                    strongPeakParams[fitNumber, 1] = np.arctan2(qPeak[2], np.hypot(qPeak[0],qPeak[1])) #theta
                    strongPeakParams[fitNumber, 2] = params['scale3d']
                    strongPeakParams[fitNumber, 3] = params['MuTH']
                    strongPeakParams[fitNumber, 4] = params['MuPH']
                    strongPeakParams[fitNumber, 5] = params['SigX']
                    strongPeakParams[fitNumber, 6] = params['SigY']
                    strongPeakParams[fitNumber, 7] = params['SigP']
                    strongPeakParams[fitNumber, 8] = peakNumber
                    strongPeakParams_ws.addRow(strongPeakParams[fitNumber])
                    sigX0Params, sigY0, sigP0Params = self.getBVGInitialGuesses(peaks_ws, strongPeakParams_ws)
        except KeyboardInterrupt:
            np.warnings.filterwarnings('default') # Re-enable on exit
            raise
        except:
            #raise
            logger.warning('Error fitting peak number ' + str(peakNumber))
            peak.setIntensity(0.0)
            peak.setSigmaIntensity(1.0)

    # Cleanup
    for wsName in mtd.getObjectNames():
        if 'fit_' in wsName or 'bvgWS' in wsName or 'tofWS' in wsName or 'scaleWS' in wsName:
            mtd.remove(wsName)
    np.warnings.filterwarnings('default') # Re-enable on exit

    # Set the output
    self.setProperty('OutputPeaksWorkspace', peaks_ws_out)
    self.setProperty('OutputParamsWorkspace', params_ws)
def getImageFromBox(box, UBMatrix, peak, qMaskSimulated=None, rebinToHKL=False, nVoxelsPerSide=32,
                    hklRebinFrac=0.8, returnErrorSq=False):
    """Turn an MD box into a standardized (zero-mean, unit-std) image for ML use.

    Parameters
    ----------
    box : MD box with getSignalArray()/getErrorSquaredArray()
    UBMatrix, peak : only used when rebinToHKL is True (goniometer/HKL transform)
    qMaskSimulated : optional boolean mask applied to the cropped image
    rebinToHKL : rebin events onto an HKL grid instead of cropping in Q
    nVoxelsPerSide : output image edge length
    hklRebinFrac : half-width (in hkl units) of the rebinning window
    returnErrorSq : also return the cropped squared-error array (not available
        in HKL rebin mode; ``None`` is returned in its place)

    Returns
    -------
    (n_events_cropped, image) or (n_events_cropped, n_errorsq_cropped, image),
    where image has shape 1*nX*nY*nZ*1.
    """
    import warnings
    n_events = box.getSignalArray()
    n_errorsq = box.getErrorSquaredArray()
    if rebinToHKL:
        if returnErrorSq:
            # BUGFIX: previously a UserWarning instance was constructed but never
            # emitted (a silent no-op); actually warn now.
            warnings.warn('mltools - Cannot return error squared array in hkl rebin mode!')
        # Transform each voxel's Q position to HKL via T = (R.UB)^-1 / 2pi.
        R = peak.getGoniometerMatrix()
        T = np.linalg.inv(R.dot(UBMatrix)) / 2 / np.pi
        QX, QY, QZ = ICCFT.getQXQYQZ(box)
        H, K, L = T.dot(np.array([QX.ravel(), QY.ravel(), QZ.ravel()])).reshape(
            [3, QX.shape[0], QX.shape[1], QX.shape[2]])
        h0, k0, l0 = peak.getHKL()
        hBins = np.linspace(h0 - hklRebinFrac, h0 + hklRebinFrac, nVoxelsPerSide + 1)
        kBins = np.linspace(k0 - hklRebinFrac, k0 + hklRebinFrac, nVoxelsPerSide + 1)
        lBins = np.linspace(l0 - hklRebinFrac, l0 + hklRebinFrac, nVoxelsPerSide + 1)
        useIDX_X = n_events > 0
        nVect = n_events[useIDX_X]
        hVect_X = H[useIDX_X]
        kVect_X = K[useIDX_X]
        lVect_X = L[useIDX_X]
        image, edges = np.histogramdd([hVect_X, kVect_X, lVect_X], weights=nVect,
                                      bins=np.array([hBins, kBins, lBins]))
        n_events_cropped = image.copy()
        # BUGFIX: the original left n_errorsq_cropped undefined in this branch, so
        # returnErrorSq=True crashed with NameError; return None instead.
        n_errorsq_cropped = None
        image = image / image.max()
        image = (image - np.mean(image)) / np.std(image)
        image = np.expand_dims(image, axis=3)
        image = np.expand_dims(image, axis=0)  #1*nX*nY*nZ*1
    else:
        # Crop a nVoxelsPerSide**3 cube around the box center.
        cX, cY, cZ = np.array(n_events.shape) // 2
        dX, dY, dZ = nVoxelsPerSide // 2, nVoxelsPerSide // 2, nVoxelsPerSide // 2
        image = n_events[cX - dX:cX + dX, cY - dY:cY + dY, cZ - dZ:cZ + dZ]  #crop
        if qMaskSimulated is not None:
            image = image * qMaskSimulated
        n_events_cropped = n_events[cX - dX:cX + dX, cY - dY:cY + dY, cZ - dZ:cZ + dZ]
        n_errorsq_cropped = n_errorsq[cX - dX:cX + dX, cY - dY:cY + dY, cZ - dZ:cZ + dZ]
        # Zero any voxel whose 26-neighborhood sum is non-finite.
        cb = np.ones([3, 3, 3])
        cb[1, 1, 1] = 0.
        ccc = convolve(image, cb)
        # NOTE(review): when qMaskSimulated is None, `image` is a view of the box's
        # signal array, so this write can modify the box in place - confirm intended.
        image[~np.isfinite(ccc)] = 0.
        if qMaskSimulated is not None:
            image *= qMaskSimulated
        # Normalize to [0, 1] then standardize (statistics restricted to the mask
        # when one is supplied).
        image = image / image.max()
        if qMaskSimulated is not None:
            image = (image - np.mean(image[qMaskSimulated])) / np.std(image[qMaskSimulated])
        else:
            image = (image - np.mean(image)) / np.std(image)
        image = np.expand_dims(image, axis=3)
        image = np.expand_dims(image, axis=0)  #1*nX*nY*nZ*1
    if returnErrorSq:
        return n_events_cropped, n_errorsq_cropped, image
    return n_events_cropped, image
def generateTrainingPeak(peak, box, Y3D, peakDict, UBMatrix, pRotation=0.5, rebinHKL=False, addNoise=False,
                         maxOffset=6, nVoxelsPerSide=32, peakThreshold=0.025, qMask=None, noiseScaleFactor=1.0):
    """Create one (data, model) training pair from a measured peak.

    Optionally rotates the volumes by random angles (probability ``pRotation``),
    rebins to HKL or crops in reciprocal space around a randomly offset center,
    records bookkeeping values in ``peakDict`` and optionally adds Poisson noise.

    Returns (n_simulated, Y_simulated, peakDict).
    """
    #qMask is only used for calculating max events

    def _randomCropWindow(shape):
        # Choose a nVoxelsPerSide**3 window centered at the array center plus a
        # random offset in [-maxOffset, maxOffset] per axis, clamped to the array.
        cX, cY, cZ = np.array(shape) // 2
        cX += np.random.randint(low=-1 * maxOffset, high=maxOffset + 1)
        cY += np.random.randint(low=-1 * maxOffset, high=maxOffset + 1)
        cZ += np.random.randint(low=-1 * maxOffset, high=maxOffset + 1)
        dX, dY, dZ = nVoxelsPerSide // 2, nVoxelsPerSide // 2, nVoxelsPerSide // 2
        lowX, highX = cX - dX, cX + dX
        lowY, highY = cY - dY, cY + dY
        lowZ, highZ = cZ - dZ, cZ + dZ
        if lowX < 0:
            lowX = 0
            highX = 2 * dX
        elif highX > shape[0]:
            highX = shape[0]
            lowX = highX - 2 * dX
        if lowY < 0:
            lowY = 0
            highY = 2 * dY
        elif highY > shape[1]:
            highY = shape[1]
            lowY = highY - 2 * dY
        if lowZ < 0:
            lowZ = 0
            highZ = 2 * dZ
        elif highZ > shape[2]:
            highZ = shape[2]
            lowZ = highZ - 2 * dZ
        return (cX, cY, cZ), (lowX, highX, lowY, highY, lowZ, highZ)

    n_events = box.getSignalArray()
    n_simulated = n_events.copy()
    Y_simulated = Y3D.copy()

    #Rotate the peak
    if (np.random.random() < pRotation):
        theta1, theta2, theta3 = (np.random.random(3) - 0.5) * 180
        n_simulated = rotate(n_simulated, theta1, axes=(1, 0), reshape=False)
        n_simulated = rotate(n_simulated, theta2, axes=(2, 0), reshape=False)
        n_simulated = rotate(n_simulated, theta3, axes=(2, 1), reshape=False)
        Y_simulated = rotate(Y_simulated, theta1, axes=(1, 0), reshape=False)
        Y_simulated = rotate(Y_simulated, theta2, axes=(2, 0), reshape=False)
        Y_simulated = rotate(Y_simulated, theta3, axes=(2, 1), reshape=False)
        peakDict['theta1'] = theta1
        peakDict['theta2'] = theta2
        peakDict['theta3'] = theta3
    else:
        peakDict['theta1'] = 0.
        peakDict['theta2'] = 0.
        peakDict['theta3'] = 0.

    if rebinHKL:
        # NOTE(review): this branch rebins the UNROTATED n_events/Y3D (the rotated
        # copies are discarded), matching the original behavior - confirm intended.
        R = peak.getGoniometerMatrix()
        T = np.linalg.inv(R.dot(UBMatrix)) / 2 / np.pi
        QX, QY, QZ = ICCFT.getQXQYQZ(box)
        H, K, L = T.dot(np.array([QX.ravel(), QY.ravel(), QZ.ravel()])).reshape(
            [3, QX.shape[0], QX.shape[1], QX.shape[2]])
        h0, k0, l0 = peak.getHKL()
        #We're going to downsample to 32**3 - let's just go a little off center to train mispredicted peaks
        hBins = np.linspace(h0 - 0.8, h0 + 0.8, nVoxelsPerSide + maxOffset + 1)
        kBins = np.linspace(k0 - 0.8, k0 + 0.8, nVoxelsPerSide + maxOffset + 1)
        lBins = np.linspace(l0 - 0.8, l0 + 0.8, nVoxelsPerSide + maxOffset + 1)
        useIDX_X = n_events > 0
        nVect = n_events[useIDX_X]
        hVect_X = H[useIDX_X]
        kVect_X = K[useIDX_X]
        lVect_X = L[useIDX_X]
        useIDX_Y = Y3D > 0
        YVect = Y3D[useIDX_Y]
        hVect_Y = H[useIDX_Y]
        kVect_Y = K[useIDX_Y]
        lVect_Y = L[useIDX_Y]
        n_simulated_hkl, edges = np.histogramdd([hVect_X, kVect_X, lVect_X], weights=nVect,
                                                bins=np.array([hBins, kBins, lBins]))
        Y_simulated_hkl, edges = np.histogramdd([hVect_Y, kVect_Y, lVect_Y], weights=YVect,
                                                bins=np.array([hBins, kBins, lBins]))
        (cX, cY, cZ), (lowX, highX, lowY, highY, lowZ, highZ) = _randomCropWindow(n_simulated_hkl.shape)
        peakDict['cX'] = cX
        peakDict['cY'] = cY
        peakDict['cZ'] = cZ
        n_simulated = n_simulated_hkl[lowX:highX, lowY:highY, lowZ:highZ]
        Y_simulated = Y_simulated_hkl[lowX:highX, lowY:highY, lowZ:highZ]
    else:  #Do in reciprocal space (not HKL)
        (cX, cY, cZ), (lowX, highX, lowY, highY, lowZ, highZ) = _randomCropWindow(n_events.shape)
        peakDict['cX'] = cX
        peakDict['cY'] = cY
        peakDict['cZ'] = cZ
        n_simulated = n_simulated[lowX:highX, lowY:highY, lowZ:highZ]
        Y_simulated = Y_simulated[lowX:highX, lowY:highY, lowZ:highZ]

    # Voxels belonging to the peak model, relative to the model maximum.
    peakIDX = Y_simulated / Y_simulated.max() > peakThreshold
    if qMask is not None:
        cX, cY, cZ = np.array(qMask.shape) // 2
        dX, dY, dZ = nVoxelsPerSide // 2, nVoxelsPerSide // 2, nVoxelsPerSide // 2
        qMask_simulated = qMask[cX - dX:cX + dX, cY - dY:cY + dY, cZ - dZ:cZ + dZ]
        peakDict['maxEvents'] = n_simulated[peakIDX].max()
        peakDict['bgEvents'] = n_simulated[qMask_simulated & ~peakIDX].mean()
    else:
        peakDict['maxEvents_noQMask'] = n_simulated[peakIDX].max()
        # BUGFIX: the original indexed with qMask_simulated here, which is undefined
        # in this branch and raised NameError; use the non-peak voxels directly.
        peakDict['bgEvents_noQMask'] = n_simulated[~peakIDX].mean()

    #Add noise to the n_simulated
    if addNoise:
        #bgNoiseLevel = 10.#ICCFT.get_pp_lambda(n_simulated, n_simulated>0)[0]
        if qMask is not None:
            bgNoiseLevel = n_simulated[peakIDX].max() * noiseScaleFactor
        else:
            bgNoiseLevel = n_simulated.max() * noiseScaleFactor
            print('Warning! mltools::generateTrainingPeak has no qMask. Might add a lot of noise!')
        pp_lambda = np.random.random() * bgNoiseLevel
        YNoise = np.random.poisson(lam=pp_lambda, size=n_simulated.shape)
        peakDict['noiseAdded'] = pp_lambda
        n_simulated = n_simulated + YNoise
    else:
        peakDict['noiseAdded'] = 0.
    return n_simulated, Y_simulated, peakDict
# Input data files and reduction parameters for this sample.
peaksFile = '/home/ntv/Desktop/beta_lac_firstxtal_july18beamtime_pf.integrate'
UBFile = '/home/ntv/Desktop/beta_lac_firstxtal_july18beamtime_pf.mat'
DetCalFile = '/home/ntv/mandi_preprocessing/MANDI_June2018.DetCal'
workDir = '/SNS/users/ntv/dropbox/' #End with '/'
nxsTemplate = '/SNS/MANDI/IPTS-21326/nexus/MANDI_%i.nxs.h5'
dQPixel = 0.003
q_frame = 'lab'
pplmin_frac = 0.9
pplmax_frac = 1.1
mindtBinWidth = 15
maxdtBinWidth = 50
moderatorFile = '/home/ntv/integrate/bl11_moderatorCoefficients_2018.dat'

# Some parameters
peakToGet = 3 #Arbitrary - just has to be less than the number of peaks
padeCoefficients = ICCFT.getModeratorCoefficients(moderatorFile)
removeEdges = False
importPeaks = True
fractionForTesting = 0.1

# Re-use the peaks workspace if it is already loaded in the ADS (matched by comment).
for ws in mtd.getObjectNames():
    if mtd[ws].getComment() == '%s' % peaksFile:
        print(' using already loaded peaks file')  # was a Python-2 print statement
        importPeaks = False
        peaks_ws = mtd[ws]
        break
if importPeaks:
    peaks_ws = LoadIsawPeaks(Filename=peaksFile)
    peaks_ws.setComment(peaksFile)
SortPeaksWorkspace(InputWorkspace='peaks_ws', OutputWorkspace='peaks_ws', ColumnNameToSortBy='PeakNumber')
def PyExec(self):
    """Profile-fit integration: fit a 3D (ICC x BVG) model to each peak of the
    requested run and write intensities plus fit parameters to the outputs.

    This variant loads the UB matrix itself, optionally unpickles a strong-peak
    parameter file, and otherwise builds the strong-peak library on the fly
    (fitting unforced peaks first).
    """
    import ICCFitTools as ICCFT
    import BVGFitTools as BVGFT
    from mantid.simpleapi import LoadIsawUB
    import pickle
    from scipy.ndimage.filters import convolve

    # --- Algorithm properties ---
    MDdata = self.getProperty('InputWorkspace').value
    peaks_ws = self.getProperty('PeaksWorkspace').value
    fracStop = self.getProperty('FracStop').value
    dQMax = self.getProperty('DQMax').value
    UBFile = self.getProperty('UBFile').value
    padeFile = self.getProperty('ModeratorCoefficientsFile').value
    strongPeaksParamsFile = self.getProperty('StrongPeakParamsFile').value
    forceCutoff = self.getProperty('IntensityCutoff').value
    edgeCutoff = self.getProperty('EdgeCutoff').value
    peakNumberToFit = self.getProperty('PeakNumber').value
    pplmin_frac = self.getProperty('MinpplFrac').value
    pplmax_frac = self.getProperty('MaxpplFrac').value
    sampleRun = self.getProperty('RunNumber').value
    q_frame = 'lab'
    mtd['MDdata'] = MDdata
    zBG = 1.96  # background z-score cutoff
    neigh_length_m = 3  # smoothing-kernel edge length (voxels)
    iccFitDict = ICCFT.parseConstraints(peaks_ws)  #Contains constraints and guesses for ICC Fitting
    padeCoefficients = ICCFT.getModeratorCoefficients(padeFile)

    # Load the UB Matrix if one is not already loaded
    if UBFile == '' and peaks_ws.sample().hasOrientedLattice():
        logger.information("Using UB file already available in PeaksWorkspace")
    else:
        try:
            LoadIsawUB(InputWorkspace=peaks_ws, FileName=UBFile)
        except:
            logger.error("peaks_ws does not have a UB matrix loaded. Must provide a file")
    UBMatrix = peaks_ws.sample().getOrientedLattice().getUB()

    # There are a few instrument specific parameters that we define here. In some cases,
    # it may improve fitting to tweak these parameters, but for simplicity we define these here.
    # The default values are good for MaNDi - new instruments can be added by adding a different elif
    # statement.
    # If you change these values or add an instrument, documentation should also be changed.
    try:
        numDetRows = peaks_ws.getInstrument().getIntParameter("numDetRows")[0]
        numDetCols = peaks_ws.getInstrument().getIntParameter("numDetCols")[0]
        nPhi = peaks_ws.getInstrument().getIntParameter("numBinsPhi")[0]
        nTheta = peaks_ws.getInstrument().getIntParameter("numBinsTheta")[0]
        mindtBinWidth = peaks_ws.getInstrument().getNumberParameter("mindtBinWidth")[0]
        maxdtBinWidth = peaks_ws.getInstrument().getNumberParameter("maxdtBinWidth")[0]
        fracHKL = peaks_ws.getInstrument().getNumberParameter("fracHKL")[0]
        dQPixel = peaks_ws.getInstrument().getNumberParameter("dQPixel")[0]
        peakMaskSize = peaks_ws.getInstrument().getIntParameter("peakMaskSize")[0]
    except:
        # BUGFIX: the original re-raised BEFORE logging (and then called
        # sys.exit(1)), so the error message and exit were unreachable.
        # Log first, then propagate - consistent with the sibling implementations.
        logger.error("Cannot find all parameters in instrument parameters file.")
        raise

    dQ = np.abs(ICCFT.getDQFracHKL(UBMatrix, frac=0.5))
    dQ[dQ > dQMax] = dQMax  # cap the box half-width
    qMask = ICCFT.getHKLMask(UBMatrix, frac=fracHKL, dQPixel=dQPixel, dQ=dQ)

    # Strong peak profiles - we set up the workspace and determine which peaks we'll fit.
    strongPeakKeys = ['Phi', 'Theta', 'Scale3d', 'FitPhi', 'FitTheta', 'SigTheta',
                      'SigPhi', 'SigP', 'PeakNumber']
    strongPeakDatatypes = ['float'] * len(strongPeakKeys)
    strongPeakParams_ws = CreateEmptyTableWorkspace()
    for key, datatype in zip(strongPeakKeys, strongPeakDatatypes):
        strongPeakParams_ws.addColumn(datatype, key)

    # Either load the provided strong peaks file or set the flag to generate it as we go
    if strongPeaksParamsFile != "":
        if sys.version_info[0] == 3:
            # latin1 lets Python 3 read pickles written by Python 2.
            strongPeakParams = pickle.load(open(strongPeaksParamsFile, 'rb'), encoding='latin1')
        else:
            strongPeakParams = pickle.load(open(strongPeaksParamsFile, 'rb'))
        generateStrongPeakParams = False
        # A strong peaks file was provided - we don't need to generate it on the fly so we can fit in order
        runNumbers = np.array(peaks_ws.column('RunNumber'))
        peaksToFit = np.where(runNumbers == sampleRun)[0]
        intensities = np.array(peaks_ws.column('Intens'))
        rows = np.array(peaks_ws.column('Row'))
        cols = np.array(peaks_ws.column('Col'))
        runNumbers = np.array(peaks_ws.column('RunNumber'))
        intensIDX = intensities < forceCutoff
        edgeIDX = np.logical_or.reduce(np.array([rows < edgeCutoff,
                                                 rows > numDetRows - edgeCutoff,
                                                 cols < edgeCutoff,
                                                 cols > numDetCols - edgeCutoff]))
        needsForcedProfile = np.logical_or(intensIDX, edgeIDX)
        needsForcedProfileIDX = np.where(needsForcedProfile)[0]
        canFitProfileIDX = np.where(~needsForcedProfile)[0]
        numPeaksCanFit = len(canFitProfileIDX)
        # We can populate the strongPeakParams_ws now
        for row in strongPeakParams:
            strongPeakParams_ws.addRow(row)
    else:
        generateStrongPeakParams = True
        #Figure out which peaks to fit without forcing a profile and set those to be fit first
        intensities = np.array(peaks_ws.column('Intens'))
        rows = np.array(peaks_ws.column('Row'))
        cols = np.array(peaks_ws.column('Col'))
        runNumbers = np.array(peaks_ws.column('RunNumber'))
        intensIDX = intensities < forceCutoff
        edgeIDX = np.logical_or.reduce(np.array([rows < edgeCutoff,
                                                 rows > numDetRows - edgeCutoff,
                                                 cols < edgeCutoff,
                                                 cols > numDetCols - edgeCutoff]))
        needsForcedProfile = np.logical_or(intensIDX, edgeIDX)
        needsForcedProfileIDX = np.where(needsForcedProfile)[0]
        canFitProfileIDX = np.where(~needsForcedProfile)[0]
        numPeaksCanFit = len(canFitProfileIDX)
        peaksToFit = np.append(canFitProfileIDX, needsForcedProfileIDX)  #Will fit in this order
        peaksToFit = peaksToFit[runNumbers[peaksToFit] == sampleRun]
        #Initialize our strong peaks dictionary
        strongPeakParams = np.empty([numPeaksCanFit, 9])
    if peakNumberToFit > -1:
        peaksToFit = [peakNumberToFit]

    # Create the parameters workspace
    keys = ['peakNumber', 'Alpha', 'Beta', 'R', 'T0', 'bgBVG', 'chiSq3d', 'chiSq', 'dQ', 'KConv',
            'MuPH', 'MuTH', 'newQ', 'Scale', 'scale3d', 'SigP', 'SigX', 'SigY', 'Intens3d', 'SigInt3d']
    datatypes = ['float'] * len(keys)
    datatypes[np.where(np.array(keys) == 'newQ')[0][0]] = 'V3D'
    params_ws = CreateEmptyTableWorkspace()
    for key, datatype in zip(keys, datatypes):
        params_ws.addColumn(datatype, key)

    # And we're off!
    peaks_ws_out = peaks_ws.clone()
    np.warnings.filterwarnings('ignore')  # There can be a lot of warnings for bad solutions that get rejected.
    progress = Progress(self, 0.0, 1.0, len(peaksToFit))
    for fitNumber, peakNumber in enumerate(peaksToFit):  #range(peaks_ws.getNumberPeaks()):
        peak = peaks_ws_out.getPeak(peakNumber)
        progress.report(' ')
        try:
            box = ICCFT.getBoxFracHKL(peak, peaks_ws, MDdata, UBMatrix, peakNumber, dQ,
                                      fracHKL=0.5, dQPixel=dQPixel, q_frame=q_frame)
            if ~needsForcedProfile[peakNumber]:
                strongPeakParamsToSend = None
            else:
                strongPeakParamsToSend = strongPeakParams
            # Will allow forced weak and edge peaks to be fit using a neighboring peak profile
            Y3D, goodIDX, pp_lambda, params = BVGFT.get3DPeak(peak, peaks_ws, box, padeCoefficients,
                                                              qMask, nTheta=nTheta, nPhi=nPhi,
                                                              plotResults=False, zBG=zBG,
                                                              fracBoxToHistogram=1.0, bgPolyOrder=1,
                                                              strongPeakParams=strongPeakParamsToSend,
                                                              q_frame=q_frame, mindtBinWidth=mindtBinWidth,
                                                              maxdtBinWidth=maxdtBinWidth,
                                                              pplmin_frac=pplmin_frac, pplmax_frac=pplmax_frac,
                                                              forceCutoff=forceCutoff, edgeCutoff=edgeCutoff,
                                                              peakMaskSize=peakMaskSize,
                                                              iccFitDict=iccFitDict)
            # First we get the peak intensity
            peakIDX = Y3D / Y3D.max() > fracStop
            intensity = np.sum(Y3D[peakIDX])
            # Now the number of background counts under the peak assuming a constant bg across the box
            n_events = box.getNumEventsArray()
            convBox = 1.0 * np.ones([neigh_length_m, neigh_length_m, neigh_length_m]) / neigh_length_m**3
            conv_n_events = convolve(n_events, convBox)
            bgIDX = np.logical_and.reduce(np.array([~goodIDX, qMask, conv_n_events > 0]))
            bgEvents = np.mean(n_events[bgIDX]) * np.sum(peakIDX)
            # Now we consider the variation of the fit. These are done as three independent fits, so we need to
            # consider the variance within our fit sig^2 = sum(N*(yFit-yData)) / sum(N) and scale by the number
            # of parameters that go into the fit. In total: 10 (removing scale variables)
            w_events = n_events.copy()
            w_events[w_events == 0] = 1  # avoid zero weights
            varFit = np.average((n_events[peakIDX] - Y3D[peakIDX]) *
                                (n_events[peakIDX] - Y3D[peakIDX]),
                                weights=(w_events[peakIDX]))
            sigma = np.sqrt(intensity + bgEvents + varFit)
            compStr = 'peak {:d}; original: {:4.2f} +- {:4.2f}; new: {:4.2f} +- {:4.2f}'.format(
                peakNumber, peak.getIntensity(), peak.getSigmaIntensity(), intensity, sigma)
            logger.information(compStr)
            # Save the results
            params['peakNumber'] = peakNumber
            params['Intens3d'] = intensity
            params['SigInt3d'] = sigma
            params['newQ'] = V3D(params['newQ'][0], params['newQ'][1], params['newQ'][2])
            params_ws.addRow(params)
            peak.setIntensity(intensity)
            peak.setSigmaIntensity(sigma)
            if generateStrongPeakParams and ~needsForcedProfile[peakNumber]:
                # Record this freely-fit peak as a template for forcing weak/edge peaks.
                qPeak = peak.getQLabFrame()
                strongPeakParams[fitNumber, 0] = np.arctan2(qPeak[1], qPeak[0])  # phi
                strongPeakParams[fitNumber, 1] = np.arctan2(qPeak[2], np.hypot(qPeak[0], qPeak[1]))  #2theta
                strongPeakParams[fitNumber, 2] = params['scale3d']
                strongPeakParams[fitNumber, 3] = params['MuTH']
                strongPeakParams[fitNumber, 4] = params['MuPH']
                strongPeakParams[fitNumber, 5] = params['SigX']
                strongPeakParams[fitNumber, 6] = params['SigY']
                strongPeakParams[fitNumber, 7] = params['SigP']
                strongPeakParams[fitNumber, 8] = peakNumber
                strongPeakParams_ws.addRow(strongPeakParams[fitNumber])
        except KeyboardInterrupt:
            np.warnings.filterwarnings('default')  # Re-enable on exit
            raise
        except:
            #raise
            # Log the failure (the sibling implementations do; the original here
            # swallowed it silently) and mark the peak as unfit.
            logger.warning('Error fitting peak number ' + str(peakNumber))
            peak.setIntensity(0.0)
            peak.setSigmaIntensity(1.0)

    # Cleanup
    for wsName in mtd.getObjectNames():
        if 'fit_' in wsName or 'bvgWS' in wsName or 'tofWS' in wsName or 'scaleWS' in wsName:
            mtd.remove(wsName)
    np.warnings.filterwarnings('default')  # Re-enable on exit

    # Set the output
    self.setProperty('OutputPeaksWorkspace', peaks_ws_out)
    self.setProperty('OutputParamsWorkspace', params_ws)