def pseudovoigtfit(xdata, ydata, x0=None):
    [m, b, CoD, yD, err, mErr,
     bErr] = dm.linReg(np.concatenate((xdata[0:5], xdata[-5:])),
                       np.concatenate((ydata[0:5], ydata[-5:])))
    [yMax, i] = bf.max(ydata - (m * xdata + b))
    yLeft = m * xdata[0] + b  # background level at left edge
    yRight = m * xdata[-1] + b  # background level at right edge
    if x0 is None:
        ySum = np.sum(ydata * np.abs(np.gradient(xdata))) - (0.5 * np.abs(
            (xdata[-1] - xdata[0]) * (yRight - yLeft)) + np.abs(
                bf.min(yLeft, yRight)[0] *
                (xdata[-1] - xdata[0])))  # estimated area of peak
        xMax = xdata[i]  # x value at the maximum
        sigma = ySum / (yMax * np.pi)  # estimate width of peak
        x0 = [ySum, sigma, xMax, m, b, 0.5]  # start values Pseudo-Voigt
    x_par = op.leastsq(pseudovoigtopt, x0, (xdata, ydata))
    x = x_par[0]
    yFit = pseudovoigtfunction(x, xdata)
    # check quality of peak
    meanNoise = (yLeft + yRight) / 2
    peakQuality = (yMax + meanNoise) / (meanNoise + 3 * meanNoise**0.5)
    err = 0
    # adapt dimensions of x if x0 has different dimensions
    if bf.size(x0) != bf.size(x):
        x = np.transpose(x)
    return x, x0, yMax, yFit, peakQuality, err, meanNoise
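# A minimal, self-contained sketch of the same pseudo-Voigt fit using only
# numpy/scipy (no project helpers such as bf or dm). The parameter order
# mirrors x0 above: [area A, width sigma, center xc, slope m, offset b,
# mixing eta]; the synthetic data is purely illustrative.
import numpy as np
import scipy.optimize as op

def _pv(par, x):
    A, s, xc, m, b, eta = par
    fL = A * s / (np.pi * (s**2 + (x - xc)**2))  # Lorentz component
    fG = A / (s * (2 * np.pi)**0.5) * np.exp(-0.5 * ((x - xc) / s)**2)  # Gauss
    return eta * fL + (1 - eta) * fG + m * x + b  # mix plus linear background

xs = np.linspace(0, 10, 200)
ys = _pv([50, 0.4, 5.0, 0.1, 2.0, 0.3], xs) + 0.05 * np.random.randn(200)
fit, _ = op.leastsq(lambda p, x, y: _pv(p, x) - y, [40, 1, 4, 0, 0, 0.5],
                    (xs, ys))
print(fit)  # should land close to the generating parameters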
def pearson7fit(xdata, ydata, x0=None):
    [m, b, CoD, yD, err, mErr,
     bErr] = dm.linReg(np.concatenate((xdata[0:5], xdata[-5:])),
                       np.concatenate((ydata[0:5], ydata[-5:])))
    [yMax, i] = bf.max(ydata - (m * xdata + b))
    yLeft = m * xdata[0] + b  # background level at left edge
    yRight = m * xdata[-1] + b  # background level at right edge
    if x0 is None:
        ySum = np.sum(ydata * np.abs(np.gradient(xdata))) - (0.5 * np.abs(
            (xdata[-1] - xdata[0]) * (yRight - yLeft)) + np.abs(
                bf.min(yLeft, yRight)[0] *
                (xdata[-1] - xdata[0])))  # estimated area of peak
        xMax = xdata[i]  # x value at the maximum
        sigma = ySum / (yMax * (2 * np.pi)**0.5)  # estimate width of peak
        x0 = [ySum, sigma, xMax, m, b, 2.0]  # start values Pearson7
    else:
        x0 = [x0[0], x0[1], x0[2], x0[3], x0[4], 1]  # reuse given start values, reset shape exponent
    x_par = op.leastsq(pearson7opt, x0, (xdata, ydata))
    x = x_par[0]
    yFit = pearson7function(x, xdata)
    # adapt x to fit peak description
    x = [x[0], x[1] / (2 * x[5] - 3)**0.5, x[2], x[3], x[4], x[5]]
    # check quality of peak
    meanNoise = (yLeft + yRight) / 2
    peakQuality = (yMax + meanNoise) / (meanNoise + 3 * meanNoise**0.5)
    err = 0
    # adapt dimensions of x if x0 has different dimensions
    if bf.size(x0) != bf.size(x):
        x = np.transpose(x)
    return x, x0, yMax, yFit, peakQuality, err, meanNoise
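# The width rescaling above, x[1] / (2*x[5] - 3)**0.5, follows from the
# Pearson VII second moment: for f(x) ~ (1 + ((x - xc)/a)**2)**(-m),
# normalised by a*B(m - 1/2, 1/2), the variance is a**2/(2m - 3) for
# m > 3/2, so sigma = a/sqrt(2m - 3). A quick numerical check:
import numpy as np
import scipy.special as spe

a, m = 2.0, 3.0
x = np.linspace(-400, 400, 2_000_001)
pdf = (1 + (x / a)**2)**(-m) / (a * spe.beta(m - 0.5, 0.5))
print(np.trapz(pdf, x))         # ~1.0 (normalisation)
print(np.trapz(x**2 * pdf, x))  # ~a**2/(2m - 3) = 4/3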
def evalPeakData(settings, peakData, n=1):
    # check values of settings -> error handling, default values
    handle = -1  # handle of figure
    # I0 correction (line 25 of the original source)
    # Fourier filter (line 29 of the original source)
    # evaluate each peak
    analysisData = np.zeros((bf.size(settings["peakZones"], 0), 11))
    for i in range(bf.size(settings["peakZones"], 0)):
        x = np.array(peakData[:, 0])  # channels, energies or angles
        y = np.array(peakData[:, 1])  # intensities
        # determine start and end value in actual data
        if bf.length(settings["peakZones"]) == 1 and bf.size(
                settings["peakZones"], 0) == bf.size(settings["peakZones"], 1):
            startI = 0
            stopI = len(peakData[:, 1]) - 1  # all data
        else:
            startI = np.argwhere(
                x >= settings["peakZones"][i, 0])[0]  # start value
            startI = startI[0]
            stopI = np.argwhere(
                x <= settings["peakZones"][i, 1])[-1]  # end value
            stopI = stopI[0]
        xpart = x[startI:stopI + 1]
        ypart = y[startI:stopI + 1]
        # fit current peak
        filename = 'Daten' + str(n) + '.xls'
        if settings["peakFunction"] == 'Gauss':
            func = gaussfit
        elif settings["peakFunction"] == 'Pearson7':
            func = pearson7fit
        elif settings["peakFunction"] == 'Lorentz':
            func = lorentzfit
        elif settings["peakFunction"] == 'PseudoVoigt':
            func = pseudovoigtfit
        if settings["bestWindow"] == 1:
            [par, startI, stopI] = dc.bestwindowfit(func, x, y, startI, stopI)
            xpart = x[startI:stopI + 1]  # new relevant x values
            ypart = y[startI:stopI + 1]  # new relevant y values
            [par, par0, yMax, yFit, peakQuality, err,
             meanNoise] = func(xpart, ypart, par)
        else:
            [par, par0, yMax, yFit, peakQuality, err,
             meanNoise] = func(xpart, ypart)
        analysisData[i, :] = np.concatenate(
            ([xpart[0], xpart[-1]], par[0:3], [yMax, peakQuality,
                                               err], par[3:6]))
        if settings["saveText"] == 1 or settings["saveExcel"] == 1:
            # save results in Excel
            headerStr = "Peak-Start\tPeak-End\tPeakarea-A\tPeakwidth-s\tPeakpos-xc\tPeakintMax\tPeakquality\tDeltaVal\tBackSlope-m\tBackVal-b\tpar6"
            fg.excelsave(filename, headerStr, [analysisData[i, :]])
        # show results
        if settings["withGraphic"] == 1:
            handle = ps.picture(bf.size(settings["peakZones"], 0), yFit, xpart,
                                ypart, n, i)
    return handle, analysisData
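# Hypothetical call of evalPeakData, listing the settings keys the function
# actually reads; all values and the peakData array are illustrative only.
import numpy as np

settings = {
    'peakZones': np.array([[10.0, 20.0], [30.0, 40.0]]),  # one row per peak
    'peakFunction': 'PseudoVoigt',  # or 'Gauss', 'Lorentz', 'Pearson7'
    'bestWindow': 0,  # 1 enables the dc.bestwindowfit window optimisation
    'saveText': 0,
    'saveExcel': 0,
    'withGraphic': 0,
}
# peakData: column 0 = channels/energies/angles, column 1 = intensities
# handle, analysisData = evalPeakData(settings, peakData)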
def linReg(x, y, fixParName='', fixPar=0):  # scipy.stats.linregress(x, y=None)
    n = len(x)
    x = np.array(x)
    y = np.array(y)
    # check dimensions of values
    if bf.size(x, 0) != bf.size(y, 0) or bf.size(x, 1) != bf.size(y, 1):
        # try to transpose data
        if bf.size(x, 0) == bf.size(np.transpose(y), 0) and bf.size(
                x, 1) == bf.size(np.transpose(y), 1):
            y = np.transpose(y)
    sumX = sum(x)
    sumY = sum(y)
    meanX = sumX / n
    meanY = sumY / n
    sumXY = sum(x * y)
    sumXX = sum(x**2)
    if fixParName == "b":
        b = fixPar
        m = (sumXY - b * sumX) / sumXX
    elif fixParName == "m":
        m = fixPar
        b = meanY - m * meanX
    else:
        m = (n * sumXY - sumX * sumY) / (n * sumXX - sumX**2)
        b = meanY - m * meanX
        #b = (sumXX * sumY - sumX * sumXY) / (n * sumXX - sumX**2)
        #m = (sumXY - b * sumX) / sumXX
    yD = m * x + b
    # note: the division warns if all y values are equal (zero variance)
    CoD = 1 - sum((y - yD)**2) / sum((y - meanY)**2)  # coefficient of determination
    err = sum((y - yD)**2) / (n * (n - 1))  # variance
    mErr = err / sum((x - meanX)**2)
    bErr = err / n * sum(x**2) / sum((x - meanX)**2)
    return m, b, CoD, yD, err, mErr, bErr
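# Cross-check against scipy.stats.linregress (referenced in the comment on
# the def line above); slope, intercept and CoD should agree for 1-D data.
import numpy as np
import scipy.stats

xs = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
ys = 2.5 * xs + 1.0 + np.array([0.1, -0.2, 0.0, 0.2, -0.1])
ref = scipy.stats.linregress(xs, ys)
print(ref.slope, ref.intercept, ref.rvalue**2)
# m, b, CoD, yD, err, mErr, bErr = linReg(xs, ys)
# expected: m ~= ref.slope, b ~= ref.intercept, CoD ~= ref.rvalue**2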
def writeList(file, listData, delim='\t'):
    # write list data to file
    fid = open(file, 'w')
    for i in range(bf.size(listData, 0)):
        if bf.size(listData[i], 0) > 1:
            fid.write(str(listData[i][0]))
            for j in range(1, bf.size(listData[i], 0)):
                fid.write(delim + str(listData[i][j]))
        elif bf.size(listData[i], 0) == 1:
            fid.write(str(listData[i]))
        else:
            fid.write('')
        writeLine(fid, '')
    fid.close()
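# For reference, a plain-Python equivalent of writeList (assuming every
# entry of listData is itself a list), writing one tab-delimited line each:
def write_list_plain(file, listData, delim='\t'):
    with open(file, 'w') as fid:
        for row in listData:
            fid.write(delim.join(str(v) for v in row) + '\n')

# write_list_plain('results.txt', [[1, 2.5, 'a'], [3, 4.0, 'b']])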
def plotStresses(data, showErr=True):
	hklList = data['hklList']
	tauMean = data['tauMean']
	stresses = bf.getDictValOrDef(data, 'stresses')
	accuracy = bf.getDictValOrDef(data, 'accuracy')
	stressNames = ['s11-s33', 's22-s33', 's13', 's23', 's12', 's33']
	if stresses is not None and accuracy is not None:
		for i in range(bf.size(stresses, 1)):
			curStresses = np.round(stresses[:, i])
			if np.any(curStresses != 0):
				if showErr:
					pg.plotErrData(curStresses, np.round(accuracy[:, i]), tauMean, 'ro-', 'on',
						'Information depths in um', 'Residual stresses in MPa',
						'Residual stresses ' + stressNames[i])
				else:
					pg.plotData(curStresses, tauMean, 'ro-', 'on', 'Information depths in um',
						'Residual stresses in MPa', 'Residual stresses ' + stressNames[i])
	else:
		for stressName in stressNames:
			stressVals = bf.getDictValOrDef(data, stressName)
			accuracyVals = bf.getDictValOrDef(data, 'dev_' + stressName)
			if stressVals is not None and accuracyVals is not None:
				if np.any(np.asarray(stressVals) != 0):
					if showErr:
						pg.plotErrData(np.round(stressVals), np.round(accuracyVals), tauMean,
							'ro-', 'on', 'Information depths in um', 'Residual stresses in MPa',
							'Residual stresses ' + stressName)
					else:
						pg.plotData(np.round(stressVals), tauMean, 'rp-', 'on', 'Information depths in um',
							'Residual stresses in MPa', 'Residual stresses ' + stressName)
def pearson7function(par, x):
    if bf.size(par, 1) < 6:
        par = np.transpose(par)
    f = lambda para, x: para[0] * (1 + (
        (x - para[2]) / para[1])**2)**(-para[5]) / (para[1] * spe.beta(
            para[5] - 1 / 2, 1 / 2)) + (para[3] * x + para[4])
    return f(par, x)
def gaussfunction(par, x):
    if bf.size(par, 1) < 6:
        par = np.transpose(par)
    f = lambda para, x: (para[0] / (para[1] * (
        (2 * math.pi)**0.5))) * np.exp(-0.5 * (
            ((x - para[2])**2) /
            (para[1]**2))) + (para[3] * x + para[4])  # gauss function
    return f(par, x)
def lorentzfunction(par, x):
    if bf.size(par, 1) < 6:
        par = np.transpose(par)
    # lorentz function
    f = lambda para, x: (para[0] * para[1] /
                         (np.pi * (para[1]**2 +
                                   (x - para[2])**2)) + para[3] * x + para[4])
    return f(par, x)
def calcMue(energy, material):
    # energy in keV
    # mue in 1/um
    mue = np.zeros(bf.size(energy))
    if bf.size(material, 0) > 1:
        # matrix of absorption data
        mue = bf.ones(bf.size(energy)) * np.nan
        for i in range(bf.size(material, 0)):
            mask = (energy >= material[i, 0]) & (energy < material[i, 1])
            mue[mask] = material[i, 2] * energy[mask]**material[i, 3] / 10000
    else:
        # name of material
        if material == 'Fe':
            #mue = 0.0006 .* (energy ./ 1000).^-2.736 .* 7.89 / 10000; % in um
            mue[energy < 7.11138755] = 77083 * energy[
                energy < 7.11138755]**-2.66 / 10000  # in um
            mue[energy >= 7.11138755] = 739152 * energy[
                energy >= 7.11138755]**-2.755 / 10000  # in um
        elif material == 'Ni':
            mue[energy < 8.3328] = 95880 * energy[
                energy < 8.3328]**-2.556 / 10000  # in um
            mue[energy >= 8.3328] = 929809 * energy[
                energy >= 8.3328]**-2.708 / 10000  # in um
        else:
            mue = bf.ones(bf.size(energy)) * 10**6  # unknown material: effectively opaque
    if bf.size(mue, 0) < bf.size(mue, 1):
        mue = np.transpose(mue)
    return mue
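# Illustrative check of the piecewise absorption law used for 'Fe': the
# prefactor and exponent change at the Fe K-edge (7.11138755 keV), so the
# attenuation coefficient jumps upward across it. Constants are copied from
# calcMue above; numpy only.
import numpy as np

energy = np.array([5.0, 7.0, 7.2, 20.0])  # keV
mue = np.where(energy < 7.11138755,
               77083 * energy**-2.66 / 10000,
               739152 * energy**-2.755 / 10000)  # in 1/um
print(mue)  # decreases with energy, except for the jump across the K-edge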
def pseudovoigtfunction(par, x):
    if bf.size(par, 1) < 6:
        par = np.transpose(par)
    # lorentz function
    fL = lambda para, x: (para[0] * para[1] / (math.pi * (para[1]**2 +
                                                          (x - para[2])**2)))
    # gauss function
    fG = lambda para, x: (para[0] / (para[1] * (
        (2 * math.pi)**0.5))) * np.exp(-0.5 * (((x - para[2])**2) /
                                               (para[1]**2)))
    f = lambda para, x: para[5] * fL(para, x) + (1 - para[5]) * fG(para, x) + (
        para[3] * x + para[4])
    return f(par, x)
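# All three profiles above are area-normalised: with the linear background
# removed, each integrates to par[0]. A numeric check (sketch, numpy only;
# the grid is wide because the Lorentz tails converge slowly):
import numpy as np

A, s, xc, eta = 10.0, 0.5, 0.0, 0.3
x = np.linspace(-2000, 2000, 2_000_001)
gauss = A / (s * (2 * np.pi)**0.5) * np.exp(-0.5 * ((x - xc) / s)**2)
lorentz = A * s / (np.pi * (s**2 + (x - xc)**2))
pseudovoigt = eta * lorentz + (1 - eta) * gauss
for profile in (gauss, lorentz, pseudovoigt):
    print(np.trapz(profile, x))  # each ~A = 10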
def evalPeakFiles(settings, fileNames, saveFile=""):
    # check values of settings -> error handling
    #measureData = zeros((length(fileNames),8192,2))
    measureDataCell = []  #cell(len(fileNames),1)
    analysisData = np.zeros((len(fileNames), bf.size(settings["peakZones"],
                                                     0), 11))
    headers = []  #cell(len(fileNames),1)
    for i in range(len(fileNames)):
        # get current file
        curFileName = fileNames[i]
        [h, curAnalysisData, curMeasureData,
         curHead] = evalPeakFile(settings, curFileName, i)
        measureDataCell.append(curMeasureData)
        analysisData[i, :, :] = curAnalysisData
        headers.append(curHead)
        # optionally wait one second, then close the current peak-fit figure:
        # if settings["withGraphic"] == 1 and len(fileNames) > 1:
        #     plt.pause(1)
        #     plt.close(h)
    # transform measure data (combining into one array is not implemented;
    # kept as an empty list, the per-file data is in measureDataCell)
    measureData = []
    #save results to files
    if bf.isempty(saveFile):
        saveFile = fg.requestSaveFile((("All files", "*.*"), ), 'Save as...')
        if bf.isempty(saveFile):
            return
    resFiles = []
    for i in range(len(fileNames)):
        sourceFile = 'Daten' + str(i) + '.xls'
        if fg.existsFile(sourceFile):
            pathSrc, nameSrc = fg.fileparts(fileNames[i])
            pathDest, nameDest = fg.fileparts(saveFile)
            if settings["saveExcel"] == 1:
                resFiles.append(nameDest + nameSrc)
            if settings["saveText"] == 1:
                resFiles.append(nameDest + nameSrc.replace(".xls", ".txt"))
            if len(pathDest) > 0 and pathDest != "/":
                resFiles[i] = pathDest + '/' + resFiles[i]
            util.copyfile(sourceFile, resFiles[i])
            os.remove(sourceFile)
    return analysisData, measureData, headers, resFiles
def plotUniversalPlot(data, showErr=True):
	symbols = ['s', '^', 'p', 'd', 'v', 'o', '+', 'x', '*', 'h', '<', '>', '.']
	colors = ['r', 'g', 'b', 'c', 'm', 'y', 'r', 'g', 'b', 'c', 'm']
	# extract relevant data
	tauVals = data['tauVals']
	psiVals = data['psiVals']
	stresses = bf.getDictValOrDef(data, 'stresses')
	accuracy = bf.getDictValOrDef(data, 'accuracy')
	stressNames = ['s11-s33', 's22-s33', 's13', 's23']
	if stresses is not None and accuracy is not None:
		for i in range(bf.size(stresses, 1)):
			curStresses = np.round(stresses[:, i])
			if np.any(curStresses != 0):
				if showErr:
					pg.plotErrData(curStresses, np.round(accuracy[:, i]), tauVals, 'rp', 'on', 'Information depths in um',
						'Residual stresses in MPa', 'Residual stresses ' + stressNames[i])
				else:
					pg.plotData(curStresses, tauVals, 'rp', 'on', 'Information depths in um',
						'Residual stresses in MPa', 'Residual stresses ' + stressNames[i])
	else:
		for stressName in stressNames:
			stressVals = bf.getDictValOrDef(data, stressName)
			accuracyVals = bf.getDictValOrDef(data, 'dev_' + stressName)
			if stressVals is not None and accuracyVals is not None:
				if np.any(np.asarray(stressVals) != 0):
					if showErr:
						pg.plotErrData(np.round(stressVals), np.round(accuracyVals), tauVals, 'rp', 'on',
							'Information depths in um', 'Residual stresses in MPa',
							'Residual stresses ' + stressName)
					else:
						pg.plotData(np.round(stressVals), tauVals, 'rp', 'on', 'Information depths in um',
							'Residual stresses in MPa', 'Residual stresses ' + stressName)
def stressCurve(damping, n, tauVals, stresses, zVals=None):
    if zVals is None:
        zVals = tauVals
    if bf.size(stresses, 0) > 1 and bf.size(stresses, 1) > 1:
        if bf.size(stresses, 0) == 2 and bf.size(stresses,
                                                 1) == bf.length(tauVals):
            stressVals = stresses[0, :]
            weightVals = stresses[1, :]
        elif bf.size(stresses, 0) == bf.length(tauVals) and bf.size(
                stresses, 1) == 2:
            stressVals = stresses[:, 0]
            weightVals = stresses[:, 1]
        # stress accuracy values are used to weight the stress values
        [tauValsFit,
         stressVals] = bc.createWeightedData(tauVals, stressVals, weightVals)
    else:
        tauValsFit = tauVals
        stressVals = stresses
    if damping:
        stressDampedPolynomTauOpt = lambda par, x, y: stressDampedPolynomTau(
            par, x) - y
        res = op.leastsq(stressDampedPolynomTauOpt, bf.ones(n),
                         (tauValsFit, stressVals))
        aVals = res[0]
        err = res[1]
        tauStresses = stressDampedPolynomTau(aVals, tauVals)
        zStresses = stressDampedPolynomReal(aVals, zVals)
    else:
        aVals = np.polyfit(tauValsFit, stressVals, n)
        #err = res.normr
        tauStresses = np.polyval(aVals, tauVals)
        bVals = np.copy(aVals)  # copy so aVals is not modified in place
        for i in range(len(aVals), 0, -1):
            bVals[i - 1] = bVals[i - 1] / math.factorial(i - 1)
        zStresses = np.polyval(bVals, zVals)
    return tauStresses, zStresses, aVals  #, err
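# Standalone sketch of the tau -> z coefficient conversion in the non-damped
# branch above: each polyval coefficient (highest degree first) at position
# k is divided by k!, mirroring the bVals loop. Values are illustrative.
import math
import numpy as np

aVals = np.array([3.0, 2.0, 1.0])  # sigma(tau) = 3*tau**2 + 2*tau + 1
bVals = np.array([a / math.factorial(k) for k, a in enumerate(aVals)])
print(np.polyval(aVals, 2.0))  # stress at tau = 2
print(np.polyval(bVals, 2.0))  # converted stress at z = 2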
def createWeightedData(x, y, a):
    # check dimensions of values
    if bf.size(x, 0) != bf.size(y, 0) or bf.size(x, 1) != bf.size(y, 1):
        if bf.size(x, 0) == bf.size(a, 0) and bf.size(x, 1) == bf.size(a, 1):
            # try to transpose data
            if bf.size(x, 0) == bf.size(np.transpose(y), 0) and bf.size(
                    x, 1) == bf.size(np.transpose(y), 1):
                y = np.transpose(y)
        elif bf.size(y, 0) == bf.size(a, 0) and bf.size(y, 1) == bf.size(a, 1):
            # try to transpose data
            if bf.size(x, 0) == bf.size(np.transpose(y), 0) and bf.size(
                    x, 1) == bf.size(np.transpose(y), 1):
                y = np.transpose(y)
                a = np.transpose(a)
    # check validity of weights
    a[a < 0] = 1
    # create new data set according to weights
    minA = np.min(a)
    weights = np.ceil(a / minA)
    n = int(np.sum(weights))
    xNew = bf.ones(n)
    yNew = bf.ones(n)
    pos = 0
    for i in range(len(weights)):
        xNew[pos:pos + int(weights[i])] = x[i]
        yNew[pos:pos + int(weights[i])] = y[i]
        pos = pos + int(weights[i])
    return xNew, yNew
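# Equivalent sketch of what createWeightedData does, with numpy only: the
# weights are scaled by their minimum, rounded up, and each (x, y) pair is
# repeated that many times so an unweighted fit sees the weighting.
import numpy as np

def weighted_repeat(x, y, a):
    counts = np.ceil(np.asarray(a, dtype=float) / np.min(a)).astype(int)
    return np.repeat(x, counts), np.repeat(y, counts)

xN, yN = weighted_repeat([0, 1, 2], [5, 6, 7], [1, 2, 4])
print(xN, yN)  # [0 1 1 2 2 2 2] [5 6 6 7 7 7 7]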
def multiUniversalPlotAnalysis(data,
                               maxPsi=None,
                               minDistPsiStar=0.15,
                               minValPsiNormal=0.08,
                               minValPsiShear=0.8):
    keyList = bf.getKeyList(data)
    peakCount = int(
        bf.replace(keyList[-1].split('_')[0],
                   'pv')) + 1  # must be adapted in further versions!!!
    tthVal = data['tth']
    phiVals = data['phi']
    psiVals = data['psi']
    psiUni = np.unique(psiVals)
    psiUni = psiUni[psiUni != 0]  # no zero value
    psiSign = np.sign(psiUni[-1])  # sign of last psi value
    psiUni = psiUni[np.sign(psiUni) ==
                    psiSign]  # only negative or positive values
    sinpsi2Uni = bc.sind(psiUni)**2
    sin2psiUni = bc.sind(np.abs(2 * psiUni))
    tauRes = np.zeros((peakCount, len(psiUni)))
    hklRes = np.zeros((peakCount, len(psiUni)))
    psiRes = np.zeros((peakCount, len(psiUni)))
    stresses = np.zeros((peakCount, len(psiUni), 4))
    errVals = np.zeros((peakCount, len(psiUni), 4))
    tauS33 = np.zeros(peakCount)
    aStarVals = np.zeros(peakCount)
    aStarErrVals = np.zeros(peakCount)
    s33 = np.zeros(peakCount)
    dev_s33 = np.zeros(peakCount)
    hklList = np.zeros(peakCount)
    phi4 = len(np.unique(phiVals)) == 4
    validCounter = 0
    for p in range(peakCount):  # for all peaks create one plot
        centerVals = data['pv' + str(p) + '_center']
        centerErrVals = data['pv' + str(p) + '_center_err']
        dMinVals = conv.energies2latticeDists(centerVals - centerErrVals,
                                              tthVal)
        dMaxVals = conv.energies2latticeDists(centerVals + centerErrVals,
                                              tthVal)
        dErrVals = np.abs(dMaxVals - dMinVals) / 2
        tauVals = data['pv' + str(p) + '_depth']
        dVals = data['pv' + str(p) + '_dspac'] / 10  # in nm
        #hklVals = data['pv' + str(p) + '_hklList']  # first version with adaptions
        #s1Vals = data['pv' + str(p) + '_s1List']  # first version with adaptions
        #hs2Vals = data['pv' + str(p) + '_s2List']  # first version with adaptions
        #hklVal = hklVals[0]
        hVals = data['pv' + str(p) + '_h']  # second version
        kVals = data['pv' + str(p) + '_k']  # second version
        lVals = data['pv' + str(p) + '_l']  # second version
        s1Vals = data['pv' + str(p) + '_s1']  # second version
        #hs2Vals = data['pv' + str(p) + '_s2']  # second version
        hs2Vals = data['pv' + str(p) + '_hs2']  # third version
        hklVal = conv.mergeHkl(hVals[0], kVals[0], lVals[0])
        hklList[p] = hklVal
        hklRes[p] = hklVal * np.ones(len(psiUni))
        psiRes[p] = psiUni
        s1Val = s1Vals[0]
        #hs2Val = hs2Vals[0] * 0.5  # test valid for first and second version!!!!!
        hs2Val = hs2Vals[0]
        curData = {
            'tauVals': tauVals,
            'dVals': dVals,
            'dErrVals': dErrVals,
            'psiVals': psiVals,
            'phiVals': phiVals,
            'psiUni': psiUni,
            'sin2psiUni': sin2psiUni,
            'sinpsi2Uni': sinpsi2Uni,
            'phi4': phi4,
            'hklVal': hklVal,
            's1Val': s1Val,
            'hs2Val': hs2Val
        }
        bf.extendDictionary(curData, data, ('a0Val', ))
        # perform universal plot analysis for current peak data
        curResData = universalPlotAnalysis(curData, maxPsi, minDistPsiStar,
                                           minValPsiNormal, minValPsiShear)
        # remember results
        tauRes[p] = curResData['tauRes']
        stresses[p] = curResData['stresses']
        errVals[p] = curResData['errVals']
        aStarVals[p] = curResData['dStar100']
        aStarErrVals[p] = curResData['dStar100Err']
        tauS33[p] = curResData['tauS33']
        s33[p] = curResData['s33']
        dev_s33[p] = curResData['dev_s33']
        validCounter += curResData['validCounter']
    # reshape data
    tauRes = np.reshape(tauRes, np.prod(bf.size(tauRes)))
    hklRes = np.reshape(hklRes, np.prod(bf.size(hklRes)))
    psiRes = np.reshape(psiRes, np.prod(bf.size(psiRes)))
    stresses = np.reshape(
        stresses,
        (bf.size(stresses, 0) * bf.size(stresses, 1), bf.size(stresses, 2)))
    errVals = np.reshape(
        errVals,
        (bf.size(errVals, 0) * bf.size(errVals, 1), bf.size(errVals, 2)))
    # remove values with tau = 0
    mask = tauRes > 0
    hklRes = hklRes[mask]
    psiRes = psiRes[mask]
    stresses = stresses[mask]
    errVals = errVals[mask]
    tauRes = tauRes[mask]
    # sort data by increasing information depth
    order = np.argsort(tauRes)
    hklRes = hklRes[order]
    psiRes = psiRes[order]
    stresses = stresses[order]
    errVals = errVals[order]
    tauRes = tauRes[order]
    resData = {
        'tauVals': tauRes,
        'stresses': stresses,
        'accuracy': errVals,
        'hklVals': hklRes,
        'psiVals': psiRes,
        'validCount': validCounter
    }
    resDataS33 = {
        'tauMean': tauS33,
        'dStar100': aStarVals,
        'dStar100Err': aStarErrVals,
        's33': s33,
        'dev_s33': dev_s33,
        'hklList': hklList
    }
    return resData, resDataS33
def latticeSpacings(spacings, angles, h, k, l):
    # check if single values are arrays or not
    if bf.length(spacings) == 1:
        if len(bf.size(spacings)) > 0:
            spacing = spacings[0]
        else:
            spacing = spacings
    if bf.length(angles) == 1:
        if len(bf.size(angles)) > 0:
            angle = angles[0]
        else:
            angle = angles
    # determine lattice spacings
    if bf.length(spacings) == 1 and bf.length(angles) == 1 and angle == 90:
        # cubic
        dVals = conv.aVals2latticeDists(spacing, h, k, l)
    elif bf.length(spacings) == 1 and bf.length(angles) == 1 and angle != 90:
        # rhombohedral
        dVals = (spacing**2 *
                 (1 - 3 * bc.cosd(angle)**2 + 2 * bc.cosd(angle)**3) /
                 ((h**2 + k**2 + l**2) * bc.sind(angle)**2 + 2 *
                  (h * k + k * l + h * l) *
                  (bc.cosd(angle)**2 - bc.cosd(angle))))**0.5
    elif bf.length(spacings) == 2 and bf.length(angles) == 1 and angle == 90:
        # tetragonal
        #dVals = spacings[0] / (h ** 2 + k ** 2 + l ** 2 * (spacings[0] ** 2 / spacings[1] ** 2)) ** 0.5
        dVals = 1 / (
            (h**2 + k**2) / spacings[0]**2 + l**2 / spacings[1]**2)**0.5
    elif bf.length(spacings) == 2 and bf.length(
            angles) == 2 and angles[0] == 90 and angles[1] == 120:
        # hexagonal
        dVals = spacings[0] / (4 / 3 * (h**2 + h * k + k**2) +
                               l**2 * spacings[0]**2 / spacings[1]**2)**0.5
    elif bf.length(spacings) == 3 and bf.length(angles) == 1 and angle == 90:
        # orthorhombic
        dVals = 1 / ((h / spacings[0])**2 + (k / spacings[1])**2 +
                     (l / spacings[2])**2)**0.5
    elif bf.length(spacings) == 3 and bf.length(
            angles) == 2 and angles[0] == 90:  # and angles[1] != 90
        # monoclinic
        dVals = 1 / (h**2 / (spacings[0]**2 * bc.sind(angles[1])**2) +
                     k**2 / spacings[1]**2 + l**2 /
                     (spacings[2]**2 * bc.sind(angles[1])**2) -
                     2 * h * l * bc.cosd(angles[1]) /
                     (spacings[0] * spacings[2] * bc.sind(angles[1])**2))**0.5
    elif bf.length(spacings) == 3 and bf.length(angles) == 3:
        # triclinic
        v = spacings[0] * spacings[1] * spacings[2] * (
            1 - bc.cosd(angles[0])**2 - bc.cosd(angles[1])**2 -
            bc.cosd(angles[2])**2 + 2 * bc.cosd(angles[0]) *
            bc.cosd(angles[1]) * bc.cosd(angles[2]))**0.5
        s11 = spacings[1]**2 * spacings[2]**2 * bc.sind(angles[0])**2
        s22 = spacings[0]**2 * spacings[2]**2 * bc.sind(angles[1])**2
        s33 = spacings[0]**2 * spacings[1]**2 * bc.sind(angles[2])**2
        s12 = spacings[0] * spacings[1] * spacings[2]**2 * (
            bc.cosd(angles[0]) * bc.cosd(angles[1]) - bc.cosd(angles[2]))
        s23 = spacings[0]**2 * spacings[1] * spacings[2] * (
            bc.cosd(angles[1]) * bc.cosd(angles[2]) - bc.cosd(angles[0]))
        s13 = spacings[0] * spacings[1]**2 * spacings[2] * (
            bc.cosd(angles[2]) * bc.cosd(angles[0]) - bc.cosd(angles[1]))
        dVals = (v**2 /
                 (s11 * h**2 + s22 * k**2 + s33 * l**2 + 2 * s12 * h * k +
                  2 * s23 * k * l + 2 * s13 * h * l))**0.5
    return dVals
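# Numeric sanity checks for two branches above (numpy only; values are
# illustrative). Cubic: d = a / sqrt(h^2 + k^2 + l^2); hexagonal:
# 1/d^2 = 4/3 * (h^2 + h*k + k^2) / a^2 + l^2 / c^2.
import numpy as np

a, c, h, k, l = 0.40, 0.65, 1, 1, 1
d_cubic = a / np.sqrt(h**2 + k**2 + l**2)
d_hex = a / np.sqrt(4 / 3 * (h**2 + h * k + k**2) + l**2 * a**2 / c**2)
print(d_cubic, d_hex)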
def createPeakMeasurementFile(settings, fileNamesAxes=None, fileNamesPeaks=None, resFile=None):
	# header of new file
	fileHead = ['LNr', 'dVal[nm]', 'dVar[nm]', 'Iint', 'Integralb', 'tth', 'phiP', 'psiP', 'etaP',
		'Ringstr', 'RT', 'DT', 'xdiff', 'ydiff', 'zdiff', 'motor1', 'motor2', 'motor3',
		'temp1', 'temp2', 'wavelength', 'deltatime']
	# select files
	if fileNamesAxes is None or len(fileNamesAxes) == 0:
		multiSel = 'on'
		fileNamesAxes = fg.requestFiles((('Text file', '*.txt'),), 'Select axis position files', multiSel)
	if fileNamesPeaks is None or len(fileNamesPeaks) == 0:
		fileNamesPeaks = fg.requestFiles((('Text file', '*.txt'),), 'Select evaluation files', "on")
	if fileNamesAxes is not None and len(fileNamesAxes) != 0:
		if resFile is None or len(resFile) == 0:
			# specify name of result file
			if len(fileNamesAxes) == 1 and fileNamesAxes[0].find('_positions.txt') > 0:
				resFile = bf.replace(fileNamesAxes[0], '_positions.txt', '_finalData.txt')
			elif len(fileNamesAxes) == 1 and fileNamesAxes[0].find('_pos.txt') > 0:
				resFile = bf.replace(fileNamesAxes[0], '_pos.txt', '_finalData.txt')
			elif len(fileNamesAxes) == 1:
				resFile = bf.replace(fileNamesAxes[0], '.txt', '_finalData.txt')
			else:
				pathName, file = fg.fileparts(fileNamesAxes[0])
				resFile = pathName + '/finalData.txt'
			if settings['type'] == 'P61_0':
				resFile = resFile.replace('.txt', '0.txt')
			elif settings['type'] == 'H4_1' or settings['type'] == 'P61_1':
				resFile = resFile.replace('.txt', '1.txt')
			elif settings['type'] == 'H4_2':
				resFile = resFile.replace('.txt', '2.txt')
		axesFiles = len(fileNamesAxes)
		peakFiles = len(fileNamesPeaks)
		# read data from axes files
		dataAxes = []
		for i in range(axesFiles):
			dataAxes.append(fg.dlmread(fileNamesAxes[i], '\t', 1))
		dataAxes = np.array(dataAxes)
		# read data from peak files
		dataPeaks = []
		for i in range(peakFiles):
			dataPeaks.append(fg.dlmread(fileNamesPeaks[i], '\t', 1))
		dataPeaks = np.array(dataPeaks)
		if len(bf.size(dataPeaks[0])) == 1:
			peakCount = bf.min(bf.size(dataPeaks[0], 0), bf.size(dataPeaks[0], 1))[0]  # peakFiles
		else:
			peakCount = bf.size(dataPeaks[0], 0)
		peaks = np.setdiff1d(range(0, peakCount), settings['unused'] - 1)  # function needs zero based unused values
		# create new data set
		lineCount = peakFiles * len(peaks)
		data = np.zeros((lineCount, 22))
		curAxesFile = 0
		curAxesLine = 0
		peakNum = 1
		for i in peaks:
			for j in range(peakFiles):
				curLine = (peakNum - 1) * peakFiles + j
				data[curLine, 0] = peakNum  # peak number
				# angle dispersive data measured at own laboratory devices (roh, xrdml and uxd file format)
				if settings['type'] == 'AD':
					if len(bf.size(dataPeaks)) < 3:
						data[curLine, 5] = dataPeaks[j, 4]  # ttheta
					else:
						data[curLine, 5] = dataPeaks[j][i, 4]  # ttheta
					data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 2]  # phi
					data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 1]  # psi
					data[curLine, 8] = 90  # eta
					data[curLine, 9] = 45  # current
					data[curLine, 10] = dataAxes[curAxesFile][curAxesLine, 7]  # real time
					data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 3]  # x axis
					data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 4]  # y axis
					data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 5]  # z axis
					data[curLine, 20] = settings['pars']  # wavelength
					data[curLine, 1] = conv.angles2latticeDists(data[curLine, 5], data[curLine, 20])  # peak value
					if len(bf.size(dataPeaks)) < 3:
						data[curLine, 2] = data[curLine, 1] - conv.angles2latticeDists(data[curLine, 5] +
							dataPeaks[j, 7], data[curLine, 20])  # peak deviation
						data[curLine, 3] = dataPeaks[j, 5]  # peak intensity
						data[curLine, 4] = (2 * np.pi) ** 0.5 * dataPeaks[j, 3]  # peak IB [°]
					else:
						data[curLine, 2] = data[curLine, 1] - conv.angles2latticeDists(data[curLine, 5] +
							dataPeaks[j][i, 7], data[curLine, 20])  # peak deviation
						data[curLine, 3] = dataPeaks[j][i, 5]  # peak intensity
						data[curLine, 4] = (2 * np.pi)**0.5 * dataPeaks[j][i, 3]  # peak IB [°]
				# Seifert measurement data
				elif settings['type'].startswith('Seifert'):
					data[curLine, 5] = dataPeaks[j][i, 4]  # ttheta
					data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 4]  # phi
					if data[curLine, 6] not in (0, 90, 180, 270):
						data[curLine, 6] = 0
					data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 3]  # psi
					if settings['type'] == 'SeifertM':  # omega measurement
						data[curLine, 8] = 0  # eta
					else:
						data[curLine, 8] = 90  # eta
					data[curLine, 9] = 45  # current
					data[curLine, 10] = dataAxes[curAxesFile][curAxesLine, 14]  # real time
					data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 5]  # x axis
					data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 6]  # y axis
					data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 7]  # z axis
					data[curLine, 20] = settings['pars']  # wavelength
					data[curLine, 1] = conv.angles2latticeDists(data[curLine, 5], data[curLine, 20])  # peak value
					data[curLine, 2] = data[curLine, 1] - conv.angles2latticeDists(data[curLine, 5] +
						dataPeaks[j][i, 7], data[curLine, 20])  # peak deviation
					data[curLine, 3] = dataPeaks[j][i, 5]  # peak intensity
					data[curLine, 4] = (2 * np.pi) ** 0.5 * dataPeaks[j][i, 3]  # peak IB [°]
				elif settings['type'] == 'ED' or settings['type'][0:2] == 'H4' or settings['type'][0:3] == 'P61':
					if settings['type'][0:2] == 'H4':
						data[curLine, 5] = settings['tth']
						data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 5]  # phi
						data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 4]  # psi
						data[curLine, 8] = 90  # eta
						data[curLine, 9] = 45  # current
						# real time
						# dead time
						data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 6]  # x axis
						data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 7]  # y axis
						data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 8]  # z axis
					elif settings['type'][0:3] == 'P61':
						if settings['type'] == 'P61_0':
							data[curLine, 5] = dataAxes[curAxesFile][curAxesLine, 2]  # ttheta
							data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 4]  # psi
						elif settings['type'] == 'P61_1':
							data[curLine, 5] = dataAxes[curAxesFile][curAxesLine, 3]  # ttheta
							data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 5]  # psi
						data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 6]  # phi
						data[curLine, 8] = 90  # eta
						data[curLine, 9] = dataAxes[curAxesFile][curAxesLine, 10]  # petracurrent
						# real time
						# dead time
						data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 7]  # x
						data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 8]  # y
						data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 9]  # z
					elif settings['type'] == 'ED':
						data[curLine, 5] = dataAxes[curAxesFile][curAxesLine, 1] + dataAxes[curAxesFile][curAxesLine, 2]  # ttheta
						data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 4]  # phi
						data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 3]  # psi
						data[curLine, 8] = 90  # eta
						data[curLine, 9] = 45  # current
						data[curLine, 10] = dataAxes[curAxesFile][curAxesLine, 14]  # real time
						# dead time
						data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 5]  # x axis
						data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 6]  # y axis
						data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 7]  # z axis
					if len(settings['pars']) == 0:
						data[curLine, 1] = conv.energies2latticeDists(dataPeaks[j][i, 4], data[curLine, 5])  # peak value
						data[curLine, 2] = data[curLine, 1] - conv.energies2latticeDists(dataPeaks[j][i, 4] +
							dataPeaks[j][i, 7] / dataPeaks[j][i, 2], data[curLine, 5])  # peak deviation
						data[curLine, 4] = (2 * np.pi) ** 0.5 * dataPeaks[j][i, 3]  # peak IB [keV]
					else:
						data[curLine, 1] = conv.energies2latticeDists(conv.channels2energies(dataPeaks[j][i, 4],
							settings['pars']), data[curLine, 5])  # peak value
						data[curLine, 2] = data[curLine, 1] - conv.energies2latticeDists(conv.channels2energies(dataPeaks[j][i, 4]
							+ dataPeaks[j][i, 7] / dataPeaks[j][i, 2], settings['pars']), data[curLine, 5])  # peak deviation
						data[curLine, 4] = (2 * np.pi) ** 0.5 * dataPeaks[j][i, 3]  # peak IB [keV]
					data[curLine, 3] = dataPeaks[j][i, 5]  # peak intensity
					if settings['type'][0:3] == 'P61':
						data[curLine, 2] = 1 / data[curLine, 3]  # error weight as intensity
				# ED data in general (note: 'ED' is already matched above, so this
				# branch is effectively unreachable)
				elif settings['type'] == 'ED':
					data[curLine, 5] = dataAxes[curAxesFile][curAxesLine, 1] + dataAxes[curAxesFile][curAxesLine, 2]  # ttheta
					data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 4]  # phi
					data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 3]  # psi
					data[curLine, 8] = 90  # eta
					data[curLine, 9] = 45  # current
					data[curLine, 10] = dataAxes[curAxesFile][curAxesLine, 14]  # real time
					# dead time
					data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 5]  # x axis
					data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 6]  # y axis
					data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 7]  # z axis
					if settings['pars'] is None:
						data[curLine, 1] = conv.energies2latticeDists(dataPeaks[j][i, 4], data[curLine, 5])  # peak value
						data[curLine, 2] = data[curLine, 1] - conv.energies2latticeDists(dataPeaks[j][i, 4] +
							dataPeaks[j][i, 7], data[curLine, 5])  # peak deviation
						data[curLine, 4] = (2 * np.pi)**0.5 * dataPeaks[j][i, 3]  # peak IB [keV]
					else:
						data[curLine, 1] = conv.energies2latticeDists(conv.channels2energies(dataPeaks[j][i, 4], settings['pars']),
							data[curLine, 5])  # peak value
						data[curLine, 2] = data[curLine, 1] - conv.energies2latticeDists(conv.channels2energies(dataPeaks[j][i, 4] +
							dataPeaks[j][i, 7], settings['pars']), data[curLine, 5])  # peak deviation
						data[curLine, 4] = (2 * np.pi)**0.5 * dataPeaks[j][i, 3]  # peak IB [keV]
					data[curLine, 3] = dataPeaks[j][i, 5]  # peak intensity
				# EDDI files
				elif settings['type'] == 'EDDI':
					data[curLine, 5] = dataAxes[curAxesFile][curAxesLine, 1]  # ttheta
					data[curLine, 6] = dataAxes[curAxesFile][curAxesLine, 2]  # phi
					data[curLine, 7] = dataAxes[curAxesFile][curAxesLine, 3]  # psi
					data[curLine, 8] = dataAxes[curAxesFile][curAxesLine, 4]  # eta
					data[curLine, 9] = dataAxes[curAxesFile][curAxesLine, 5]  # current
					data[curLine, 10] = dataAxes[curAxesFile][curAxesLine, 6]  # real time
					data[curLine, 11] = dataAxes[curAxesFile][curAxesLine, 7]  # dead time
					data[curLine, 12] = dataAxes[curAxesFile][curAxesLine, 8]  # x axis
					data[curLine, 13] = dataAxes[curAxesFile][curAxesLine, 9]  # y axis
					data[curLine, 14] = dataAxes[curAxesFile][curAxesLine, 10]  # z axis
					data[curLine, 15] = dataAxes[curAxesFile][curAxesLine, 11]  # motor 1
					data[curLine, 16] = dataAxes[curAxesFile][curAxesLine, 12]  # motor 2
					data[curLine, 17] = dataAxes[curAxesFile][curAxesLine, 13]  # motor 3
					data[curLine, 18] = dataAxes[curAxesFile][curAxesLine, 14]  # temp 1
					data[curLine, 19] = dataAxes[curAxesFile][curAxesLine, 15]  # temp 2
					data[curLine, 20] = dataAxes[curAxesFile][curAxesLine, 16]  # heatrate
					if settings['pars'] is None:
						data[curLine, 1] = conv.energies2latticeDists(dataPeaks[j][i, 4], data[curLine, 5])  # peak value
						data[curLine, 2] = data[curLine,1] - conv.energies2latticeDists(dataPeaks[j][i, 4] +
							dataPeaks[j][i, 7], data[curLine, 5])  # peak deviation
						data[curLine, 4] = (2 * np.pi)**0.5 * dataPeaks[j][i, 3]  # peak IB [keV]
					else:
						data[curLine, 1] = conv.energies2latticeDists(conv.channels2energies(dataPeaks[j][i, 4], settings['pars']),
							data[curLine, 5])  # peak value
						data[curLine, 2] = data[curLine, 1] - conv.energies2latticeDists(conv.channels2energies(dataPeaks[j][i, 4]
							+ dataPeaks[j][i, 7], settings['pars']), data[curLine, 5])  # peak deviation
						data[curLine, 4] = conv.channels2energies((2 * np.pi)**0.5 * dataPeaks[j][i, 3], settings['pars'])  # peak IB [keV]
					data[curLine, 3] = dataPeaks[j][i, 5]  # peak intensity
				# go to next measurement
				curAxesLine = curAxesLine + 1
				if bf.size(dataAxes[curAxesFile], 0) <= curAxesLine:
					curAxesFile = curAxesFile + 1
					if axesFiles <= curAxesFile:
						curAxesFile = 0
					curAxesLine = 0
			peakNum = peakNum + 1
		# write new file
		fid = open(resFile, 'w')
		for i in range(len(fileHead)):
			fid.write(('%s\t' % fileHead[i]))
		fg.writeLine(fid, '')
		formatStr = '%.0f\t%.8f\t%.8f\t%.2f\t%.6f\t%.4f\t%.4f\t%.4f\t%.4f\t%.1f\t%.1f\t%.2f\t%.3f\t%.3f\t'\
			+ '%.5f\t%.3f\t%.3f\t%.3f\t%.2f\t%.2f\t%.8f\t%.0f'
		for j in range(lineCount):
			fg.writeDataLine(fid, formatStr, data[j, :])
		fid.close()
		return data, resFile
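# Hypothetical call of createPeakMeasurementFile, listing the settings keys
# read above ('type', 'tth', 'pars', 'unused'); names and values are
# illustrative only.
# settings = {
#     'type': 'EDDI',  # e.g. 'AD', 'SeifertM', 'ED', 'H4_1', 'P61_0', 'EDDI'
#     'tth': 16.0,     # fixed two-theta, used by the 'H4' types
#     'pars': None,    # detector calibration parameters (or wavelength for 'AD')
#     'unused': np.array([]),  # one-based peak numbers to skip
# }
# data, resFile = createPeakMeasurementFile(settings, ['scan_positions.txt'],
#                                           ['scan_peaks.txt'])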