def __init__(self, axis=None, angle=None, R=None):
    # Construct either from a 3x3 rotation matrix R, or from an axis Vector
    # and an angle in degrees.
    if isinstance(R, dnp.ndarray):
        if R.shape != (3, 3):
            raise Exception('R must be a 3 by 3 matrix')
        self.R = R
        # recover the rotation angle from the trace of R
        angle = dnp.arccos(0.5 * (sum(dnp.diag(R)) - 1))
        self.angle = float(dnp.rad2deg(angle))
        # recover the rotation axis from the antisymmetric part of R
        x = (R[2][1] - R[1][2]) / (2 * dnp.sin(angle))
        y = (R[0][2] - R[2][0]) / (2 * dnp.sin(angle))
        z = (R[1][0] - R[0][1]) / (2 * dnp.sin(angle))
        axis = Vector(x, y, z)
        self.axis = axis.unit()
        return
    if isinstance(angle, dnp.ndarray):
        angle = angle[0]
    elif isinstance(angle, int):
        angle = float(angle)
    if not isinstance(angle, float):
        raise Exception('angle must be a float not {}'.format(type(angle)))
    axis = axis.unit()
    self.axis = axis
    self.angle = angle
    angle = float(dnp.radians(angle))
    # Rodrigues' rotation formula: R = I*cos(a) + M*sin(a) + (1 - cos(a))*N,
    # where M is the cross-product matrix of the axis and N its outer product.
    M = dnp.array([[0.0, -axis.z, axis.y],
                   [axis.z, 0.0, -axis.x],
                   [-axis.y, axis.x, 0.0]])
    N = axis.tensor_product(axis)
    I = dnp.identity(3)
    self.R = I * dnp.cos(angle) + M * dnp.sin(angle) + (1 - dnp.cos(angle)) * N
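# A minimal standalone sketch of the Rodrigues construction used in __init__ above,
# written in plain numpy so it does not depend on the surrounding Vector class.
# Rotating the x unit vector by 90 degrees about z should give the y unit vector.
import numpy as np

def rodrigues(axis, angle_deg):
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    a = np.radians(angle_deg)
    M = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])   # cross-product matrix of the axis
    N = np.outer(axis, axis)                    # tensor (outer) product of the axis
    return np.identity(3) * np.cos(a) + M * np.sin(a) + (1 - np.cos(a)) * N

R = rodrigues([0, 0, 1], 90.0)
print(np.allclose(R.dot([1, 0, 0]), [0, 1, 0]))  # expect True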
def get_chi_steps(two_theta, starting_chi_value=100.0, final_chi_value=0.0):
    theta = dnp.radians(two_theta / 2.0)  # convert to radians and halve to get theta
    radius = LENGTH * float(dnp.sin(theta))
    delta_chi = float(dnp.rad2deg(WIDTH / radius))
    number_of_steps = dnp.abs(final_chi_value - starting_chi_value) / (delta_chi * 0.5)
    number_of_steps = int(number_of_steps) + 1
    return dnp.linspace(starting_chi_value, final_chi_value, number_of_steps)
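# Hypothetical usage sketch for get_chi_steps(). LENGTH and WIDTH are module-level
# constants read by the function; the values below are placeholders in the same
# (assumed) length units as each other, defined here only if the module has not
# already set them.
LENGTH = 900.0   # placeholder value, assumed
WIDTH = 2.5      # placeholder value, assumed

chi_steps = get_chi_steps(30.0, starting_chi_value=100.0, final_chi_value=0.0)
print chi_steps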
def rawGetPosition(self):
    wlen = self.getWaveLenght()
    # twoTheta = self.twoThOff + 2.0*theta*self.twoThSign
    theta = self.twoThSign * (float(self.tth.getPosition()) - self.twoThOff) / 2.0
    self.currentposition = float(4.0 * dnp.pi / wlen * dnp.sin(theta * dnp.pi / 180))
    return self.currentposition
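# Sketch of the momentum-transfer conversion used in rawGetPosition(),
# q = (4*pi/lambda)*sin(theta), with standalone numbers instead of the scannable
# objects. The wavelength and two-theta values are placeholders for illustration.
import math

wavelength = 1.0    # assumed, angstroms
two_theta = 20.0    # assumed, degrees
q = 4.0 * math.pi / wavelength * math.sin(math.radians(two_theta / 2.0))
print("q = %.4f inverse angstroms" % q)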
def dofourier(xarray, yarray, printres=False):
    nvals = len(yarray)
    avht = yarray.mean()
    angles = dnp.linspace(0, 2 * dnp.pi, len(yarray))
    # print angles
    ycarray = yarray - avht
    cosines = dnp.cos(angles)
    sines = dnp.sin(angles)
    cosint = cosines * ycarray
    sinint = sines * ycarray
    if printres:
        print "Yarray:", yarray
        print "Ycarray:", ycarray
        print "Sinint:", sinint
        print "Cosint:", cosint
    cosum = cosint[:-1].sum()
    sinsum = sinint[:-1].sum()
    cosfactor = cosum / float(nvals - 1)
    sinfactor = sinsum / float(nvals - 1)
    if printres:
        print "sinfactor, cosfactor: %.4f %.4f" % (sinfactor, cosfactor)
    ratio = sinfactor / cosfactor
    delta = math.atan(ratio)
    deltadeg = math.degrees(delta)
    if printres:
        print "Delta degrees %06.4f" % deltadeg
    halfmag = math.sqrt(sinfactor ** 2 + cosfactor ** 2)
    mag = 2.0 * halfmag
    if printres:
        print "Magnitude: %06.4f" % mag
        print "vertical centre %7.4f" % avht
    calcresult = avht + mag * dnp.cos(angles - delta)
    if printres:
        print "Calcresult:", calcresult
        for idx in range(0, len(yarray)):
            print yarray[idx], calcresult[idx]
    return (avht, mag, delta, calcresult)
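# Hedged usage sketch for dofourier(): fit the first Fourier harmonic of a synthetic
# cosine-plus-offset signal sampled over one full turn. xarray is not used by the fit,
# so None is passed; the synthetic centre/magnitude/phase values are illustrative only
# and should be approximately recovered.
import math
import scisoftpy as dnp

angles = dnp.linspace(0, 2 * dnp.pi, 37)
ydata = 5.0 + 2.0 * dnp.cos(angles - math.radians(30.0))
(avht, mag, delta, calcresult) = dofourier(None, ydata)
print "centre %.3f, magnitude %.3f, phase %.1f deg" % (avht, mag, math.degrees(delta))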
def project(self, energy, UB, pixels, gamma, delta, omega, alpha, nu):
    # put the detector at the right position
    dx, dy, dz = pixels

    # convert angles to radians
    gamma, delta, alpha, omega, nu = numpy.radians((gamma, delta, alpha, omega, nu))
    RGam = numpy.matrix([[1, 0, 0],
                         [0, cos(gamma), -sin(gamma)],
                         [0, sin(gamma), cos(gamma)]])
    RDel = (numpy.matrix([[cos(delta), -sin(delta), 0],
                          [sin(delta), cos(delta), 0],
                          [0, 0, 1]])).getI()
    RNu = numpy.matrix([[cos(nu), 0, sin(nu)],
                        [0, 1, 0],
                        [-sin(nu), 0, cos(nu)]])

    # calculate Cartesian coordinates for each pixel using clever matrix stuff
    M = numpy.mat(numpy.concatenate((dx.flatten(0), dy.flatten(0), dz.flatten(0)))
                  .reshape(3, dx.shape[0] * dx.shape[1]))
    XYZp = RGam * RDel * RNu * M
    xp = dnp.array(XYZp[0]).reshape(dx.shape)
    yp = dnp.array(XYZp[1]).reshape(dy.shape)
    zp = dnp.array(XYZp[2]).reshape(dz.shape)
    # don't bother with the part about slits...

    # calculate effective gamma and delta for each pixel
    d_ds = dnp.sqrt(xp**2 + yp**2 + zp**2)
    Gam = dnp.arctan2(zp, yp)
    Del = -1 * dnp.arcsin(-xp / d_ds)

    # wavenumber
    k = 2 * math.pi / 12.398 * energy

    # Define the needed matrices. The notation follows the article by Bunk &
    # Nielsen, J. Appl. Cryst. (2004) 37, 216-222.
    M1 = k * numpy.matrix(cos(omega) * sin(Del) - sin(omega) *
                          (cos(alpha) * (cos(Gam) * cos(Del) - 1) +
                           sin(alpha) * sin(Gam) * cos(Del)))
    M2 = k * numpy.matrix(sin(omega) * sin(Del) + cos(omega) *
                          (cos(alpha) * (cos(Gam) * cos(Del) - 1) +
                           sin(alpha) * sin(Gam) * cos(Del)))
    M3 = k * numpy.matrix(-sin(alpha) * (cos(Gam) * cos(Del) - 1) +
                          cos(alpha) * sin(Gam) * cos(Del))

    # invert UB matrix
    UBi = numpy.matrix(UB).getI()

    # calculate HKL
    H = UBi[0, 0] * M1 + UBi[0, 1] * M2 + UBi[0, 2] * M3
    K = UBi[1, 0] * M1 + UBi[1, 1] * M2 + UBi[1, 2] * M3
    L = UBi[2, 0] * M1 + UBi[2, 1] * M2 + UBi[2, 2] * M3
    return (H, K, L)
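# Hedged sketch of building the 'pixels' argument expected by project(): three 2-D
# arrays of Cartesian pixel coordinates for a flat detector. The detector shape,
# pixel size, distance, and axis conventions below are assumptions for illustration
# only; the real geometry comes from the surrounding class, not from this snippet.
import numpy

nrows, ncols = 195, 487                   # assumed detector shape
pixelsize = 0.172                         # assumed pixel size, mm
distance = 900.0                          # assumed detector distance, mm
row = (numpy.arange(nrows) - nrows / 2.0) * pixelsize
col = (numpy.arange(ncols) - ncols / 2.0) * pixelsize
dz, dx = numpy.meshgrid(col, row)         # in-plane pixel coordinates (assumed axes)
dy = numpy.ones_like(dx) * distance       # along the assumed beam direction
pixels = (dx, dy, dz)
# pixels would then be passed to project() together with the UB matrix and the
# diffractometer angles.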
include('workspace://Utilities/dawnplotting.py')

import scisoftpy as dnp

t = dnp.arange(0.0, 2.0, 0.01)
s = dnp.sin(2 * dnp.pi * t)
dnp.plot.line(t, s)

ps = dnp.plot.getPlottingSystem()
ps.setTitle("EclipseCon France Demo")
def refl(runfiles, pathtofiles, outputpath, scalar, beamheight, footprint,
         angularfudgefactor, wl, back=()):
    # scalar - scale factor to divide the data by
    # beamheight - beam FWHM in microns
    # footprint - sample footprint in mm
    # angularfudgefactor - angular offset correction in degrees
    # wl - wavelength
    # back is an optional tuple of background ROI numbers; set back=1 to do a background subtraction
    qq = []
    RR = []
    dR = []
    ii = -1
    for filename in runfiles:
        data = dnp.io.load(pathtofiles + "/" + str(filename) + ".dat")
        ii += 1
        theta = data.alpha

        # work out the q vector
        qqtemp = 4 * dnp.pi * dnp.sin((theta + angularfudgefactor) * dnp.pi / 180) / wl
        #qqtemp = data.qdcd

        # this section is to allow users to set limits on the q range used from each file
        if not 'qmin' + str(ii) in refl.__dict__:
            qmin = qqtemp.min()
        else:
            print "USER SET",
            qmin = refl.__getattribute__('qmin' + str(ii))
            print 'refl.qmin' + str(ii) + " = " + str(qmin) + " ;",
        if not 'qmax' + str(ii) in refl.__dict__:
            qmax = qqtemp.max()
        else:
            print "USER SET",
            qmax = refl.__getattribute__('qmax' + str(ii))
            print 'refl.qmax' + str(ii) + " = " + str(qmax) + " ;",

        roi1_sum = data.roi1_sum
        roi1_sum = roi1_sum[dnp.where((qqtemp >= qmin) & (qqtemp <= qmax))]
        roi1dr = dnp.sqrt(roi1_sum)
        theta = theta[dnp.where((qqtemp >= qmin) & (qqtemp <= qmax))]
        qqtemp = qqtemp[dnp.where((qqtemp >= qmin) & (qqtemp <= qmax))]

        bg_sum = dnp.zeros(len(roi1_sum))
        bg_dr = dnp.zeros(len(roi1dr))

        # if background ROI number given as int, convert to a single-item tuple
        if type(back) == int:
            back = (back,)

        # subtract any background ROIs from the data
        if len(back) > 0 and back[0] > 0:
            if ii == 0:
                print "Using background from " + str(len(back)) + " ROIs: " + str(back)
            for bg in back:
                if ('roi' + str(bg) + '_sum' in data.keys()):
                    bg_cur = data[data.keys().index('roi' + str(bg) + '_sum')]
                    dr_cur = dnp.sqrt(bg_cur)
                    (bg_sum, bg_dr) = ep.EPadd(bg_sum, bg_dr, bg_cur, dr_cur)
            (bg_sum, bg_dr) = ep.EPmulk(bg_sum, bg_dr, 1.0 / len(back))
        else:
            if ii == 0:
                print "Not subtracting a background"
        (RRtemp, drtemp) = ep.EPsub(roi1_sum, roi1dr, bg_sum, bg_dr)

        # do a footprint correction
        # assumes that the beam is gaussian in profile, with a FWHM of "beamheight";
        # footprint of sample is measured in mm
        areamultiplier = 2 * (norm.cdf(footprint * dnp.sin((theta + angularfudgefactor) / 180 * dnp.pi) / 2,
                                       0,
                                       1e-3 * beamheight / (2 * dnp.sqrt(2 * dnp.log(2)))) - 0.5)
        RRtemp /= areamultiplier
        drtemp /= areamultiplier

        # for the 2nd, 3rd, 4th q ranges, splice the data onto the end of the preexisting data
        if (ii > 0):
            # splice
            (scalingfactor, sferror) = nsplice.getScalingInOverlap(qq, RR, dR, qqtemp, RRtemp, drtemp)
            RRtemp *= scalingfactor
            drtemp *= scalingfactor
            print "Error in scaling factor: %2.3f %%" % (sferror / scalingfactor * 100)

        # now concatenate the data
        qq = dnp.concatenate((qq, qqtemp))
        RR = dnp.concatenate((RR, RRtemp))
        dR = dnp.concatenate((dR, drtemp))
    # end of per-file loop

    RR /= scalar
    dR /= scalar
    RR = RR[np.argsort(qq)]
    dR = dR[np.argsort(qq)]
    qq = np.sort(qq)

    # write out the data
    np.savetxt(outputpath + "/" + str(runfiles[0]) + "_refl.dat",
               dnp.concatenate((qq, RR, dR)).reshape(3, qq.shape[0]).transpose())
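# Hypothetical call sketch for refl(): reduce three reflectivity scans measured at
# increasing incidence angles into a single spliced, background-subtracted dataset.
# The run numbers, paths, and instrument parameters below are placeholders, not
# values from the source.
refl([391234, 391235, 391236],       # assumed run numbers
     "/path/to/datfiles",            # placeholder path to the .dat files
     "/path/to/output",              # placeholder output directory
     1.0e6,                          # scalar: normalisation to divide by (assumed)
     70.0,                           # beam height FWHM, microns (assumed)
     60.0,                           # sample footprint, mm (assumed)
     0.0,                            # angular offset correction, degrees
     1.0,                            # wavelength, angstroms (assumed)
     back=(2, 3))                    # subtract background from ROIs 2 and 3
# Optional per-file q limits can be set as function attributes before the call,
# e.g. refl.qmin1 = 0.03, as read from refl.__dict__ inside the loop.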
def rerefl(runfiles):
    global pathtofiles, savedbean, firstfiledata
    qq = []
    RR = []
    dR = []
    ii = -1
    for filename in runfiles:
        roi1sum = []
        roi1dr = []
        bg_sum = []
        bg_dr = []
        data = dnp.io.load(pathtofiles + "/" + str(filename) + ".dat")
        ii += 1
        if ii == 0:
            firstfiledata = data

        # define theta
        theta = data.alpha
        # define q
        qqtemp = 4 * dnp.pi * dnp.sin((theta + angularfudgefactor) * dnp.pi / 180) / wl
        #qqtemp = data.qdcd
        qmin = qqtemp.min()
        qmax = qqtemp.max()

        # plot first image of first file
        if ii == 0:
            global image
            image = dnp.io.load(replace_path(data['file'][imgdisp]), warn=False)
            dnp.plot.image(image[0], name='Plot 1', resetaxes=False)

        # find ROIs from saved bean
        try:
            rois = dnp.plot.getrois(savedbean)
            norois = len(rois)
        # if we don't have any ROIs yet, ask the user to draw some
        except (KeyError, TypeError, NameError):
            if ii == 0:
                print "\nPlease define some regions of interest then type getrois()"
                print "You must type getrois() after adding/changing any regions due to a bug in DAWN."
            norois = 0

        # this section to be restored when ROIs are working again
        ## find ROIs from plot window
        #bean = dnp.plot.getbean('Plot 1')
        #try:
        #    rois = dnp.plot.getrois(bean)
        #    norois = len(rois)
        ## if we don't have any ROIs yet, ask the user to draw some
        #except (KeyError, TypeError):
        #    if ii == 0:
        #        print "Please define some regions of interest"
        #    norois = 0

        if norois > 0:
            if ii == 0:
                print str(norois) + " ROIs defined, " + str(norois - 1) + " will be used for the background"
            for imgfile in data['file']:
                imgdata = dnp.io.load(replace_path(imgfile), warn=False)
                dnp.plot.image(imgdata[0], name="Plot 1", resetaxes=False)
                image = imgdata[0].transpose()  # Pilatus images load with axes transposed for some reason
                bg_pt = 0
                bgdr_pt = 0
                for j in range(0, norois):
                    roi = image[int(rois[j].spt[0]):int(rois[j].spt[0] + rois[j].len[0]),
                                int(rois[j].spt[1]):int(rois[j].spt[1] + rois[j].len[1])]
                    roisum_pt = dnp.float(roi.sum())
                    if j == 0:
                        roi1sum.append(roisum_pt)
                        roi1dr.append(dnp.sqrt(roisum_pt))
                    else:
                        (bg_pt, bgdr_pt) = ep.EPadd(bg_pt, bgdr_pt, roisum_pt, dnp.sqrt(roisum_pt))
                bg_sum.append(bg_pt)
                bg_dr.append(bgdr_pt)

            # convert lists to arrays
            (roi1sum, roi1dr, bg_sum, bg_dr) = (dnp.array(roi1sum), dnp.array(roi1dr),
                                                dnp.array(bg_sum), dnp.array(bg_dr))

            # normalise background to the area of the signal ROI
            if norois > 1:
                bgsize = 0
                for k in range(1, norois):
                    bgsize += rois[k].len[0] * rois[k].len[1]
                (bg_sum, bg_dr) = ep.EPmulk(bg_sum, bg_dr, rois[0].len[0] * rois[0].len[1] / bgsize)

            # subtract background
            (RRtemp, drtemp) = ep.EPsub(roi1sum, roi1dr, bg_sum, bg_dr)

            # do a footprint correction
            # assumes that the beam is gaussian in profile, with a FWHM of "beamheight";
            # footprint of sample is measured in mm
            areamultiplier = 2 * (norm.cdf(dnp.float(footprint) * dnp.sin((theta + dnp.float(angularfudgefactor)) / 180 * dnp.pi) / 2,
                                           0,
                                           1e-3 * dnp.float(beamheight) / (2 * dnp.sqrt(2 * dnp.log(2)))) - 0.5)
            RRtemp /= areamultiplier
            drtemp /= areamultiplier

            # for the 2nd, 3rd, 4th q ranges, splice the data onto the end of the preexisting data
            if (ii > 0):
                (scalingfactor, sferror) = nsplice.getScalingInOverlap(qq, RR, dR, qqtemp, RRtemp, drtemp)
                RRtemp *= scalingfactor
                drtemp *= scalingfactor

            # now concatenate the data
            qq = dnp.concatenate((qq, qqtemp))
            RR = dnp.concatenate((RR, RRtemp))
            dR = dnp.concatenate((dR, drtemp))
    # end of per-file loop

    if norois > 0:
        RR /= dnp.float(scalar)
        dR /= dnp.float(scalar)
        RR = RR[np.argsort(qq)]
        dR = dR[np.argsort(qq)]
        qq = np.sort(qq)

        # write out the data
        np.savetxt(outputpath + "/" + str(runfiles[0]) + "_rerefl_bkg1.dat",
                   dnp.concatenate((qq, RR, dR)).reshape(3, qq.shape[0]).transpose(),
                   fmt="%.10f %.10e %.10e")
        print "Output saved to " + outputpath + "/" + str(runfiles[0]) + "_rerefl_bkg1.dat"

        # plot the resulting reflectivity curve
        dnp.plot.line(qq, dnp.log10(RR), name='Plot 2')
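# Hypothetical setup sketch for rerefl(): the function reads its parameters from
# module-level globals (pathtofiles, outputpath, scalar, beamheight, footprint,
# angularfudgefactor, wl, imgdisp, savedbean), so these must exist before the call.
# All values below are placeholders for illustration only.
pathtofiles = "/path/to/datfiles"        # placeholder
outputpath = "/path/to/output"           # placeholder
scalar = 1.0e6                           # normalisation to divide by (assumed)
beamheight = 70.0                        # FWHM, microns (assumed)
footprint = 60.0                         # mm (assumed)
angularfudgefactor = 0.0                 # degrees
wl = 1.0                                 # wavelength, angstroms (assumed)
imgdisp = 0                              # index of the image displayed first
savedbean = dnp.plot.getbean('Plot 1')   # assumed; the source suggests a getrois() helper normally fills this
rerefl([391234, 391235, 391236])         # assumed run numbers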