import scisoftpy as dnp

# Jython script for manipulating data sets.
# The variables going into this actor in the workflow are available in the script.
# Expected variables, available and set when this script is run:
#   energy
#   I0
#   Iref
#   It
# Please provide maths of these variables and other available ones in the
# following lines.
lnI0It = dnp.log(I0 / It)
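# For example, further maths on the variables listed above could be added on
# the following lines -- a sketch only; lnItIref is a hypothetical extra
# output using the Iref (reference-channel) variable named in the header:
lnItIref = dnp.log(It / Iref)   # hypothetical: log-ratio against the reference channel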
# NB: numpy/scipy and the local ep/nsplice beamline modules are assumed to be
# importable in this environment; under pure Jython, dnp equivalents would be
# needed for the numpy calls and an erf-based helper for norm.cdf.
import scisoftpy as dnp
import numpy as np               # sorting and text output
from scipy.stats import norm     # Gaussian CDF for the footprint correction
import ep                        # local error-propagation helpers (EPadd, EPsub, EPmulk)
import nsplice                   # local overlap-splicing helper (getScalingInOverlap)


def refl(runfiles, pathtofiles, outputpath, scalar, beamheight, footprint,
         angularfudgefactor, wl, back=()):
    # scalar             - scale factor to divide the data by
    # beamheight         - beam FWHM in microns
    # footprint          - sample footprint in mm
    # angularfudgefactor - angular offset correction in degrees
    # wl                 - wavelength
    # back               - optional background ROI number(s); e.g. set back=1
    #                      to subtract background ROI 1
    qq = []
    RR = []
    dR = []
    ii = -1
    for filename in runfiles:
        data = dnp.io.load(pathtofiles + "/" + str(filename) + ".dat")
        ii += 1
        theta = data.alpha

        # work out the q vector
        qqtemp = 4 * dnp.pi * dnp.sin((theta + angularfudgefactor) * dnp.pi / 180) / wl
        #qqtemp = data.qdcd

        # this section allows users to set limits on the q range used from each file
        if not 'qmin' + str(ii) in refl.__dict__:
            qmin = qqtemp.min()
        else:
            print "USER SET",
            qmin = refl.__getattribute__('qmin' + str(ii))
        print 'refl.qmin' + str(ii) + " = " + str(qmin) + " ;",
        if not 'qmax' + str(ii) in refl.__dict__:
            qmax = qqtemp.max()
        else:
            print "USER SET",
            qmax = refl.__getattribute__('qmax' + str(ii))
        print 'refl.qmax' + str(ii) + " = " + str(qmax) + " ;",

        roi1_sum = data.roi1_sum
        roi1_sum = roi1_sum[dnp.where((qqtemp >= qmin) & (qqtemp <= qmax))]
        roi1dr = dnp.sqrt(roi1_sum)
        theta = theta[dnp.where((qqtemp >= qmin) & (qqtemp <= qmax))]
        qqtemp = qqtemp[dnp.where((qqtemp >= qmin) & (qqtemp <= qmax))]

        bg_sum = dnp.zeros(len(roi1_sum))
        bg_dr = dnp.zeros(len(roi1dr))

        # if the background ROI number is given as an int, convert it to a
        # single-item tuple
        if type(back) == int:
            back = (back,)

        # subtract any background ROIs from the data
        if len(back) > 0 and back[0] > 0:
            if ii == 0:
                print "Using background from " + str(len(back)) + " ROIs: " + str(back)
            for bg in back:
                if ('roi' + str(bg) + '_sum' in data.keys()):
                    bg_cur = data[data.keys().index('roi' + str(bg) + '_sum')]
                    dr_cur = dnp.sqrt(bg_cur)
                    (bg_sum, bg_dr) = ep.EPadd(bg_sum, bg_dr, bg_cur, dr_cur)
            (bg_sum, bg_dr) = ep.EPmulk(bg_sum, bg_dr, 1.0 / len(back))
        else:
            if ii == 0:
                print "Not subtracting a background"
        (RRtemp, drtemp) = ep.EPsub(roi1_sum, roi1dr, bg_sum, bg_dr)

        # do a footprint correction:
        # assumes the beam is Gaussian in profile with a FWHM of "beamheight"
        # (microns); the sample footprint is measured in mm.
        areamultiplier = 2 * (norm.cdf(
            footprint * dnp.sin((theta + angularfudgefactor) / 180 * dnp.pi) / 2,
            0, 1e-3 * beamheight / (2 * dnp.sqrt(2 * dnp.log(2)))) - 0.5)
        RRtemp /= areamultiplier
        drtemp /= areamultiplier

        # for the 2nd, 3rd, 4th... q ranges, splice the data onto the end of
        # the pre-existing data
        if ii > 0:
            (scalingfactor, sferror) = nsplice.getScalingInOverlap(
                qq, RR, dR, qqtemp, RRtemp, drtemp)
            RRtemp *= scalingfactor
            drtemp *= scalingfactor
            print "Error in scaling factor: %2.3f %%" % (sferror / scalingfactor * 100)

        # now concatenate the data
        qq = dnp.concatenate((qq, qqtemp))
        RR = dnp.concatenate((RR, RRtemp))
        dR = dnp.concatenate((dR, drtemp))
    # end of per-file loop

    RR /= scalar
    dR /= scalar
    RR = RR[np.argsort(qq)]
    dR = dR[np.argsort(qq)]
    qq = np.sort(qq)

    # write out the data
    np.savetxt(outputpath + "/" + str(runfiles[0]) + "_refl.dat",
               dnp.concatenate((qq, RR, dR)).reshape(3, qq.shape[0]).transpose())
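# Footprint-correction maths, for reference: with a Gaussian beam of standard
# deviation sigma = FWHM / (2*sqrt(2*ln2)), the fraction of the beam
# intercepted by a sample of length L at incidence angle theta is
#     2*(Phi(L*sin(theta)/2; 0, sigma) - 0.5) = erf(L*sin(theta) / (2*sqrt(2)*sigma)),
# which is what "areamultiplier" evaluates above (beamheight is converted
# from microns to mm to match the footprint units).

# Example call -- a sketch only; the run numbers, paths and geometry below
# are hypothetical, so substitute values for your own experiment:
#
#   refl([123456, 123457, 123458],    # runs covering successive q ranges
#        "/dls/i07/data/si00000-1",   # hypothetical input directory
#        "/tmp/processed",            # output directory
#        1.0e6,                       # scalar: normalisation to divide by
#        80.0,                        # beam FWHM in microns
#        10.0,                        # sample footprint in mm
#        0.0,                         # angular offset in degrees
#        1.0,                         # wavelength
#        back=(2, 3))                 # average ROIs 2 and 3 as background
#
# To restrict the q range taken from an individual file, set an attribute on
# the function before calling it, e.g. refl.qmin1 = 0.05 clips the second
# file (ii = 1) to q >= 0.05.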
def rerefl(runfiles):
    global pathtofiles, savedbean, firstfiledata
    # also relies on module-level names set elsewhere in the session:
    # scalar, beamheight, footprint, angularfudgefactor, wl, outputpath,
    # imgdisp and the replace_path() helper
    qq = []
    RR = []
    dR = []
    ii = -1
    for filename in runfiles:
        roi1sum = []
        roi1dr = []
        bg_sum = []
        bg_dr = []
        data = dnp.io.load(pathtofiles + "/" + str(filename) + ".dat")
        ii += 1
        if ii == 0:
            firstfiledata = data

        # define theta
        theta = data.alpha
        # define q
        qqtemp = 4 * dnp.pi * dnp.sin((theta + angularfudgefactor) * dnp.pi / 180) / wl
        #qqtemp = data.qdcd
        qmin = qqtemp.min()
        qmax = qqtemp.max()

        # plot the first image of the first file
        if ii == 0:
            global image
            image = dnp.io.load(replace_path(data['file'][imgdisp]), warn=False)
            dnp.plot.image(image[0], name='Plot 1', resetaxes=False)

        # find ROIs from the saved bean
        try:
            rois = dnp.plot.getrois(savedbean)
            norois = len(rois)
        # if we don't have any ROIs yet, ask the user to draw some
        except (KeyError, TypeError, NameError):
            if ii == 0:
                print "\nPlease define some regions of interest then type getrois()"
                print "You must type getrois() after adding/changing any regions due to a bug in DAWN."
            norois = 0

        # this section to be restored when ROIs are working again
        ## find ROIs from plot window
        #bean = dnp.plot.getbean('Plot 1')
        #try:
        #    rois = dnp.plot.getrois(bean)
        #    norois = len(rois)
        ## if we don't have any ROIs yet, ask the user to draw some
        #except (KeyError, TypeError):
        #    if ii == 0:
        #        print "Please define some regions of interest"
        #    norois = 0

        if norois > 0:
            if ii == 0:
                print str(norois) + " ROIs defined, " + str(norois - 1) + " will be used for the background"
            for imgfile in data['file']:
                imgdata = dnp.io.load(replace_path(imgfile), warn=False)
                dnp.plot.image(imgdata[0], name="Plot 1", resetaxes=False)
                image = imgdata[0].transpose()  # Pilatus images load with axes transposed for some reason
                bg_pt = 0
                bgdr_pt = 0
                for j in range(0, norois):
                    roi = image[int(rois[j].spt[0]):int(rois[j].spt[0] + rois[j].len[0]),
                                int(rois[j].spt[1]):int(rois[j].spt[1] + rois[j].len[1])]
                    roisum_pt = dnp.float(roi.sum())
                    if j == 0:
                        roi1sum.append(roisum_pt)
                        roi1dr.append(dnp.sqrt(roisum_pt))
                    else:
                        (bg_pt, bgdr_pt) = ep.EPadd(bg_pt, bgdr_pt, roisum_pt, dnp.sqrt(roisum_pt))
                bg_sum.append(bg_pt)
                bg_dr.append(bgdr_pt)

            # convert lists to arrays
            (roi1sum, roi1dr, bg_sum, bg_dr) = (dnp.array(roi1sum), dnp.array(roi1dr),
                                                dnp.array(bg_sum), dnp.array(bg_dr))

            # normalise the background to the area of the signal ROI
            if norois > 1:
                bgsize = 0
                for k in range(1, norois):
                    bgsize += rois[k].len[0] * rois[k].len[1]
                (bg_sum, bg_dr) = ep.EPmulk(bg_sum, bg_dr, rois[0].len[0] * rois[0].len[1] / bgsize)

            # subtract the background
            (RRtemp, drtemp) = ep.EPsub(roi1sum, roi1dr, bg_sum, bg_dr)

            # do a footprint correction:
            # assumes the beam is Gaussian in profile with a FWHM of "beamheight";
            # the sample footprint is measured in mm.
            areamultiplier = 2 * (norm.cdf(
                dnp.float(footprint) * dnp.sin((theta + dnp.float(angularfudgefactor)) / 180 * dnp.pi) / 2,
                0, 1e-3 * dnp.float(beamheight) / (2 * dnp.sqrt(2 * dnp.log(2)))) - 0.5)
            RRtemp /= areamultiplier
            drtemp /= areamultiplier

            # for the 2nd, 3rd, 4th... q ranges, splice the data onto the end
            # of the pre-existing data
            if ii > 0:
                (scalingfactor, sferror) = nsplice.getScalingInOverlap(
                    qq, RR, dR, qqtemp, RRtemp, drtemp)
                RRtemp *= scalingfactor
                drtemp *= scalingfactor

            # now concatenate the data
            qq = dnp.concatenate((qq, qqtemp))
            RR = dnp.concatenate((RR, RRtemp))
            dR = dnp.concatenate((dR, drtemp))
    # end of per-file loop

    if norois > 0:
        RR /= dnp.float(scalar)
        dR /= dnp.float(scalar)
        RR = RR[np.argsort(qq)]
        dR = dR[np.argsort(qq)]
        qq = np.sort(qq)

        # write out the data
        np.savetxt(outputpath + "/" + str(runfiles[0]) + "_rerefl_bkg1.dat",
                   dnp.concatenate((qq, RR, dR)).reshape(3, qq.shape[0]).transpose(),
                   fmt="%.10f %.10e %.10e")
        print "Output saved to " + outputpath + "/" + str(runfiles[0]) + "_rerefl_bkg1.dat"

        # plot the resulting reflectivity curve
        dnp.plot.line(qq, dnp.log10(RR), name='Plot 2')
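# Example interactive session in the DAWN Jython console -- a sketch only:
# the run numbers and settings are hypothetical, and getrois() is assumed to
# be defined elsewhere in this script to capture the Plot 1 bean into
# savedbean (the prompt printed above asks the user to call it):
#
#   >>> pathtofiles = "/dls/i07/data/si00000-1"   # hypothetical path
#   >>> outputpath = "/tmp/processed"
#   >>> scalar = 1.0e6; beamheight = 80.0; footprint = 10.0
#   >>> angularfudgefactor = 0.0; wl = 1.0; imgdisp = 0
#   >>> rerefl([123456, 123457])   # plots the first image; draw ROIs on Plot 1
#   >>> getrois()                  # re-capture the regions after any change
#   >>> rerefl([123456, 123457])   # re-run: ROI 1 is signal, the rest background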