def _transform(self):
    """
    Run the TransformToIqt workflow: validate that the sample and
    resolution workspaces are compatible, then compute I(Q, t) with the
    CalculateIqt algorithm.

    :return: the I(Q, t) workspace (not stored in the ADS)
    """
    from IndirectCommon import CheckHistZero, CheckHistSame, CheckAnalysers
    try:
        CheckAnalysers(self._sample, self._resolution)
    except ValueError:
        # A genuine error that shows that the two runs are incompatible
        raise
    except Exception:
        # Checking could not be performed due to incomplete or no
        # instrument; warn and continue (best-effort check).
        # NOTE: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed here.
        logger.warning(
            'Could not check for matching analyser and reflection')

    # Process resolution data: a multi-histogram resolution must match
    # the sample histogram-for-histogram.
    num_res_hist = CheckHistZero(self._resolution)[0]
    if num_res_hist > 1:
        CheckHistSame(self._sample, 'Sample', self._resolution,
                      'Resolution')

    iqt = CalculateIqt(InputWorkspace=self._sample,
                       ResolutionWorkspace=self._resolution,
                       EnergyMin=self._e_min,
                       EnergyMax=self._e_max,
                       EnergyWidth=self._e_width,
                       NumberOfIterations=self._number_of_iterations,
                       SeedValue=self._seed,
                       StoreInADS=False,
                       OutputWorkspace="__ciqt")

    # Set Y axis unit and label
    iqt.setYUnit('')
    iqt.setYUnitLabel('Intensity')
    return iqt
def _transform(self):
    """
    Run TransformToIqt via the CalculateIqt child algorithm.

    :return: the calculated I(Q, t) workspace (not stored in the ADS)
    """
    from IndirectCommon import CheckHistZero, CheckHistSame, CheckAnalysers
    try:
        CheckAnalysers(self._sample, self._resolution)
    except ValueError:
        # A genuine error that shows that the two runs are incompatible
        raise
    except Exception:
        # Checking could not be performed due to incomplete or no
        # instrument; warn and continue (best-effort check).
        # NOTE: narrowed from 'except BaseException:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed here.
        logger.warning(
            'Could not check for matching analyser and reflection')

    # Process resolution data: a multi-histogram resolution must match
    # the sample histogram-for-histogram.
    num_res_hist = CheckHistZero(self._resolution)[0]
    if num_res_hist > 1:
        CheckHistSame(self._sample, 'Sample', self._resolution,
                      'Resolution')

    # Run CalculateIqt as a child algorithm spanning the remaining
    # progress range, keeping its output out of the ADS.
    calculateiqt_alg = self.createChildAlgorithm(name='CalculateIqt',
                                                 startProgress=0.3,
                                                 endProgress=1.0,
                                                 enableLogging=True)
    calculateiqt_alg.setAlwaysStoreInADS(False)
    args = {
        "InputWorkspace": self._sample,
        "OutputWorkspace": "iqt",
        "ResolutionWorkspace": self._resolution,
        "EnergyMin": self._e_min,
        "EnergyMax": self._e_max,
        "EnergyWidth": self._e_width,
        "CalculateErrors": self._calculate_errors,
        "NumberOfIterations": self._number_of_iterations,
        "SeedValue": self._seed
    }
    for key, value in args.items():
        calculateiqt_alg.setProperty(key, value)
    calculateiqt_alg.execute()
    iqt = calculateiqt_alg.getProperty("OutputWorkspace").value

    # Set Y axis unit and label
    iqt.setYUnit('')
    iqt.setYUnitLabel('Intensity')
    return iqt
def PyExec(self):
    """
    Execute the BayesQuasi fit.

    Runs the f2py-compiled FORTRAN quasi-elastic routines (QLr.qlres /
    QLd.qldata for the Lorentzian programme 'QL', Qse.qlstexp for the
    stretched-exponential programme 'QSe') over each selected sample
    spectrum, builds per-spectrum fit workspaces, groups them, attaches
    sample logs, optionally saves to NeXus, and sets the output
    workspace properties.
    """
    # Check for platform support: the f2py-built FORTRAN extension
    # modules only exist on a subset of platforms.
    if not is_supported_f2py_platform():
        unsupported_msg = "This algorithm can only be run on valid platforms." \
                          + " please view the algorithm documentation to see" \
                          + " what platforms are currently supported"
        raise RuntimeError(unsupported_msg)

    from IndirectBayes import (CalcErange, GetXYE, ReadNormFile,
                               ReadWidthFile, QLAddSampleLogs, C2Fw,
                               C2Se, QuasiPlot)
    from IndirectCommon import (CheckXrange, CheckAnalysers, getEfixed,
                                GetThetaQ, CheckHistZero, CheckHistSame,
                                IndentifyDataBoundaries)

    setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
    self.log().information('BayesQuasi input')

    erange = [self._e_min, self._e_max]       # energy-transfer fit window
    nbins = [self._sam_bins, self._res_bins]  # sample / resolution binning

    setup_prog.report('Converting to binary for Fortran')
    # Convert the boolean options to the 1/0 flags the FORTRAN expects.
    o_el = 1 if self._elastic else 0
    o_w1 = 1 if self._width else 0
    o_res = 1 if self._res_norm else 0

    # The FORTRAN encodes the background choice as a number:
    # 2 = sloping, 1 = flat, 0 = zero.
    setup_prog.report('Encoding input options')
    if self._background == 'Sloping':
        o_bgd = 2
    elif self._background == 'Flat':
        o_bgd = 1
    elif self._background == 'Zero':
        o_bgd = 0
    # NOTE(review): o_bgd is unbound for any other background value --
    # presumably the property validator restricts the choices; confirm.

    fitOp = [o_el, o_bgd, o_w1, o_res]  # option block passed to FORTRAN

    setup_prog.report('Establishing save path')
    workdir = config['defaultsave.directory']
    if not os.path.isdir(workdir):
        # Fall back to the CWD when no default save directory is set.
        workdir = os.getcwd()
        logger.information('Default Save directory is not set. Defaulting to current working Directory: ' + workdir)

    array_len = 4096  # length of array in Fortran (fixed-size buffers)

    setup_prog.report('Checking X Range')
    CheckXrange(erange, 'Energy')

    nbin, nrbin = nbins[0], nbins[1]

    logger.information('Sample is ' + self._samWS)
    logger.information('Resolution is ' + self._resWS)

    # Check for trailing and leading zeros in data, and shrink the fit
    # window to exclude the all-zero regions.
    setup_prog.report(
        'Checking for leading and trailing zeros in the data')
    first_data_point, last_data_point = IndentifyDataBoundaries(
        self._samWS)
    if first_data_point > self._e_min:
        logger.warning(
            "Sample workspace contains leading zeros within the energy range."
        )
        logger.warning("Updating eMin: eMin = " + str(first_data_point))
        self._e_min = first_data_point
    if last_data_point < self._e_max:
        logger.warning(
            "Sample workspace contains trailing zeros within the energy range."
        )
        logger.warning("Updating eMax: eMax = " + str(last_data_point))
        self._e_max = last_data_point
    # update erange with new (possibly trimmed) values
    erange = [self._e_min, self._e_max]

    setup_prog.report('Checking Analysers')
    CheckAnalysers(self._samWS, self._resWS)
    setup_prog.report('Obtaining EFixed, theta and Q')
    efix = getEfixed(self._samWS)
    theta, Q = GetThetaQ(self._samWS)

    nsam, ntc = CheckHistZero(self._samWS)
    totalNoSam = nsam

    # check if we're performing a sequential fit; if not, only the
    # first spectrum is processed
    if self._loop != True:
        nsam = 1

    nres = CheckHistZero(self._resWS)[0]

    setup_prog.report('Checking Histograms')
    # Select the FORTRAN routine: a single-histogram resolution uses
    # the 'res file' variants, otherwise the 'data file' variant
    # (QL only -- QSe requires a res file).
    if self._program == 'QL':
        if nres == 1:
            prog = 'QLr'  # res file
        else:
            prog = 'QLd'  # data file
            CheckHistSame(self._samWS, 'Sample', self._resWS,
                          'Resolution')
    elif self._program == 'QSe':
        if nres == 1:
            prog = 'QSe'  # res file
        else:
            raise ValueError('Stretched Exp ONLY works with RES file')

    logger.information('Version is ' + prog)
    logger.information(' Number of spectra = ' + str(nsam))
    logger.information(' Erange : ' + str(erange[0]) + ' to ' +
                       str(erange[1]))

    setup_prog.report('Reading files')
    Wy, We = ReadWidthFile(self._width, self._wfile, totalNoSam)
    dtn, xsc = ReadNormFile(self._res_norm, self._resnormWS, totalNoSam)

    setup_prog.report('Establishing output workspace name')
    fname = self._samWS[:-4] + '_' + prog
    probWS = fname + '_Prob'
    fitWS = fname + '_Fit'
    wrks = os.path.join(workdir, self._samWS[:-4])
    logger.information(' lptfile : ' + wrks + '_' + prog + '.lpt')
    lwrk = len(wrks)
    # NOTE(review): str.ljust returns a new string; these two calls
    # discard their result, so no padding actually happens here.
    wrks.ljust(140, ' ')
    wrkr = self._resWS
    wrkr.ljust(140, ' ')

    setup_prog.report('Initialising probability list')
    # initialise probability list (filled per spectrum for QL)
    if self._program == 'QL':
        prob0 = []
        prob1 = []
        prob2 = []
    # X axis for the probability workspace: the Q values, repeated once
    # per probability spectrum (3 spectra).
    xQ = np.array([Q[0]])
    for m in range(1, nsam):
        xQ = np.append(xQ, Q[m])
    xProb = xQ
    xProb = np.append(xProb, xQ)
    xProb = np.append(xProb, xQ)
    eProb = np.zeros(3 * nsam)

    group = ''  # comma-separated list of per-spectrum workspace names
    workflow_prog = Progress(self, start=0.3, end=0.7,
                             nreports=nsam * 3)
    for m in range(0, nsam):
        logger.information('Group ' + str(m) + ' at angle ' +
                           str(theta[m]))
        nsp = m + 1
        nout, bnorm, Xdat, Xv, Yv, Ev = CalcErange(self._samWS, m,
                                                   erange, nbin)
        Ndat = nout[0]
        Imin = nout[1]
        Imax = nout[2]
        # QLd reads the matching resolution spectrum; the res-file
        # variants always read spectrum 0.
        if prog == 'QLd':
            mm = m
        else:
            mm = 0
        Nb, Xb, Yb, Eb = GetXYE(self._resWS, mm,
                                array_len)  # get resolution data
        numb = [nsam, nsp, ntc, Ndat, nbin, Imin, Imax, Nb, nrbin]
        rscl = 1.0
        reals = [efix, theta[m], rscl, bnorm]

        if prog == 'QLr':
            # NOTE(review): reports the total spectrum count (nsam),
            # not the current spectrum -- looks like it should be nsp;
            # confirm before changing.
            workflow_prog.report(
                'Processing Sample number %i as Lorentzian' % nsam)
            nd, xout, yout, eout, yfit, yprob = QLr.qlres(
                numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, Wy, We,
                dtn, xsc, wrks, wrkr, lwrk)
            message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(
                yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
            logger.information(message)
        if prog == 'QLd':
            workflow_prog.report('Processing Sample number %i' % nsam)
            nd, xout, yout, eout, yfit, yprob = QLd.qldata(
                numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, Eb, Wy,
                We, wrks, wrkr, lwrk)
            message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(
                yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
            logger.information(message)
        if prog == 'QSe':
            workflow_prog.report(
                'Processing Sample number %i as Stretched Exp' % nsam)
            nd, xout, yout, eout, yfit, yprob = Qse.qlstexp(
                numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, Wy, We,
                dtn, xsc, wrks, wrkr, lwrk)

        # Append an extra edge so dataX has nd+1 entries (bin edges for
        # the nd data points).
        dataX = xout[:nd]
        dataX = np.append(dataX, 2 * xout[nd - 1] - xout[nd - 2])
        yfit_list = np.split(yfit[:4 * nd], 4)
        dataF1 = yfit_list[1]  # first fit curve
        if self._program == 'QL':
            dataF2 = yfit_list[2]  # second fit curve (two Lorentzians)

        workflow_prog.report('Processing data')
        dataG = np.zeros(nd)  # zero errors for the fit/diff spectra
        # Assemble the output spectra: data, fit.1, diff.1
        # (plus fit.2, diff.2 for QL below).
        datX = dataX
        datY = yout[:nd]
        datE = eout[:nd]
        datX = np.append(datX, dataX)
        datY = np.append(datY, dataF1[:nd])
        datE = np.append(datE, dataG)
        res1 = dataF1[:nd] - yout[:nd]
        datX = np.append(datX, dataX)
        datY = np.append(datY, res1)
        datE = np.append(datE, dataG)
        nsp = 3  # nsp re-used: now the number of output spectra
        names = 'data,fit.1,diff.1'
        res_plot = [0, 1, 2]
        if self._program == 'QL':
            workflow_prog.report('Processing Lorentzian result data')
            datX = np.append(datX, dataX)
            datY = np.append(datY, dataF2[:nd])
            datE = np.append(datE, dataG)
            res2 = dataF2[:nd] - yout[:nd]
            datX = np.append(datX, dataX)
            datY = np.append(datY, res2)
            datE = np.append(datE, dataG)
            nsp += 2
            names += ',fit.2,diff.2'
            res_plot.append(4)
            # accumulate the per-spectrum fit probabilities
            prob0.append(yprob[0])
            prob1.append(yprob[1])
            prob2.append(yprob[2])

        # create result workspace for this spectrum
        # NOTE(review): rebinds fitWS (was fname + '_Fit' above); the
        # '_Workspaces' name is what GroupWorkspaces below receives.
        fitWS = fname + '_Workspaces'
        fout = fname + '_Workspace_' + str(m)
        workflow_prog.report('Creating OutputWorkspace')
        CreateWorkspace(OutputWorkspace=fout, DataX=datX, DataY=datY,
                        DataE=datE, Nspec=nsp, UnitX='DeltaE',
                        VerticalAxisUnit='Text',
                        VerticalAxisValues=names)
        # append workspace to list of results
        group += fout + ','

    comp_prog = Progress(self, start=0.7, end=0.8, nreports=2)
    comp_prog.report('Creating Group Workspace')
    GroupWorkspaces(InputWorkspaces=group, OutputWorkspace=fitWS)

    if self._program == 'QL':
        # Assemble the 3-spectrum probability workspace and convert the
        # fit results into the output parameter workspace.
        comp_prog.report('Processing Lorentzian probability data')
        yPr0 = np.array([prob0[0]])
        yPr1 = np.array([prob1[0]])
        yPr2 = np.array([prob2[0]])
        for m in range(1, nsam):
            yPr0 = np.append(yPr0, prob0[m])
            yPr1 = np.append(yPr1, prob1[m])
            yPr2 = np.append(yPr2, prob2[m])
        yProb = yPr0
        yProb = np.append(yProb, yPr1)
        yProb = np.append(yProb, yPr2)
        # NOTE(review): the returned handle probWs is never used; the
        # workspace is referenced by name (probWS) further down.
        probWs = CreateWorkspace(OutputWorkspace=probWS, DataX=xProb,
                                 DataY=yProb, DataE=eProb, Nspec=3,
                                 UnitX='MomentumTransfer')
        outWS = C2Fw(self._samWS[:-4], fname)
        if self._plot != 'None':
            QuasiPlot(fname, self._plot, res_plot, self._loop)
    if self._program == 'QSe':
        comp_prog.report('Runnning C2Se')
        outWS = C2Se(fname)
        if self._plot != 'None':
            QuasiPlot(fname, self._plot, res_plot, self._loop)

    log_prog = Progress(self, start=0.8, end=1.0, nreports=8)
    # Add some sample logs to the output workspaces
    log_prog.report('Copying Logs to outputWorkspace')
    CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=outWS)
    log_prog.report('Adding Sample logs to Output workspace')
    QLAddSampleLogs(outWS, self._resWS, prog, self._background,
                    self._elastic, erange, (nbin, nrbin),
                    self._resnormWS, self._wfile)
    log_prog.report('Copying logs to fit Workspace')
    CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=fitWS)
    log_prog.report('Adding sample logs to Fit workspace')
    QLAddSampleLogs(fitWS, self._resWS, prog, self._background,
                    self._elastic, erange, (nbin, nrbin),
                    self._resnormWS, self._wfile)
    log_prog.report('Finialising log copying')

    if self._save:
        # Save both outputs as NeXus files in the save directory.
        log_prog.report('Saving workspaces')
        fit_path = os.path.join(workdir, fitWS + '.nxs')
        SaveNexusProcessed(InputWorkspace=fitWS, Filename=fit_path)
        out_path = os.path.join(workdir,
                                outWS + '.nxs')  # path name for nxs file
        SaveNexusProcessed(InputWorkspace=outWS, Filename=out_path)
        logger.information('Output fit file created : ' + fit_path)
        logger.information('Output paramter file created : ' + out_path)

    self.setProperty('OutputWorkspaceFit', fitWS)
    self.setProperty('OutputWorkspaceResult', outWS)
    log_prog.report('Setting workspace properties')
    if self._program == 'QL':
        self.setProperty('OutputWorkspaceProb', probWS)
def _transform(self):
    """
    Run TransformToIqt manually: rebin, integrate and FFT the sample
    and resolution workspaces, normalise each FFT by its integral, and
    divide sample by resolution to produce I(Q, t) in
    self._output_workspace. Temporary '__'-prefixed ADS workspaces are
    deleted before returning.
    """
    from IndirectCommon import CheckHistZero, CheckHistSame, CheckAnalysers

    trans_prog = Progress(self, start=0.3, end=0.8, nreports=15)
    try:
        CheckAnalysers(self._sample, self._resolution)
    except ValueError:
        # A genuine error that shows that the two runs are incompatible
        raise
    except Exception:
        # Checking could not be performed due to incomplete or no
        # instrument; warn and continue (best-effort check).
        # NOTE: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed here.
        logger.warning(
            'Could not check for matching analyser and reflection')

    # Process resolution data: a multi-histogram resolution must match
    # the sample histogram-for-histogram.
    num_res_hist = CheckHistZero(self._resolution)[0]
    if num_res_hist > 1:
        CheckHistSame(self._sample, 'Sample', self._resolution,
                      'Resolution')

    rebin_param = str(self._e_min) + ',' + str(self._e_width) + ',' + str(
        self._e_max)
    trans_prog.report('Rebinning Workspace')
    Rebin(InputWorkspace=self._sample,
          OutputWorkspace='__sam_data',
          Params=rebin_param,
          FullBinsOnly=True)

    # Sample: rebin, integrate, FFT, then normalise the FFT by the
    # integral.
    # NOTE(review): the sample is rebinned twice with the same params;
    # presumably harmless -- confirm whether the second Rebin is needed.
    trans_prog.report('Rebinning sample')
    Rebin(InputWorkspace='__sam_data',
          OutputWorkspace='__sam_data',
          Params=rebin_param)
    trans_prog.report('Integrating Sample')
    Integration(InputWorkspace='__sam_data', OutputWorkspace='__sam_int')
    trans_prog.report('Converting Sample to data points')
    ConvertToPointData(InputWorkspace='__sam_data',
                       OutputWorkspace='__sam_data')
    trans_prog.report('Extracting FFT spectrum for Sample')
    ExtractFFTSpectrum(InputWorkspace='__sam_data',
                       OutputWorkspace='__sam_fft',
                       FFTPart=2)
    trans_prog.report('Dividing Sample')
    Divide(LHSWorkspace='__sam_fft',
           RHSWorkspace='__sam_int',
           OutputWorkspace='__sam')

    # Resolution: same chain as the sample.
    # (progress message typo 'Rebinnig' fixed)
    trans_prog.report('Rebinning Resolution')
    Rebin(InputWorkspace=self._resolution,
          OutputWorkspace='__res_data',
          Params=rebin_param)
    trans_prog.report('Integrating Resolution')
    Integration(InputWorkspace='__res_data', OutputWorkspace='__res_int')
    trans_prog.report('Converting Resolution to data points')
    ConvertToPointData(InputWorkspace='__res_data',
                       OutputWorkspace='__res_data')
    # (progress message typo 'Extractig' fixed)
    trans_prog.report('Extracting FFT Resolution spectrum')
    ExtractFFTSpectrum(InputWorkspace='__res_data',
                       OutputWorkspace='__res_fft',
                       FFTPart=2)
    trans_prog.report('Dividing Resolution')
    Divide(LHSWorkspace='__res_fft',
           RHSWorkspace='__res_int',
           OutputWorkspace='__res')

    # (progress message typo 'Diving' fixed)
    trans_prog.report('Dividing Workspaces')
    Divide(LHSWorkspace='__sam',
           RHSWorkspace='__res',
           OutputWorkspace=self._output_workspace)

    # Cleanup sample workspaces
    trans_prog.report('Deleting Sample temp')
    DeleteWorkspace('__sam_data')
    DeleteWorkspace('__sam_int')
    DeleteWorkspace('__sam_fft')
    DeleteWorkspace('__sam')

    # Crop nonsense values off workspace (keep the lower half of the
    # output bins)
    binning = int(math.ceil(mtd[self._output_workspace].blocksize() / 2.0))
    bin_v = mtd[self._output_workspace].dataX(0)[binning]
    trans_prog.report('Cropping output')
    CropWorkspace(InputWorkspace=self._output_workspace,
                  OutputWorkspace=self._output_workspace,
                  XMax=bin_v)

    # Set Y axis unit and label
    mtd[self._output_workspace].setYUnit('')
    mtd[self._output_workspace].setYUnitLabel('Intensity')

    trans_prog.report('Deleting Resolution temp')
    # Clean up resolution workspaces
    DeleteWorkspace('__res_data')
    DeleteWorkspace('__res_int')
    DeleteWorkspace('__res_fft')
    DeleteWorkspace('__res')
def PyExec(self):
    """
    Execute the BayesStretch fit.

    Runs the f2py-compiled FORTRAN routine Que.quest over each selected
    sample spectrum, collects the Sigma and Beta results plus the
    per-spectrum probability contour (Zp) data, groups them into the
    Fit and Contour output workspace groups, attaches sample logs and
    sets the output properties.
    """
    # Raises if the f2py-compiled FORTRAN modules are unavailable on
    # this platform.
    run_f2py_compatibility_test()

    from IndirectBayes import (CalcErange, GetXYE)
    from IndirectCommon import (CheckXrange, CheckAnalysers, getEfixed,
                                GetThetaQ, CheckHistZero)

    setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
    logger.information('BayesStretch input')
    logger.information('Sample is %s' % self._sam_name)
    logger.information('Resolution is %s' % self._res_name)

    setup_prog.report('Converting to binary for Fortran')
    fitOp = self._encode_fit_ops(self._elastic, self._background)

    setup_prog.report('Establishing save path')
    workdir = self._establish_save_path()

    setup_prog.report('Checking X Range')
    CheckXrange(self._erange, 'Energy')
    setup_prog.report('Checking Analysers')
    CheckAnalysers(self._sam_name, self._res_name)
    setup_prog.report('Obtaining EFixed, theta and Q')
    efix = getEfixed(self._sam_name)
    theta, Q = GetThetaQ(self._sam_name)

    setup_prog.report('Checking Histograms')
    nsam, ntc = CheckHistZero(self._sam_name)

    # check if we're performing a sequential fit; if not, only the
    # first spectrum is processed
    if not self._loop:
        nsam = 1

    logger.information('Version is Stretch')
    logger.information('Number of spectra = %s ' % nsam)
    logger.information('Erange : %f to %f ' %
                       (self._erange[0], self._erange[1]))

    setup_prog.report('Creating FORTRAN Input')
    fname = self._sam_name[:-4] + '_Stretch'
    wrks = os.path.join(workdir, self._sam_name[:-4])
    logger.information('lptfile : %s_Qst.lpt' % wrks)
    lwrk = len(wrks)
    # NOTE(review): str.ljust returns a new string; these two calls
    # discard their result, so no padding actually happens here.
    wrks.ljust(140, ' ')
    wrkr = self._res_name
    wrkr.ljust(140, ' ')

    eBet0 = np.zeros(self._nbet)  # set errors to zero (beta)
    eSig0 = np.zeros(self._nsig)  # set errors to zero (sigma)
    rscl = 1.0
    Qaxis = ''  # comma-separated Q values for the vertical axis

    workflow_prog = Progress(self, start=0.3, end=0.7,
                             nreports=nsam * 3)

    # Empty arrays to hold Sigma and Bet x,y,e values
    xSig, ySig, eSig = [], [], []
    xBet, yBet, eBet = [], [], []

    for m in range(nsam):
        logger.information('Group %i at angle %f' % (m, theta[m]))
        nsp = m + 1
        nout, bnorm, Xdat, Xv, Yv, Ev = CalcErange(self._sam_name, m,
                                                   self._erange,
                                                   self._nbins[0])
        Ndat = nout[0]
        Imin = nout[1]
        Imax = nout[2]

        # get resolution data (4096 = FORTRAN array length)
        Nb, Xb, Yb, _ = GetXYE(self._res_name, 0, 4096)
        numb = [
            nsam, nsp, ntc, Ndat, self._nbins[0], Imin, Imax, Nb,
            self._nbins[1], self._nbet, self._nsig
        ]
        reals = [efix, theta[m], rscl, bnorm]

        workflow_prog.report('Processing spectrum number %i' % m)
        xsout, ysout, xbout, ybout, zpout = Que.quest(
            numb, Xv, Yv, Ev, reals, fitOp, Xdat, Xb, Yb, wrks, wrkr,
            lwrk)
        dataXs = xsout[:self._nsig]  # reduce from fixed FORTRAN array
        dataYs = ysout[:self._nsig]
        dataXb = xbout[:self._nbet]
        dataYb = ybout[:self._nbet]
        zpWS = fname + '_Zp' + str(m)

        if m > 0:
            Qaxis += ','
        Qaxis += str(Q[m])

        # Build the Zp (probability contour) data: nsig spectra of
        # nbet points each from the flattened FORTRAN output.
        dataXz = []
        dataYz = []
        dataEz = []
        for n in range(self._nsig):
            # NOTE(review): this split is loop-invariant and could be
            # hoisted out of the inner loop.
            yfit_list = np.split(zpout[:self._nsig * self._nbet],
                                 self._nsig)
            dataYzp = yfit_list[n]

            dataXz = np.append(dataXz, xbout[:self._nbet])
            dataYz = np.append(dataYz, dataYzp[:self._nbet])
            dataEz = np.append(dataEz, eBet0)

        # NOTE(review): re-assignment of zpWS with the same value as
        # above.
        zpWS = fname + '_Zp' + str(m)
        self._create_workspace(zpWS, [dataXz, dataYz, dataEz],
                               self._nsig, dataXs, True)

        # Accumulate sigma and beta results across spectra.
        xSig = np.append(xSig, dataXs)
        ySig = np.append(ySig, dataYs)
        eSig = np.append(eSig, eSig0)
        xBet = np.append(xBet, dataXb)
        yBet = np.append(yBet, dataYb)
        eBet = np.append(eBet, eBet0)

        # Build the comma-separated list of Zp workspace names.
        if m == 0:
            groupZ = zpWS
        else:
            groupZ = groupZ + ',' + zpWS

    # create workspaces for sigma and beta
    workflow_prog.report('Creating OutputWorkspace')
    self._create_workspace(fname + '_Sigma', [xSig, ySig, eSig], nsam,
                           Qaxis)
    self._create_workspace(fname + '_Beta', [xBet, yBet, eBet], nsam,
                           Qaxis)

    group = fname + '_Sigma,' + fname + '_Beta'
    fit_ws = fname + '_Fit'
    s_api.GroupWorkspaces(InputWorkspaces=group,
                          OutputWorkspace=fit_ws)
    contour_ws = fname + '_Contour'
    s_api.GroupWorkspaces(InputWorkspaces=groupZ,
                          OutputWorkspace=contour_ws)

    # Add some sample logs to the output workspaces
    log_prog = Progress(self, start=0.8, end=1.0, nreports=6)
    log_prog.report('Copying Logs to Fit workspace')
    copy_log_alg = self.createChildAlgorithm('CopyLogs',
                                             enableLogging=False)
    copy_log_alg.setProperty('InputWorkspace', self._sam_name)
    copy_log_alg.setProperty('OutputWorkspace', fit_ws)
    copy_log_alg.execute()
    log_prog.report('Adding Sample logs to Fit workspace')
    self._add_sample_logs(fit_ws, self._erange, self._nbins[0])
    log_prog.report('Copying logs to Contour workspace')
    # Re-use the same child algorithm for the contour workspace.
    copy_log_alg.setProperty('InputWorkspace', self._sam_name)
    copy_log_alg.setProperty('OutputWorkspace', contour_ws)
    copy_log_alg.execute()
    log_prog.report('Adding sample logs to Contour workspace')
    self._add_sample_logs(contour_ws, self._erange, self._nbins[0])
    log_prog.report('Finialising log copying')

    self.setProperty('OutputWorkspaceFit', fit_ws)
    self.setProperty('OutputWorkspaceContour', contour_ws)
    log_prog.report('Setting workspace properties')
def _fury(self):
    """
    Run Fury: Fourier-transform the rebinned sample and resolution
    workspaces, normalise each FFT by its integral, and divide sample
    by resolution; the result is left in self._output_workspace.
    Temporary '__'-prefixed ADS workspaces are removed before
    returning.
    """
    from IndirectCommon import CheckHistZero, CheckHistSame, CheckAnalysers

    # Process resolution data: check analyser compatibility, and for a
    # multi-histogram resolution require a histogram-for-histogram
    # match with the sample.
    CheckAnalysers(self._sample, self._resolution, self._verbose)
    if CheckHistZero(self._resolution)[0] > 1:
        CheckHistSame(self._sample, 'Sample', self._resolution,
                      'Resolution')

    rebin_params = ','.join(
        str(value) for value in (self._e_min, self._e_width, self._e_max))

    Rebin(InputWorkspace=self._sample,
          OutputWorkspace='__sam_rebin',
          Params=rebin_params,
          FullBinsOnly=True)

    # Resolution chain: rebin -> integrate -> FFT -> normalise.
    Rebin(InputWorkspace=self._resolution,
          OutputWorkspace='__res_data',
          Params=rebin_params)
    Integration(InputWorkspace='__res_data', OutputWorkspace='__res_int')
    ConvertToPointData(InputWorkspace='__res_data',
                       OutputWorkspace='__res_data')
    ExtractFFTSpectrum(InputWorkspace='__res_data',
                       OutputWorkspace='__res_fft',
                       FFTPart=2)
    Divide(LHSWorkspace='__res_fft',
           RHSWorkspace='__res_int',
           OutputWorkspace='__res')

    # Sample chain: identical sequence applied to the rebinned sample.
    Rebin(InputWorkspace='__sam_rebin',
          OutputWorkspace='__sam_data',
          Params=rebin_params)
    Integration(InputWorkspace='__sam_data', OutputWorkspace='__sam_int')
    ConvertToPointData(InputWorkspace='__sam_data',
                       OutputWorkspace='__sam_data')
    ExtractFFTSpectrum(InputWorkspace='__sam_data',
                       OutputWorkspace='__sam_fft',
                       FFTPart=2)
    Divide(LHSWorkspace='__sam_fft',
           RHSWorkspace='__sam_int',
           OutputWorkspace='__sam')

    Divide(LHSWorkspace='__sam',
           RHSWorkspace='__res',
           OutputWorkspace=self._output_workspace)

    # Remove the temporary sample workspaces.
    for temp_name in ('__sam_rebin', '__sam_data', '__sam_int',
                      '__sam_fft', '__sam'):
        DeleteWorkspace(temp_name)

    # Crop nonsense values off workspace: keep only the lower half of
    # the output bins.
    output = mtd[self._output_workspace]
    half_bin = int(math.ceil(output.blocksize() / 2.0))
    CropWorkspace(InputWorkspace=self._output_workspace,
                  OutputWorkspace=self._output_workspace,
                  XMax=output.dataX(0)[half_bin])

    # Remove the temporary resolution workspaces.
    for temp_name in ('__res_data', '__res_int', '__res_fft', '__res'):
        DeleteWorkspace(temp_name)