def runTest(self):
    """Predict, integrate and combine satellite peaks for two modulation
    vectors on WISH run 38237, then save the result in Jana format."""
    # Load raw data (bank 1); default name so the vanadium doesn't get overwritten
    wsMD = LoadMD("WISH38237_MD.nxs")

    # One modulation vector per satellite family to predict and integrate
    qs = [(0.15, 0, 0.3), (-0.15, 0, 0.3)]

    # Empty parent peaks workspace carrying the instrument and the UB matrix
    all_pks = CreatePeaksWorkspace(InstrumentWorkspace=wsMD, NumberOfPeaks=0,
                                   OutputWorkspace="all_pks")
    LoadIsawUB(InputWorkspace=all_pks, Filename='Wish_Diffuse_Scattering_ISAW_UB.mat')

    # Predict the main (integer-HKL) reflections once; fractional peaks
    # are generated from this parent list for each modulation vector.
    parent = PredictPeaks(InputWorkspace=all_pks,
                          WavelengthMin=0.8, WavelengthMax=9.3,
                          MinDSpacing=0.5, ReflectionCondition="Primitive")

    self._pfps = []
    self._saved_files = []
    for iq, modvec in enumerate(qs):
        wsname = f'pfp_{iq}'
        PredictFractionalPeaks(Peaks=parent, IncludeAllPeaksInRange=True,
                               Hmin=0, Hmax=0, Kmin=1, Kmax=1, Lmin=0, Lmax=1,
                               ReflectionCondition='Primitive', MaxOrder=1,
                               ModVector1=",".join(map(str, modvec)),
                               FracPeaks=wsname)
        # Keep only peaks inside the usable wavelength band
        # (should get rid of one peak in the q1 table)
        FilterPeaks(InputWorkspace=wsname, OutputWorkspace=wsname,
                    FilterVariable='Wavelength', FilterValue=9.3, Operator='<')
        FilterPeaks(InputWorkspace=wsname, OutputWorkspace=wsname,
                    FilterVariable='Wavelength', FilterValue=0.8, Operator='>')
        IntegratePeaksMD(InputWorkspace=wsMD, PeakRadius='0.1',
                         BackgroundInnerRadius='0.1', BackgroundOuterRadius='0.15',
                         PeaksWorkspace=wsname, OutputWorkspace=wsname,
                         IntegrateIfOnEdge=False,
                         UseOnePercentBackgroundCorrection=False)
        all_pks = CombinePeaksWorkspaces(LHSWorkspace=all_pks, RHSWorkspace=wsname)
        self._pfps.append(ADS.retrieve(wsname))

    self._filepath = os.path.join(config['defaultsave.directory'],
                                  'WISH_IntegratedSatellite.int')
    SaveReflections(InputWorkspace=all_pks, Filename=self._filepath, Format='Jana')
    self._all_pks = all_pks
def setUpClass(cls):
    """Build the shared workspaces used by every test: a merged/normalised
    HB3A dataset, the peaks found in it, and their MD integration."""
    HB3AAdjustSampleNorm(Filename=cls._files, MergeInputs=True,
                         OutputWorkspace="merged", NormaliseBy='None')
    # Look the merged workspace up once and reuse the handle
    merged = mtd["merged"]
    HB3AFindPeaks(InputWorkspace=merged, CellType="Orthorhombic",
                  Centering="F", OutputWorkspace="peaks")
    IntegratePeaksMD(InputWorkspace=merged, PeaksWorkspace=mtd["peaks"],
                     PeakRadius=0.25, OutputWorkspace="int_peaksmd")
def runTest(self):
    """Check edge-peak rejection on WISH: integrate the same peaks with no
    tube masking, with only the beam-adjacent tubes masked, and with all
    edge tubes masked."""
    # Load an empty WISH instrument definition
    ws = LoadEmptyInstrument(InstrumentName='WISH', OutputWorkspace='WISH')
    ws.getAxis(0).setUnit("TOF")  # need this to add peak to table

    # Peaks placed on specific detectors (order matters for later asserts)
    peaks = CreatePeaksWorkspace(InstrumentWorkspace=ws, NumberOfPeaks=0,
                                 OutputWorkspace='peaks')
    peak_detectors = [
        (1707204, 521),  # pixel in first tube in panel 1
        (1400510, 1),    # pixel at top of a central tube in panel 1
        (1408202, 598),  # pixel in middle of bank 1 (not near edge)
        (1100173, 640),  # pixel in last tube of panel 1 (next to panel 2)
    ]
    for det_id, height in peak_detectors:
        AddPeak(PeaksWorkspace=peaks, RunWorkspace=ws, TOF=20000,
                DetectorID=det_id, Height=height, BinCount=0)

    # Dummy MD workspace for integration (no data needed — only peak
    # shape/edge handling is being checked)
    MD = CreateMDWorkspace(Dimensions='3', Extents='-1,1,-1,1,-1,1',
                           Names='Q_lab_x,Q_lab_y,Q_lab_z', Units='U,U,U',
                           Frames='QLab,QLab,QLab', SplitInto='2',
                           SplitThreshold='50')

    # 1) Mask only pixels at tube ends (built into IntegratePeaksMD)
    self._peaks_pixels = IntegratePeaksMD(InputWorkspace=MD, PeakRadius='0.02',
                                          PeaksWorkspace=peaks,
                                          IntegrateIfOnEdge=False,
                                          OutputWorkspace='peaks_pixels',
                                          MaskEdgeTubes=False)

    # 2) Additionally mask the tubes next to beam in/out (a subset of all
    # edge tubes), then integrate again
    MaskBTP(Workspace='peaks', Bank='5-6', Tube='152')
    MaskBTP(Workspace='peaks', Bank='1,10', Tube='1')
    self._peaks_pixels_beamTubes = IntegratePeaksMD(InputWorkspace='MD',
                                                    PeakRadius='0.02',
                                                    PeaksWorkspace=peaks,
                                                    IntegrateIfOnEdge=False,
                                                    OutputWorkspace='peaks_pixels_beamTubes',
                                                    MaskEdgeTubes=False)

    # 3) Mask every edge tube
    self._peaks_pixels_edgeTubes = IntegratePeaksMD(InputWorkspace='MD',
                                                    PeakRadius='0.02',
                                                    PeaksWorkspace='peaks',
                                                    IntegrateIfOnEdge=False,
                                                    OutputWorkspace='peaks_pixels_edgeTubes',
                                                    MaskEdgeTubes=True)
# MDEW = ConvertToMD(InputWorkspace=event_ws, QDimensions="Q3D", dEAnalysisMode="Elastic", QConversionScales="Q in A^-1", LorentzCorrection='0', MinValues=minQ, MaxValues=maxQ, SplitInto='2', SplitThreshold=split_threshold, MaxRecursionDepth='11') peaks_ws = IntegratePeaksMD(InputWorkspace=MDEW, PeakRadius=peak_radius, CoordinatesToUse="Q (sample frame)", BackgroundOuterRadius=bkg_outer_radius, BackgroundInnerRadius=bkg_inner_radius, PeaksWorkspace=peaks_ws, IntegrateIfOnEdge=integrate_if_edge_peak) elif use_fit_peaks_integration: event_ws = Rebin(InputWorkspace=event_ws, Params=rebin_params, PreserveEvents=preserve_events) peaks_ws = PeakIntegration(InPeaksWorkspace=peaks_ws, InputWorkspace=event_ws, IkedaCarpenterTOF=use_ikeda_carpenter, MatchingRunNo=True, NBadEdgePixels=n_bad_edge_pixels) elif use_ellipse_integration:
def PyExec(self):
    """Integrate peaks from one or more MD workspaces.

    For each (input, peaks) workspace pair, runs IntegratePeaksMD with the
    requested radii. When several inputs are given, the per-run results are
    merged into a single output peaks workspace (carrying the lattice of the
    first run). Optionally applies a Lorentz correction to the intensities,
    drops peaks with non-positive intensity, and writes the result to file
    in SHELX or Fullprof format.

    Raises:
        RuntimeError: if OutputFormat is neither "SHELX" nor "Fullprof"
            while an OutputFile was supplied.
    """
    input_workspaces, peak_workspaces = self._expand_groups()
    output_workspace_name = self.getPropertyValue("OutputWorkspace")

    peak_radius = self.getProperty("PeakRadius").value
    inner_radius = self.getProperty("BackgroundInnerRadius").value
    outer_radius = self.getProperty("BackgroundOuterRadius").value
    remove_0_intensity = self.getProperty("RemoveZeroIntensity").value
    use_lorentz = self.getProperty("ApplyLorentz").value

    multi_ws = len(input_workspaces) > 1

    output_workspaces = []
    for input_ws, peak_ws in zip(input_workspaces, peak_workspaces):
        if multi_ws:
            # Per-run intermediate name; collected for the merge below
            peaks_ws_name = input_ws + '_' + output_workspace_name
            output_workspaces.append(peaks_ws_name)
        else:
            peaks_ws_name = output_workspace_name

        IntegratePeaksMD(InputWorkspace=input_ws,
                         PeakRadius=peak_radius,
                         BackgroundInnerRadius=inner_radius,
                         BackgroundOuterRadius=outer_radius,
                         PeaksWorkspace=peak_ws,
                         OutputWorkspace=peaks_ws_name)

    if multi_ws:
        # Merge the per-run results into one peaks workspace of the same
        # concrete type as the inputs (id() ends in "...sWorkspace")
        peaks_ws_name = output_workspace_name
        CreatePeaksWorkspace(InstrumentWorkspace=input_workspaces[0],
                             NumberOfPeaks=0,
                             OutputWorkspace=peaks_ws_name,
                             OutputType=mtd[peak_workspaces[0]].id().replace('sWorkspace', ''))
        # Copy only the lattice so the combined workspace has a UB
        CopySample(InputWorkspace=output_workspaces[0],
                   OutputWorkspace=peaks_ws_name,
                   CopyName=False, CopyMaterial=False, CopyEnvironment=False,
                   CopyShape=False, CopyLattice=True)
        for peak_ws in output_workspaces:
            CombinePeaksWorkspaces(peaks_ws_name, peak_ws,
                                   OutputWorkspace=peaks_ws_name)
            DeleteWorkspace(peak_ws)  # intermediates are no longer needed

    if use_lorentz:
        # Apply Lorentz correction:
        peaks = AnalysisDataService[peaks_ws_name]
        for p in range(peaks.getNumberPeaks()):
            peak = peaks.getPeak(p)
            lorentz = abs(
                np.sin(peak.getScattering() * np.cos(peak.getAzimuthal())))
            peak.setIntensity(peak.getIntensity() * lorentz)

    if remove_0_intensity:
        # Keep strictly positive intensities only
        FilterPeaks(InputWorkspace=peaks_ws_name,
                    OutputWorkspace=peaks_ws_name,
                    FilterVariable='Intensity', FilterValue=0, Operator='>')

    # Write output only if a file path was provided
    if not self.getProperty("OutputFile").isDefault:
        out_format = self.getProperty("OutputFormat").value
        filename = self.getProperty("OutputFile").value
        if out_format == "SHELX":
            SaveHKL(InputWorkspace=peaks_ws_name, Filename=filename,
                    DirectionCosines=True, OutputWorkspace="__tmp")
            DeleteWorkspace("__tmp")
        elif out_format == "Fullprof":
            SaveReflections(InputWorkspace=peaks_ws_name, Filename=filename,
                            Format="Fullprof")
        else:
            # This shouldn't happen (the property is validated), but fail
            # loudly if it does. BUG FIX: the exception was previously
            # constructed without being raised, silently skipping the save.
            raise RuntimeError("Invalid output format given")

    self.setProperty("OutputWorkspace", AnalysisDataService[peaks_ws_name])