def find_data(file, instrument='', allow_multiple=False):
    """
    Finds a file path for the specified data set, which can either be:
        - a run number
        - an absolute path
        - a file name
    @param file: file name or part of a file name
    @param instrument: if supplied, FindNeXus will be tried as a last resort
    """
    # First, assume a file name
    file = str(file).strip()

    # If we allow multiple files, users may use ; as a separator,
    # which is incompatible with the FileFinder
    n_files = 1
    if allow_multiple:
        file = file.replace(';', ',')
        toks = file.split(',')
        n_files = len(toks)

    instrument = str(instrument)
    file_path = FileFinder.getFullPath(file)
    if os.path.isfile(file_path):
        return file_path

    # Second, assume a run number and pass the instrument name as a hint
    try:
        # FileFinder doesn't like dashes...
        instrument = instrument.replace('-', '')
        f = FileFinder.findRuns(instrument + file)
        if os.path.isfile(f[0]):
            if allow_multiple:
                # Mantid returns its own list object type, so make a real list out of it
                if len(f) == n_files:
                    return [i for i in f]
            else:
                return f[0]
    except:
        # FileFinder couldn't make sense of the supplied information
        pass

    # Third, assume a run number, without instrument name to take care of list of full paths
    try:
        f = FileFinder.findRuns(file)
        if os.path.isfile(f[0]):
            if allow_multiple:
                # Mantid returns its own list object type, so make a real list out of it
                if len(f) == n_files:
                    return [i for i in f]
            else:
                return f[0]
    except:
        # FileFinder couldn't make sense of the supplied information
        pass

    # If we didn't find anything, raise an exception
    Logger('find_data').error(
        "\n\nCould not find a file for %s: check your reduction parameters\n\n" % str(file))
    raise RuntimeError("Could not find a file for %s" % str(file))
def validateASCII(self):
    """Validate ASCII files using difflib."""
    (measured, expected) = self.validate()
    if not os.path.isabs(measured):
        measured = FileFinder.Instance().getFullPath(measured)
    if not os.path.isabs(expected):
        expected = FileFinder.Instance().getFullPath(expected)

    measured = self.__prepASCIIFile(measured)
    expected = self.__prepASCIIFile(expected)

    # calculate the difference
    diff = difflib.Differ().compare(measured, expected)
    result = []
    for line in diff:
        if line.startswith('+') or line.startswith('-') or line.startswith('?'):
            result.append(line)

    # print the difference
    if len(result) > 0:
        if self.stripWhitespace:
            msg = "(whitespace stripped from ends)"
        else:
            msg = ""
        print("******************* Difference in files", msg)
        print("\n".join(result))
        print("*******************")
        return False
    else:
        return True
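# Added for reference (not part of the original test harness): the difflib.Differ
# prefixes filtered above are '- ' (only in measured), '+ ' (only in expected) and
# '? ' (intra-line hint lines that belong to neither input). A minimal standalone check:
import difflib

for line in difflib.Differ().compare(["1 2\n"], ["1 3\n"]):
    print(repr(line))
# Prints something like '- 1 2\n', '?   ^\n', '+ 1 3\n', '?   ^\n' (hint lines may vary).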
def find_data(file, instrument='', allow_multiple=False):
    """
    Finds a file path for the specified data set, which can either be:
        - a run number
        - an absolute path
        - a file name
    @param file: file name or part of a file name
    @param instrument: if supplied, FindNeXus will be tried as a last resort
    """
    # First, assume a file name
    file = str(file).strip()

    # If we allow multiple files, users may use ; as a separator,
    # which is incompatible with the FileFinder
    n_files = 1
    if allow_multiple:
        file = file.replace(';', ',')
        toks = file.split(',')
        n_files = len(toks)

    instrument = str(instrument)
    file_path = FileFinder.getFullPath(file)
    if os.path.isfile(file_path):
        return file_path

    # Second, assume a run number and pass the instrument name as a hint
    try:
        # FileFinder doesn't like dashes...
        instrument = instrument.replace('-', '')
        f = FileFinder.findRuns(instrument + file)
        if os.path.isfile(f[0]):
            if allow_multiple:
                # Mantid returns its own list object type, so make a real list out of it
                if len(f) == n_files:
                    return [i for i in f]
            else:
                return f[0]
    except:
        # FileFinder couldn't make sense of the supplied information
        pass

    # Third, assume a run number, without instrument name to take care of list of full paths
    try:
        f = FileFinder.findRuns(file)
        if os.path.isfile(f[0]):
            if allow_multiple:
                # Mantid returns its own list object type, so make a real list out of it
                if len(f) == n_files:
                    return [i for i in f]
            else:
                return f[0]
    except:
        # FileFinder couldn't make sense of the supplied information
        pass

    # If we didn't find anything, raise an exception
    Logger('find_data').error("\n\nCould not find a file for %s: check your reduction parameters\n\n" % str(file))
    raise RuntimeError("Could not find a file for %s" % str(file))
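# Usage sketch for find_data (added illustration, not from the original module). The run
# numbers, instrument and file name below are hypothetical and resolve only if the Mantid
# data search directories or archive search can locate them.
single_path = find_data(12345, instrument='EQSANS')        # run number -> one full path
named_path = find_data('EQSANS_12345_event.nxs')           # file name -> one full path
several = find_data('12345,12346', instrument='EQSANS',
                    allow_multiple=True)                   # comma/semicolon list -> list of paths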
def FileExists(filename):
    if os.path.exists(filename):
        return True
    # or use filefinder to search
    try:
        FileFinder.findRuns(filename)
        return True
    except:
        return False
def _is_mantid_loadable(filename):
    from mantid.api import FileFinder
    if FileFinder.getFullPath(filename):
        return True
    else:
        try:
            # findRuns throws rather than return empty so need try-catch
            FileFinder.findRuns(filename)
            return True
        except Exception:
            return False
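# Added example of the two-step lookup pattern in _is_mantid_loadable: getFullPath for
# names already on the data search path, findRuns (which raises instead of returning an
# empty result) for run numbers. The file name is a placeholder.
if _is_mantid_loadable("MUSR00022725.nxs"):
    print("Mantid can locate this file or run number")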
def test_that_find_runs_accepts_a_list_of_string_and_a_bool(self):
    try:
        runs = FileFinder.findRuns("CNCS7860", useExtsOnly=True)
        FileFinder.findRuns("CNCS7860", [".nxs", ".txt"], useExtsOnly=True)
    except Exception as e:
        if type(e).__name__ == "ArgumentError":
            self.assertFalse(True, "Expected findRuns to accept a list of strings and a bool as input."
                                   " {} error was raised with message {}".format(type(e).__name__, str(e)))
    else:
        # Confirm that it works as above
        self.assertTrue(len(runs) == 1)
        self.assertTrue(os.path.exists(runs[0]))
def test_runinfo_correct(self):
    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename, _ = load_utils.load_workspace_from_filename(file_path)
    self.data_context._loaded_data.remove_data(run=run)
    self.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename, instrument='MUSR')
    self.data_context.current_runs = [[22725]]
    self.context.update_current_data()
    test_pair = MuonPair('test_pair', 'top', 'bottom', alpha=0.75)
    self.group_context.add_pair(pair=test_pair)

    self.presenter.update_view_from_model()

    expected = [
        call("Instrument : MUSR"),
        call("Run : 22725"),
        call("Title : FeTeSe T=1 F=100"),
        call("Comment : FC first sample"),
        call("Start : 2009-03-24T04:18:58"),
        call("End : 2009-03-24T04:56:26"),
        call("Counts (MEv) : 20.076704"),
        call("Good Frames : 88540"),
        call("Counts per Good Frame : 226.753"),
        call("Counts per Good Frame per det : 3.543"),
        call("Average Temperature (K) : 19.69992"),
        call("Sample Temperature (K) : 1.0"),
        call("Sample Magnetic Field (G) : 100.0"),
        call("Number of DAQ Periods : 1")
    ]

    self.assertEqual(self.view.add_text_line.call_args_list, expected)
def setUp(self):
    self._qapp = mock_widget.mockQapp()
    # Store an empty widget to parent all the views, and ensure they are deleted correctly
    self.obj = QtWidgets.QWidget()
    setup_context_for_tests(self)
    self.data_context.instrument = 'MUSR'
    self.gui_context.update({'RebinType': 'None'})
    self.model = maxent_model.MaxEntModel()
    self.view = maxent_view_new.MaxEntView(self.obj)
    self.presenter = maxent_presenter_new.MaxEntPresenter(self.view, self.context)

    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename = load_utils.load_workspace_from_filename(file_path)
    self.data_context._loaded_data.remove_data(run=run)
    self.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename, instrument='MUSR')
    self.data_context.current_runs = [[22725]]
    self.context.update_current_data()
    test_pair = MuonPair('test_pair', 'top', 'bottom', alpha=0.75)
    self.group_context.add_pair(pair=test_pair)
    self.view.warning_popup = mock.MagicMock()
def setUp(self):
    self._qapp = mock_widget.mockQapp()
    # Store an empty widget to parent all the views, and ensure they are deleted correctly
    self.obj = QtGui.QWidget()
    ConfigService['default.instrument'] = 'MUSR'
    setup_context_for_tests(self)
    self.context.instrument = 'MUSR'

    self.load_file_view = BrowseFileWidgetView(self.obj)
    self.load_run_view = LoadRunWidgetView(self.obj)
    self.load_file_model = BrowseFileWidgetModel(self.loaded_data, self.context)
    self.load_run_model = LoadRunWidgetModel(self.loaded_data, self.context)
    self.view = LoadWidgetView(parent=self.obj, load_file_view=self.load_file_view,
                               load_run_view=self.load_run_view)
    self.presenter = LoadWidgetPresenter(self.view, LoadWidgetModel(self.loaded_data, self.context))
    self.presenter.set_load_file_widget(BrowseFileWidgetPresenter(self.load_file_view, self.load_file_model))
    self.presenter.set_load_run_widget(LoadRunWidgetPresenter(self.load_run_view, self.load_run_model))
    self.presenter.load_file_widget._view.warning_popup = mock.MagicMock()
    self.presenter.load_run_widget._view.warning_popup = mock.MagicMock()

    self.view.multiple_loading_check.setCheckState(1)
    self.presenter.handle_multiple_files_option_changed()

    self.runs = [15196, 15197]
    self.workspaces = [self.create_fake_workspace(1) for _ in self.runs]
    self.filenames = FileFinder.findRuns('MUSR00015196.nxs, MUSR00015197.nxs')
def __verifyRequiredFile(self, filename):
    '''Return True if the specified file name is findable by Mantid.'''
    from mantid.api import FileFinder

    # simple way is just getFullPath which never uses archive search
    if os.path.exists(FileFinder.getFullPath(filename)):
        return True

    # try full findRuns which will use archive search if it is turned on
    try:
        candidates = FileFinder.findRuns(filename)
        for item in candidates:
            if os.path.exists(item):
                return True
    except RuntimeError:
        return False
def setUp(self):
    # Store an empty widget to parent all the views, and ensure they are deleted correctly
    self.obj = QWidget()
    ConfigService['default.instrument'] = 'MUSR'
    setup_context_for_tests(self)
    self.context.instrument = 'MUSR'

    self.load_file_view = BrowseFileWidgetView(self.obj)
    self.load_run_view = LoadRunWidgetView(self.obj)
    self.load_file_model = BrowseFileWidgetModel(self.loaded_data, self.context)
    self.load_run_model = LoadRunWidgetModel(self.loaded_data, self.context)
    self.view = LoadWidgetView(parent=self.obj, load_file_view=self.load_file_view,
                               load_run_view=self.load_run_view)
    self.presenter = LoadWidgetPresenter(self.view, LoadWidgetModel(self.loaded_data, self.context))
    self.presenter.set_load_file_widget(BrowseFileWidgetPresenter(self.load_file_view, self.load_file_model))
    self.presenter.set_load_run_widget(LoadRunWidgetPresenter(self.load_run_view, self.load_run_model))
    self.presenter.load_file_widget._view.warning_popup = mock.MagicMock()
    self.presenter.load_run_widget._view.warning_popup = mock.MagicMock()

    self.view.multiple_loading_check.setCheckState(1)
    self.presenter.handle_multiple_files_option_changed()

    self.runs = [15196, 15197]
    self.workspaces = [self.create_fake_workspace(1) for _ in self.runs]
    self.filenames = FileFinder.findRuns('MUSR00015196.nxs, MUSR00015197.nxs')
def _run(self):
    '''Defines the workflow for the test'''
    self.tolerance = 1e-2
    self.samples = [sample[:-4] for sample in self.samples]

    # load files into mantid
    for sample in self.samples:
        LoadNexus(sample, OutputWorkspace=sample)
    LoadNexus(FileFinder.getFullPath(self.resolution), OutputWorkspace=self.resolution)

    _, iqt_ws = TransformToIqt(SampleWorkspace=self.samples[0],
                               ResolutionWorkspace=self.resolution,
                               EnergyMin=self.e_min,
                               EnergyMax=self.e_max,
                               BinReductionFactor=self.num_bins,
                               DryRun=False,
                               NumberOfIterations=200)

    # Test IqtFitMultiple
    iqtfitSeq_ws, params, fit_group = IqtFitMultiple(iqt_ws.name(), self.func, self.ftype, self.startx,
                                                     self.endx, self.spec_min, self.spec_max)

    self.result_names = [iqt_ws.name(), iqtfitSeq_ws.name()]

    # remove workspaces from mantid
    for sample in self.samples:
        DeleteWorkspace(sample)
    DeleteWorkspace(params)
    DeleteWorkspace(fit_group)
    DeleteWorkspace(self.resolution)
def runTest(self):
    UseCompatibilityMode()
    LOQ()
    Detector("main-detector-bank")
    csv_file = FileFinder.getFullPath('batch_input.csv')

    Set1D()
    MaskFile('MASK.094AA')
    Gravity(True)

    BatchReduce(csv_file, 'raw', plotresults=False, saveAlgs={'SaveCanSAS1D': 'xml', 'SaveNexus': 'nxs'})

    LoadNexus(Filename='54433sans.nxs', OutputWorkspace='result')
    Plus(LHSWorkspace='result', RHSWorkspace='99630sanotrans', OutputWorkspace='result')

    os.remove(os.path.join(config['defaultsave.directory'], '54433sans.nxs'))
    os.remove(os.path.join(config['defaultsave.directory'], '99630sanotrans.nxs'))
    os.remove(os.path.join(config['defaultsave.directory'], '54433sans.xml'))
    os.remove(os.path.join(config['defaultsave.directory'], '99630sanotrans.xml'))
def setUp(self):
    self.context = setup_context(True)
    self.context.data_context.instrument = 'MUSR'
    self.context.gui_context.update({'RebinType': 'None'})

    self.model = maxent_model.MaxEntModel()
    self.view = maxent_view_new.MaxEntView()
    self.presenter = maxent_presenter_new.MaxEntPresenter(self.view, self.context)

    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename, _ = load_utils.load_workspace_from_filename(file_path)
    self.context.data_context._loaded_data.remove_data(run=run)
    self.context.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename,
                                                    instrument='MUSR')
    self.context.data_context.current_runs = [[22725]]
    self.context.update_current_data()

    test_pair = MuonPair('test_pair', 'top', 'bottom', alpha=0.75)
    self.context.group_pair_context.add_pair(pair=test_pair)
    self.view.warning_popup = mock.MagicMock()
def setUp(self):
    self.context = setup_context(True)
    self.context.data_context.instrument = 'MUSR'
    self.context.gui_context.update({'RebinType': 'None'})

    self.view = fft_view_new.FFTView()
    self.model1 = fft_model.FFTModel()
    self.model = fft_model.FFTWrapper
    self.presenter = fft_presenter_new.FFTPresenter(self.view, self.model, self.context)

    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename, _ = load_utils.load_workspace_from_filename(file_path)
    self.context.data_context._loaded_data.remove_data(run=run)
    self.context.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename,
                                                    instrument='MUSR')
    self.context.data_context.current_runs = [[22725]]
    self.context.update_current_data()

    test_pair = MuonPair(EXAMPLE_PAIR, 'top', 'bottom', alpha=0.75)
    self.context.group_pair_context.add_pair(pair=test_pair)
    self.context.show_all_groups()
    self.context.show_all_pairs()
    self.context.group_pair_context._selected_groups = GROUP_LIST
    self.context.group_pair_context._selected_pairs = [EXAMPLE_PAIR]

    self.view.warning_popup = mock.MagicMock()
def get_current_run_filename(instrument):
    """
    If instrument is supported, attempts to find the file on the ISIS network
    which contains the data from its current (most up-to-date) run.
    """
    instrument_directory = get_instrument_directory(instrument)
    if instrument_directory is None:
        return ""

    file_path = _instrument_data_directory(instrument_directory) + FILE_SEP
    autosave_file_name = file_path + "autosave.run"
    current_run_filename = ""
    if not check_file_exists(autosave_file_name):
        raise ValueError("Cannot find file : " + autosave_file_name)

    with open(autosave_file_name, 'r') as autosave_file:
        for line in autosave_file:
            line = line.replace(" ", "")
            file_name = file_path + line
            if check_file_exists(FileFinder.getFullPath(file_name)):
                current_run_filename = file_name

    if current_run_filename == "":
        # Default to auto_A (replicates MuonAnalysis 1.0 behaviour)
        current_run_filename = file_path + instrument_directory + "auto_A.tmp"
        warning("Failed to find latest run, defaulting to " + current_run_filename)

    return current_run_filename
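# Illustrative call (not part of the original module), assuming 'MUSR' is a supported
# instrument and the ISIS network share holding autosave.run is reachable; otherwise the
# function falls back to the auto_A.tmp file or returns "".
latest_run_file = get_current_run_filename('MUSR')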
def find_sans_file(file_name):
    """
    Finds a SANS file.
    The file can be specified as:
    1. file.ext or path1 path2 file.ext
    2. run number
    :param file_name: a file name or a run number.
    :return: the full path.
    """
    full_path = find_full_file_path(file_name)
    if not full_path and not file_name.endswith('.nxs'):
        full_path = find_full_file_path(file_name + '.nxs')
    if not full_path:
        # TODO: If we only provide a run number, for example 98843 for LOQ measurements, but have LARMOR specified as
        # the Mantid instrument, then the FileFinder will search itself to death. This is a general Mantid issue.
        # One way to handle this gracefully would be a timeout option.
        runs = FileFinder.findRuns(file_name)
        if runs:
            full_path = runs[0]
    if not full_path:
        error_message = "Trying to find the SANS file {0}, but cannot find it. Make sure that " \
                        "the relevant paths are added and the correct instrument is selected."
        raise RuntimeError(error_message.format(file_name))
    return full_path
def find_sans_file(file_name):
    """
    Finds a SANS file.
    The file can be specified as:
    1. file.ext or path1 path2 file.ext
    2. run number
    :param file_name: a file name or a run number.
    :return: the full path.
    """
    error_message = "Trying to find the SANS file {0}, but cannot find it. Make sure that " \
                    "the relevant paths are added and the correct instrument is selected."
    try:
        full_path = find_full_file_path(file_name)
        if not full_path and not file_name.endswith('.nxs'):
            full_path = find_full_file_path(file_name + '.nxs')
        if not full_path:
            # TODO: If we only provide a run number, for example 98843 for LOQ measurements, but have LARMOR
            # specified as the Mantid instrument, then the FileFinder will search itself to death. This is a
            # general Mantid issue. One way to handle this gracefully would be a timeout option.
            file_name_as_bytes = str.encode(file_name)
            assert (type(file_name_as_bytes) == bytes)
            runs = FileFinder.findRuns(file_name_as_bytes)
            if runs:
                full_path = runs[0]
    except RuntimeError:
        raise RuntimeError(error_message.format(file_name))
    if not full_path:
        raise RuntimeError(error_message.format(file_name))
    return full_path
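# Hedged usage sketch for find_sans_file: a file name, a name without extension and a
# bare run number are all accepted. The values below are placeholders and resolve only
# when the matching instrument and data directories are configured in Mantid.
path_from_name = find_sans_file('SANS2D00022024.nxs')
path_from_stem = find_sans_file('SANS2D00022024')   # '.nxs' is appended and retried
path_from_run = find_sans_file('22024')             # falls back to FileFinder.findRuns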
def _run(self):
    '''Defines the workflow for the test'''
    self.tolerance = 1e-3
    self.samples = [sample[:-4] for sample in self.samples]

    # Load files into Mantid
    for sample in self.samples:
        LoadNexus(sample, OutputWorkspace=sample)
    LoadNexus(FileFinder.getFullPath(self.resolution), OutputWorkspace=self.resolution)

    _, iqt_ws = TransformToIqt(SampleWorkspace=self.samples[0],
                               ResolutionWorkspace=self.resolution,
                               EnergyMin=self.e_min,
                               EnergyMax=self.e_max,
                               BinReductionFactor=self.num_bins,
                               DryRun=False,
                               NumberOfIterations=200)

    # Test IqtFit Sequential
    iqtfitSeq_ws, params, fit_group = IqtFitSequential(InputWorkspace=iqt_ws, Function=self.func,
                                                       StartX=self.startx, EndX=self.endx,
                                                       SpecMin=0, SpecMax=self.spec_max)

    self.result_names = [iqt_ws.name(), iqtfitSeq_ws[0].name()]

    # Remove workspaces from Mantid
    for sample in self.samples:
        DeleteWorkspace(sample)
    DeleteWorkspace(params)
    DeleteWorkspace(fit_group)
    DeleteWorkspace(self.resolution)
def getDataFileNames(self, runsetupdict, advsetupdict):
    """ Obtain the data file names (run names + SUFFIX)

    Return: list of files
    """
    runnumbers_str = str(runsetupdict["RunNumber"])
    if runnumbers_str.count(':') > 0:
        runnumbers_str = runnumbers_str.replace(':', '-')
    runnumbers_str = FileFinder.findRuns('{}{}'.format(self.instrument_name, runnumbers_str))
    runnumbers_str = [os.path.split(filename)[-1] for filename in runnumbers_str]

    # create an integer version
    runnumbers = []
    for filename in runnumbers_str:
        for extension in ['_event.nxs', '.nxs.h5']:
            filename = filename.replace(extension, '')
        runnumber = filename.split('_')[-1]
        runnumbers.append(int(runnumber))

    # put together the output
    datafilenames = []
    for (filename, runnumber) in zip(runnumbers_str, runnumbers):
        datafilenames.append((runnumber, filename))

    return datafilenames
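# Sketch of the run-range handling above (added illustration): a "start:stop" string is
# rewritten to "start-stop" before FileFinder.findRuns expands it into file names. The
# reducer instance, instrument and run numbers are hypothetical.
run_setup = {"RunNumber": "9829:9830"}
adv_setup = {}
data_files = reducer.getDataFileNames(run_setup, adv_setup)
# e.g. [(9829, 'PG3_9829_event.nxs'), (9830, 'PG3_9830_event.nxs')] for a POWGEN-style setup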
def setUp(self):
    self._qapp = mock_widget.mockQapp()
    self.obj = QtGui.QWidget()
    ConfigService['default.instrument'] = 'MUSR'
    setup_context_for_tests(self)
    self.gui_context['RebinType'] = 'None'

    self.view = HomeGroupingWidgetView(self.obj)
    self.model = HomeGroupingWidgetModel(self.context)
    self.presenter = HomeGroupingWidgetPresenter(self.view, self.model)

    self.view.warning_popup = mock.MagicMock()
    self.view.instrument_changed_warning = mock.MagicMock(return_value=1)

    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename = load_utils.load_workspace_from_filename(file_path)
    self.data_context._loaded_data.remove_data(run=run)
    self.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename, instrument='MUSR')
    self.data_context.current_runs = [[22725]]
    self.context.update_current_data()

    test_pair = MuonPair('test_pair', 'top', 'bottom', alpha=0.75)
    self.group_context.add_pair(pair=test_pair)
    self.presenter.update_group_pair_list()
def setUp(self):
    AnalysisDataService.clear()
    ConfigService['MantidOptions.InvisibleWorkspaces'] = 'True'
    self.filepath = FileFinder.findRuns('EMU00019489.nxs')[0]

    self.load_result, self.run_number, self.filename, psi_data = load_workspace_from_filename(self.filepath)
    self.assert_(not psi_data)

    self.context = setup_context(True)
    self.context.gui_context.update({'RebinType': 'None'})
    self.loaded_data = self.context.data_context._loaded_data
    self.data_context = self.context.data_context
    self.gui_context = self.context.gui_context
    self.group_pair_context = self.context.group_pair_context
    self.data_context.instrument = 'EMU'

    self.loaded_data.add_data(workspace=self.load_result, run=[self.run_number], filename=self.filename,
                              instrument='EMU')
    self.data_context.current_runs = [[self.run_number]]
    self.data_context.update_current_data()
    self.group_pair_context.reset_group_and_pairs_to_default(self.load_result['OutputWorkspace'][0].workspace,
                                                             'EMU', '', 1)
def cleanup(self):
    Files = ["TOPAZ_3132.hkl", "TOPAZ_3132FFT.hkl"]
    for filename in Files:
        absfile = FileFinder.getFullPath(filename)
        if os.path.exists(absfile):
            os.remove(absfile)
    return True
def _create_peaks_workspace(self):
    """Create a dummy peaks workspace"""
    path = FileFinder.getFullPath("IDFs_for_UNIT_TESTING/MINITOPAZ_Definition.xml")
    inst = LoadEmptyInstrument(Filename=path)
    ws = CreatePeaksWorkspace(inst, 0)
    DeleteWorkspace(inst)
    SetUB(ws, 1, 1, 1, 90, 90, 90)

    # Add a bunch of random peaks that happen to fall on the
    # detector bank defined in the IDF
    center_q = np.array([-5.1302, 2.5651, 3.71809])
    qs = []
    for i in np.arange(0, 1, 0.1):
        for j in np.arange(-0.5, 0, 0.1):
            q = center_q.copy()
            q[1] += j
            q[2] += i
            qs.append(q)

    # Add the peaks to the PeaksWorkspace with dummy values for intensity,
    # Sigma, and HKL
    for q in qs:
        peak = ws.createPeak(q)
        peak.setIntensity(100)
        peak.setSigmaIntensity(10)
        peak.setHKL(1, 1, 1)
        ws.addPeak(peak)

    return ws
def test_get_pair_phasequad_name(self):
    AnalysisDataService.clear()
    ConfigService['MantidOptions.InvisibleWorkspaces'] = 'True'
    filepath = FileFinder.findRuns('EMU00019489.nxs')[0]

    load_result, run_number, filename, psi_data = load_workspace_from_filename(filepath)

    context = setup_context()
    context.gui_context.update({'RebinType': 'None'})
    context.data_context.instrument = 'EMU'

    context.data_context._loaded_data.add_data(workspace=load_result, run=[run_number], filename=filename,
                                               instrument='EMU')
    context.data_context.current_runs = [[run_number]]
    context.data_context.update_current_data()

    self.assertEqual("EMU19489; PhaseQuad; test_Re; MA",
                     get_pair_phasequad_name(context, "test_Re", "19489", False))
    self.assertEqual("EMU19489; PhaseQuad; test_Re; Rebin; MA",
                     get_pair_phasequad_name(context, "test_Re", "19489", True))
def test_runinfo_correct(self):
    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename = load_utils.load_workspace_from_filename(file_path)
    self.data_context._loaded_data.remove_data(run=run)
    self.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename, instrument='MUSR')
    self.data_context.current_runs = [[22725]]
    self.context.update_current_data()
    test_pair = MuonPair('test_pair', 'top', 'bottom', alpha=0.75)
    self.group_context.add_pair(pair=test_pair)

    self.presenter.update_view_from_model()

    expected_string_list = ['Instrument:MUSR', 'Run:22725', 'Title:FeTeSeT=1F=100', 'Comment:FCfirstsample',
                            'Start:2009-03-24T04:18:58', 'End:2009-03-24T04:56:26', 'Counts(MEv):20.076704',
                            'GoodFrames:88540', 'CountsperGoodFrame:226.753', 'CountsperGoodFrameperdet:3.543',
                            'AverageTemperature(K):2.53386', 'SampleTemperature(K):1.0',
                            'SampleMagneticField(G):100.0']

    self.assertEqual(str(self.view.run_info_box.toPlainText()).replace(' ', '').splitlines(),
                     expected_string_list)
def runTest(self):
    UseCompatibilityMode()
    config['default.instrument'] = 'SANS2D'
    SANS2DTUBES()
    Set1D()
    Detector("rear-detector")
    # This contains two MASKFILE commands, each resulting in a separate call to MaskDetectors.
    MaskFile('SANS2DTube_ZerroErrorFreeTest.txt')

    # Saves a file which produces an output file which does not contain any zero errors
    csv_file = FileFinder.getFullPath("SANS2DTUBES_ZeroErrorFree_batch.csv")
    save_alg = {"SaveNexus": "nxs"}
    BatchReduce(csv_file, 'nxs', saveAlgs=save_alg, plotresults=False, save_as_zero_error_free=True)
    DeleteWorkspace('zero_free_out_rear_1D_1.75_12.5')

    # The zero correction only occurs for the saved files. Stephen King mentioned that the
    # original workspaces should not be tampered with
    self._final_output = os.path.join(config['defaultsave.directory'],
                                      'zero_free_out_rear_1D_1.75_12.5.nxs')
    self._final_workspace = 'ws'
    Load(Filename=self._final_output, OutputWorkspace=self._final_workspace)
def setUp(self):
    AnalysisDataService.clear()

    self.filepath = FileFinder.findRuns('EMU00019489.nxs')[0]
    self.load_result, self.run_number, self.filename = load_workspace_from_filename(self.filepath)

    self.loaded_data = MuonLoadData()
    self.data_context = MuonDataContext(self.loaded_data)
    self.gui_context = MuonGuiContext()
    self.group_pair_context = MuonGroupPairContext()
    self.gui_context.update({'RebinType': 'None'})

    self.context = MuonContext(muon_data_context=self.data_context, muon_gui_context=self.gui_context,
                               muon_group_context=self.group_pair_context)

    self.data_context.instrument = 'EMU'

    self.loaded_data.add_data(workspace=self.load_result, run=[self.run_number], filename=self.filename,
                              instrument='EMU')
    self.data_context.current_runs = [[self.run_number]]
    self.data_context.update_current_data()
    self.group_pair_context.reset_group_and_pairs_to_default(self.load_result['OutputWorkspace'][0]._workspace,
                                                             'EMU', '')
def _run(self):
    '''Defines the workflow for the test'''
    self.tolerance = 1e-7
    self.samples = [sample[:-4] for sample in self.samples]

    # Load files into Mantid
    for sample in self.samples:
        LoadNexus(sample, OutputWorkspace=sample)
    LoadNexus(FileFinder.getFullPath(self.resolution), OutputWorkspace=self.resolution)

    _, iqt_ws = TransformToIqt(SampleWorkspace=self.samples[0],
                               ResolutionWorkspace=self.resolution,
                               EnergyMin=self.e_min,
                               EnergyMax=self.e_max,
                               BinReductionFactor=self.num_bins,
                               DryRun=False)

    # Test IqtFit Sequential
    iqtfitSeq_ws, params, fit_group = IqtFitSequential(InputWorkspace=iqt_ws, Function=self.func,
                                                       StartX=self.startx, EndX=self.endx,
                                                       SpecMin=0, SpecMax=self.spec_max)

    self.result_names = [iqt_ws.name(), iqtfitSeq_ws.name()]

    # Remove workspaces from Mantid
    for sample in self.samples:
        DeleteWorkspace(sample)
    DeleteWorkspace(params)
    DeleteWorkspace(fit_group)
    DeleteWorkspace(self.resolution)
def _create_peaks_workspace(self):
    """Create a dummy peaks workspace"""
    path = FileFinder.getFullPath("IDFs_for_UNIT_TESTING/MINITOPAZ_Definition.xml")
    inst = LoadEmptyInstrument(Filename=path)
    ws = CreatePeaksWorkspace(inst, 0)
    DeleteWorkspace(inst)
    SetUB(ws, 1, 1, 1, 90, 90, 90)

    # Add a bunch of random peaks that happen to fall on the
    # detector bank defined in the IDF
    center_q = np.array([-5.1302, 2.5651, 3.71809])
    qs = []
    for i in np.arange(0, 1, 0.1):
        for j in np.arange(-0.5, 0, 0.1):
            q = center_q.copy()
            q[1] += j
            q[2] += i
            qs.append(q)

    # Add the peaks to the PeaksWorkspace with dummy values for intensity,
    # Sigma, and HKL
    for q in qs:
        peak = ws.createPeak(q)
        peak.setIntensity(100)
        peak.setSigmaIntensity(10)
        peak.setHKL(1, 1, 1)
        ws.addPeak(peak)

    return ws
def setUp(self):
    # Store an empty widget to parent all the views, and ensure they are deleted correctly
    self.obj = QWidget()
    setup_context_for_tests(self)
    self.context.instrument = 'EMU'
    self.load_file_view = BrowseFileWidgetView(self.obj)
    self.load_run_view = LoadRunWidgetView(self.obj)
    self.load_file_model = BrowseFileWidgetModel(self.loaded_data, self.context)
    self.load_run_model = LoadRunWidgetModel(self.loaded_data, self.context)

    self.presenter = LoadWidgetPresenter(
        LoadWidgetView(parent=self.obj, load_file_view=self.load_file_view, load_run_view=self.load_run_view),
        LoadWidgetModel(self.loaded_data, self.context))

    self.presenter.set_load_file_widget(BrowseFileWidgetPresenter(self.load_file_view, self.load_file_model))
    self.presenter.set_load_run_widget(LoadRunWidgetPresenter(self.load_run_view, self.load_run_model))

    self.filepath = FileFinder.findRuns('MUSR00022725.nxs')[0]

    self.load_patcher = mock.patch(
        'mantidqtinterfaces.Muon.GUI.Common.load_file_widget.model.load_utils.load_workspace_from_filename')
    self.addCleanup(self.load_patcher.stop)
    self.load_mock = self.load_patcher.start()

    self.load_run_patcher = mock.patch(
        'mantidqtinterfaces.Muon.GUI.Common.load_run_widget.load_run_model.load_utils.load_workspace_from_filename')
    self.addCleanup(self.load_run_patcher.stop)
    self.load_run_mock = self.load_run_patcher.start()

    self.mock_workspace = self.create_fake_workspace(1)
    self.mock_loading_from_browse(self.mock_workspace, r"C:\dir1\dir2\dir3\EMU0001234.nxs", 1234)
    file_utils.get_current_run_filename = mock.Mock(return_value=r"C:\dir1\dir2\dir3\EMU0001234.nxs")

    self.presenter.load_file_widget._view.warning_popup = mock.MagicMock()
    self.presenter.load_run_widget._view.warning_popup = mock.MagicMock()

    self.popup_patcher = mock.patch('mantidqtinterfaces.Muon.GUI.Common.thread_model.warning')
    self.addCleanup(self.popup_patcher.stop)
    self.popup_mock = self.popup_patcher.start()

    def setGroupAndPairsToEmptyList(grouping_context):
        grouping_context._groups = []
        grouping_context._pairs = []

    self.group_context.reset_group_and_pairs_to_default = mock.MagicMock(
        side_effect=setGroupAndPairsToEmptyList(self.group_context))
def find_full_file_path(file_name):
    """
    Gets the full path of a file name if it is available on the Mantid paths.

    :param file_name: the name of the file.
    :return: the full file path.
    """
    return FileFinder.getFullPath(file_name)
def cleanup(self):
    Files = ["TOPAZ_3132.hkl", "TOPAZ_3132FFT.hkl"]
    for file in Files:
        absfile = FileFinder.getFullPath(file)
        if os.path.exists(absfile):
            os.remove(absfile)
    return True
def _get_workspace(self, file_name):
    full_file_name = FileFinder.findRuns(file_name)[0]
    load_name = "Load"
    load_options = {"Filename": full_file_name,
                    "OutputWorkspace": EMPTY_NAME}
    load_alg = create_unmanaged_algorithm(load_name, **load_options)
    load_alg.execute()
    return load_alg.getProperty("OutputWorkspace").value
def runTest(self):
    UseCompatibilityMode()
    SANS2D()
    MaskFile('MaskSANS2DReductionGUI.txt')
    SetEventSlices("0.0-451, 5-10")
    batch_file = FileFinder.getFullPath('sans2d_reduction_gui_batch.csv')
    BatchReduce(batch_file, '.nxs', saveAlgs={}, combineDet='rear')
def find_file(filename=None):
    """
    Calculates path of filename with the testing data. Path is determined in the
    platform independent way.

    :param filename: name of file to find
    :return: full path for the file with the testing data
    """
    from mantid.api import FileFinder
    return FileFinder.Instance().getFullPath(filename)
def do_cleanup():
    Files = ["PG3_9829.gsa", "PG3_9829.py", "PG3_9830.gsa", "PG3_9830.py"]
    for file in Files:
        absfile = FileFinder.getFullPath(file)
        if os.path.exists(absfile):
            os.remove(absfile)
    return True
def do_cleanup():
    Files = ["BioSANS_test_data_reduction.log",
             "BioSANS_test_data_Iq.xml",
             "BioSANS_test_data_Iq.txt",
             "BioSANS_test_data_Iqxy.dat"]
    for filename in Files:
        absfile = FileFinder.getFullPath(filename)
        if os.path.exists(absfile):
            os.remove(absfile)
    return True
def test_browse_clicked_succeeds_if_table_in_ADS(self):
    filename = FileFinder.findRuns('MUSR00015196.nxs')[0]
    self.view.show_file_browser_and_return_selection = mock.MagicMock(return_value=[filename])
    self.model.check_dead_time_file_selection = mock.MagicMock(return_value=True)

    self.view.dead_time_browse_button.clicked.emit(True)

    self.assertEqual(self.view.dead_time_selector.currentIndex(), 2)
    self.view.warning_popup.assert_not_called()
    self.assertEqual(self.view.dead_time_file_selector.currentText(), 'MUSR00015196_deadTimes')
    self.gui_variable_observer.update.assert_called_once_with(self.gui_context.gui_variables_notifier, None)
def runTest(self):
    UseCompatibilityMode()
    SANS2D()
    Set1D()
    Detector("rear-detector")
    MaskFile('MASKSANS2Doptions.091A')
    Gravity(True)

    csv_file = FileFinder.getFullPath('SANS2D_multiPeriodTests.csv')
    BatchReduce(csv_file, 'nxs', saveAlgs={})
    self.reduced = '5512_SANS2DBatch'
def runTest(self):
    UseCompatibilityMode()
    SANS2D()
    Set1D()
    Detector("rear-detector")
    MaskFile('MASKSANS2Doptions.091A')
    Gravity(True)

    csv_file = FileFinder.getFullPath('SANS2D_periodTests.csv')
    BatchReduce(csv_file, 'nxs', plotresults=False, saveAlgs={'SaveCanSAS1D': 'xml', 'SaveNexus': 'nxs'})
    os.remove(os.path.join(config['defaultsave.directory'], '5512p7_SANS2DBatch.xml'))
def __init__(self):
    ISISIndirectInelasticConvFit.__init__(self)
    self.sample = 'osi97935_graphite002_red.nxs'
    self.resolution = FileFinder.getFullPath('osi97935_graphite002_res.nxs')
    # ConvFit fit function
    self.func = 'name=LinearBackground,A0=0,A1=0;(composite=Convolution,FixResolution=true,NumDeriv=true;' \
                'name=Resolution,Workspace=\"%s\";name=Lorentzian,Amplitude=2,PeakCentre=0,FWHM=0.05)' % self.resolution
    self.startx = -0.2
    self.endx = 0.2
    self.bg = 'Fit Linear'
    self.spectra_min = 0
    self.spectra_max = 41
    self.ties = False

    self.result_names = ['osi97935_graphite002_conv_1LFitL_s0_to_41_Result']
def _run(self):
    '''Defines the workflow for the test'''
    self.tolerance = 1e-4
    LoadNexus(self.sample, OutputWorkspace=self.sample)
    LoadNexus(FileFinder.getFullPath(self.resolution), OutputWorkspace=self.resolution)

    convfitSeq_ws, params, fit_group = ConvolutionFitSequential(InputWorkspace=self.sample,
                                                                Function=self.func,
                                                                PassWSIndexToFunction=self.passWSIndexToFunction,
                                                                StartX=self.startx,
                                                                EndX=self.endx,
                                                                SpecMin=self.spectra_min,
                                                                SpecMax=self.spectra_max,
                                                                PeakRadius=5)

    self.result_names = [convfitSeq_ws[0].name()]
def __init__(self):
    ISISIndirectInelasticConvFit.__init__(self)
    self.sample = 'irs53664_graphite002_red.nxs'
    self.resolution = FileFinder.getFullPath('irs53664_graphite002_res.nxs')
    # ConvFit fit function
    self.func = 'name=LinearBackground,A0=0.060623,A1=0.001343;(composite=Convolution,FixResolution=true,NumDeriv=true;' \
                'name=Resolution,Workspace=\"%s\";name=Lorentzian,Amplitude=1.033150,PeakCentre=-0.000841,FWHM=0.001576)' \
                % (self.resolution)
    self.startx = -0.2
    self.endx = 0.2
    self.bg = 'Fit Linear'
    self.spectra_min = 0
    self.spectra_max = 50
    self.ties = False

    self.result_names = ['irs53664_graphite002_conv_1LFitL_s0_to_50_Result']
def find_sans_file(file_name):
    """
    Finds a SANS file.
    The file can be specified as:
    1. file.ext or path1 path2 file.ext
    2. run number
    :param file_name: a file name or a run number.
    :return: the full path.
    """
    full_path = find_full_file_path(file_name)
    if not full_path:
        runs = FileFinder.findRuns(file_name)
        if runs:
            full_path = runs[0]
    if not full_path:
        raise RuntimeError("Trying to find the SANS file {0}, but cannot find it. Make sure that "
                           "the relevant paths are added.".format(file_name))
    return full_path
def setUp(self):
    setup_context_for_tests(self)
    self.frequency_context = FrequencyContext(self.context)
    self.gui_variable_observer = Observer()
    self.gui_variable_observer.update = mock.MagicMock()
    self.gui_context.gui_variables_notifier.add_subscriber(self.gui_variable_observer)
    self.data_context.instrument = 'CHRONUS'
    self.gui_variable_observer = Observer()
    self.gui_variable_observer.update = mock.MagicMock()
    self.gui_context.gui_variables_notifier.add_subscriber(self.gui_variable_observer)

    filepath = FileFinder.findRuns('CHRONUS00003422.nxs')[0]
    load_result, run, filename = load_workspace_from_filename(filepath)

    self.loaded_data.add_data(workspace=load_result, run=[run], filename=filename, instrument='CHRONUS')
    self.data_context.current_runs = [[run]]
    self.context.update_current_data()
def do_cleanup():
    Files = ["PG3_9829.getn", "PG3_9829.gsa", "PG3_9829.py", "sum_PG3_9829.gsa", "sum_PG3_9829.py",
             "PG3_9830.gsa", "PG3_9830.py", "PG3_4844-1.dat", "PG3_4844.getn", "PG3_4844.gsa",
             "PG3_4844.py", "PG3_4866.gsa"]
    for filename in Files:
        absfile = FileFinder.getFullPath(filename)
        if os.path.exists(absfile):
            os.remove(absfile)
    return True
def test_runinfo_correct(self):
    file_path = FileFinder.findRuns('MUSR00022725.nxs')[0]
    ws, run, filename = load_utils.load_workspace_from_filename(file_path)
    self.data_context._loaded_data.remove_data(run=run)
    self.data_context._loaded_data.add_data(run=[run], workspace=ws, filename=filename, instrument='MUSR')
    self.data_context.current_runs = [[22725]]
    self.context.update_current_data()
    test_pair = MuonPair('test_pair', 'top', 'bottom', alpha=0.75)
    self.group_context.add_pair(pair=test_pair)

    self.presenter.update_view_from_model()

    expected_string_list = ['Instrument:MUSR', 'Run:22725', 'Title:FeTeSeT=1F=100', 'Comment:FCfirstsample',
                            'Start:2009-03-24T04:18:58', 'End:2009-03-24T04:56:26', 'Counts(MEv):20.076704',
                            'GoodFrames:88540', 'CountsperGoodFrame:226.753', 'CountsperGoodFrameperdet:3.543',
                            'AverageTemperature(K):2.53386', 'SampleTemperature(K):1.0',
                            'SampleMagneticField(G):100.0']

    self.assertEqual(str(self.view.run_info_box.toPlainText()).replace(' ', '').splitlines(),
                     expected_string_list)
def runTest(self):
    UseCompatibilityMode()
    LOQ()
    Detector("main-detector-bank")
    csv_file = FileFinder.getFullPath('batch_input.csv')

    Set1D()
    MaskFile('MASK.094AA')
    Gravity(True)

    BatchReduce(csv_file, 'raw', plotresults=False, saveAlgs={'SaveCanSAS1D': 'xml', 'SaveNexus': 'nxs'})

    LoadNexus(Filename='54433sans.nxs', OutputWorkspace='result')
    Plus(LHSWorkspace='result', RHSWorkspace='99630sanotrans', OutputWorkspace='result')

    os.remove(os.path.join(config['defaultsave.directory'], '54433sans.nxs'))
    os.remove(os.path.join(config['defaultsave.directory'], '99630sanotrans.nxs'))
    os.remove(os.path.join(config['defaultsave.directory'], '54433sans.xml'))
    os.remove(os.path.join(config['defaultsave.directory'], '99630sanotrans.xml'))
def runTest(self):
    UseCompatibilityMode()
    config['default.instrument'] = 'SANS2D'
    SANS2DTUBES()
    Set1D()
    Detector("rear-detector")
    # This contains two MASKFILE commands, each resulting in a separate call to MaskDetectors.
    MaskFile('SANS2DTube_ZerroErrorFreeTest.txt')

    # Saves a file which produces an output file which does not contain any zero errors
    csv_file = FileFinder.getFullPath("SANS2DTUBES_ZeroErrorFree_batch.csv")
    save_alg = {"SaveNexus": "nxs"}
    BatchReduce(csv_file, 'nxs', saveAlgs=save_alg, plotresults=False, save_as_zero_error_free=True)
    DeleteWorkspace('zero_free_out')

    # The zero correction only occurs for the saved files. Stephen King mentioned that the
    # original workspaces should not be tampered with
    self._final_output = os.path.join(config['defaultsave.directory'], 'zero_free_out.nxs')
    self._final_workspace = 'ws'
    Load(Filename=self._final_output, OutputWorkspace=self._final_workspace)
def runTest(self):
    UseCompatibilityMode()
    config['default.instrument'] = 'SANS2D'
    SANS2D()
    Set1D()
    Detector("rear-detector")
    # This contains two MASKFILE commands, each resulting in a separate call to MaskDetectors.
    MaskFile('MaskSANS2DReductionGUI_MaskFiles.txt')
    Gravity(True)

    # This does 2 separate reductions of the same data, but saving the result of each to a different workspace.
    csv_file = FileFinder.getFullPath("SANS2D_mask_batch.csv")
    BatchReduce(csv_file, 'nxs', plotresults=False)

    path1 = os.path.join(config['defaultsave.directory'], 'iteration_1.xml')
    path2 = os.path.join(config['defaultsave.directory'], 'iteration_2.xml')
    if os.path.exists(path1):
        os.remove(path1)
    if os.path.exists(path2):
        os.remove(path2)
def setUp(self):
    AnalysisDataService.clear()

    self.filepath = FileFinder.findRuns('EMU00019489.nxs')[0]
    self.load_result, self.run_number, self.filename = load_workspace_from_filename(self.filepath)

    self.loaded_data = MuonLoadData()
    self.data_context = MuonDataContext(self.loaded_data)
    self.gui_context = MuonGuiContext()
    self.group_pair_context = MuonGroupPairContext()
    self.gui_context.update({'RebinType': 'None'})

    self.context = MuonContext(muon_data_context=self.data_context, muon_gui_context=self.gui_context,
                               muon_group_context=self.group_pair_context)

    self.data_context.instrument = 'EMU'

    self.loaded_data.add_data(workspace=self.load_result, run=[self.run_number], filename=self.filename,
                              instrument='EMU')
    self.data_context.current_runs = [[self.run_number]]
    self.data_context.update_current_data()
    self.group_pair_context.reset_group_and_pairs_to_default(self.load_result['OutputWorkspace'][0]._workspace,
                                                             'EMU', '')
def setUp(self):
    self._qapp = mock_widget.mockQapp()
    # Store an empty widget to parent all the views, and ensure they are deleted correctly
    self.obj = QtGui.QWidget()
    setup_context_for_tests(self)
    self.context.instrument = 'EMU'
    self.load_file_view = BrowseFileWidgetView(self.obj)
    self.load_run_view = LoadRunWidgetView(self.obj)
    self.load_file_model = BrowseFileWidgetModel(self.loaded_data, self.context)
    self.load_run_model = LoadRunWidgetModel(self.loaded_data, self.context)

    self.presenter = LoadWidgetPresenter(
        LoadWidgetView(parent=self.obj, load_file_view=self.load_file_view, load_run_view=self.load_run_view),
        LoadWidgetModel(self.loaded_data, self.context))

    self.presenter.set_load_file_widget(BrowseFileWidgetPresenter(self.load_file_view, self.load_file_model))
    self.presenter.set_load_run_widget(LoadRunWidgetPresenter(self.load_run_view, self.load_run_model))

    self.filepath = FileFinder.findRuns('MUSR00022725.nxs')[0]

    self.load_patcher = mock.patch('Muon.GUI.Common.load_file_widget.model.load_utils.load_workspace_from_filename')
    self.addCleanup(self.load_patcher.stop)
    self.load_mock = self.load_patcher.start()

    self.load_run_patcher = mock.patch(
        'Muon.GUI.Common.load_run_widget.load_run_model.load_utils.load_workspace_from_filename')
    self.addCleanup(self.load_run_patcher.stop)
    self.load_run_mock = self.load_run_patcher.start()

    self.mock_workspace = self.create_fake_workspace(1)
    self.mock_loading_from_browse(self.mock_workspace, r"C:\dir1\dir2\dir3\EMU0001234.nxs", 1234)
    file_utils.get_current_run_filename = mock.Mock(return_value=r"C:\dir1\dir2\dir3\EMU0001234.nxs")

    self.presenter.load_file_widget._view.warning_popup = mock.MagicMock()
    self.presenter.load_run_widget._view.warning_popup = mock.MagicMock()

    self.popup_patcher = mock.patch('Muon.GUI.Common.thread_model.warning')
    self.addCleanup(self.popup_patcher.stop)
    self.popup_mock = self.popup_patcher.start()
def findFile(self, instrument, runnumber):
    # start with run and check the five before it
    runIds = list(range(runnumber, runnumber - 6, -1))
    # check for one after as well
    runIds.append(runnumber + 1)

    runIds = [str(runId) for runId in runIds if runId > 0]

    # prepend non-empty instrument name for FileFinder
    if len(instrument) > 0:
        runIds = ['%s_%s' % (instrument, runId) for runId in runIds]

    # look for a file
    for runId in runIds:
        self.log().information("Looking for '%s'" % runId)
        try:
            return FileFinder.findRuns(runId)[0]
        except RuntimeError:
            pass  # just keep looking

    # failing to find any of them is an error
    raise RuntimeError("Cannot find IPTS directory for '%s'" % runnumber)
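# Added usage note for findFile: within the owning algorithm it would be called with an
# instrument prefix and a run number (both hypothetical here); it checks the run itself,
# the five runs before it and one after, and raises RuntimeError if none can be located.
# ipts_file = self.findFile('PG3', 9829)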