def save_mantid_nexus(workspace_name, file_name, title=''):
    """Save a workspace to a NeXus file that Mantid can re-import.

    :param workspace_name: name of the workspace in the ADS to save
    :param file_name: target NeXus file path (must be writable)
    :param title: optional title written into the file
    :return: None
    :raises RuntimeError: if the workspace is not in the ADS
    """
    # Validate every input before touching the analysis data service.
    checkdatatypes.check_file_name(file_name, check_exist=False,
                                   check_writable=True, is_dir=False)
    checkdatatypes.check_string_variable('Workspace title', title)
    checkdatatypes.check_string_variable('Workspace name', workspace_name)

    # Guard clause: fail early with the list of available workspaces.
    if not mtd.doesExist(workspace_name):
        raise RuntimeError(
            'Workspace {0} does not exist in Analysis data service. Available '
            'workspaces are {1}.'
            ''.format(workspace_name, mtd.getObjectNames()))

    SaveNexusProcessed(InputWorkspace=workspace_name,
                       Filename=file_name,
                       Title=title)
def test_good_case(self):
    """Run Abins on the Si2 phonon file and check the produced workspace names."""
    expected_names = [self._wrk_name,
                      self._wrk_name + "_Si",
                      self._wrk_name + "_Si_total"]

    Abins(PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
    actual_names = mtd.getObjectNames()

    # Builtin cmp has been removed in Python 3
    def _three_way(lhs, rhs):
        return (lhs > rhs) - (lhs < rhs)

    self.assertAlmostEqual(0, _three_way(expected_names, actual_names))
def test_good_case(self):
    """Run Abins on the Si2 phonon file and check the produced workspace names."""
    expected_names = [self._wrk_name,
                      self._wrk_name + "_Si",
                      self._wrk_name + "_Si_total"]

    Abins(VibrationalOrPhononFile=self._Si2 + ".phonon",
          OutputWorkspace=self._wrk_name)
    actual_names = mtd.getObjectNames()

    # Builtin cmp has been removed in Python 3
    def _three_way(lhs, rhs):
        return (lhs > rhs) - (lhs < rhs)

    self.assertAlmostEqual(0, _three_way(expected_names, actual_names))
def test_Workspace2D_with_include_monitors(self):
    """Loading with LoadMonitors="Include" must expose the monitors as attrs
    and must not leak any workspace into the ADS."""
    from mantid.simpleapi import mtd
    mtd.clear()

    raw_path = MantidDataHelper.find_file("WISH00016748.raw")
    dataset = mantidcompat.load(raw_path,
                                mantid_args={"LoadMonitors": "Include"})

    # The ADS must be empty after loading (no leaked workspaces).
    self.assertEqual(len(mtd), 0, mtd.getObjectNames())

    attr_names = dataset.attrs.keys()
    wanted = {"monitor1", "monitor2", "monitor3", "monitor4", "monitor5"}
    assert wanted.issubset(attr_names)
    for mon_name in wanted:
        mon = dataset.attrs[mon_name].values
        assert isinstance(mon, sc.DataArray)
        assert mon.shape == [4471]
def test_EventWorkspace_with_monitors(self):
    """Loading an event file with LoadMonitors=True must attach the monitors
    as attrs without leaking workspaces into the ADS."""
    from mantid.simpleapi import mtd
    mtd.clear()

    dataset = scn.load(scn.data.get_path("CNCS_51936_event.nxs"),
                       mantid_args={
                           "LoadMonitors": True,
                           "SpectrumMax": 1
                       })

    # The ADS must be empty after loading (no leaked workspaces).
    self.assertEqual(len(mtd), 0, mtd.getObjectNames())

    attr_names = [str(key) for key in dataset.attrs.keys()]
    wanted = {"monitor2", "monitor3"}
    assert wanted.issubset(attr_names)
    for mon_name in wanted:
        mon = dataset.attrs[mon_name].value
        assert isinstance(mon, sc.DataArray)
        assert mon.shape == [200001]
        self.check_monitor_metadata(mon)
def test_Workspace2D_with_include_monitors(self):
    """Loading with LoadMonitors="Include" must attach all five monitors as
    attrs without leaking workspaces into the ADS."""
    from mantid.simpleapi import mtd
    mtd.clear()

    # This test would use 20 GB of memory if "SpectrumMax" was not set
    dataset = scn.load(scn.data.get_path("WISH00016748.raw"),
                       mantid_args={
                           "LoadMonitors": "Include",
                           "SpectrumMax": 100
                       })

    # The ADS must be empty after loading (no leaked workspaces).
    self.assertEqual(len(mtd), 0, mtd.getObjectNames())

    attr_names = [str(key) for key in dataset.attrs.keys()]
    wanted = {"monitor1", "monitor2", "monitor3", "monitor4", "monitor5"}
    assert wanted.issubset(attr_names)
    for mon_name in wanted:
        mon = dataset.attrs[mon_name].value
        assert isinstance(mon, sc.DataArray)
        assert mon.shape == [4471]
        self.check_monitor_metadata(mon)
def test_fit_executes(self):
    """
    Tests that the fit executes, and the outputs are moved into the
    dataset. Does not check the fit values.
    """
    from mantid.simpleapi import Load, mtd
    mtd.clear()

    ws = Load(MantidDataHelper.find_file("iris26176_graphite002_sqw.nxs"),
              StoreInADS=False)
    fit_ds = sc.compat.mantid.fit(ws, 'name=LinearBackground,A0=0,A1=1',
                                  0, 0, 3)

    # check that no workspaces have been leaked in the ADS
    self.assertEqual(len(mtd), 0, mtd.getObjectNames())

    for key in ("workspace", "normalised_covariance_matrix", "parameters"):
        self.assertTrue(key in fit_ds)
    for attr in ("cost_function", "function", "status", "chi2_over_DoF"):
        self.assertTrue(attr in fit_ds.attrs)
def test_EventWorkspace_with_monitors(self):
    """Loading an event file with LoadMonitors=True must attach monitor2 and
    monitor3 as attrs, with the expected coords present/absent."""
    from mantid.simpleapi import mtd
    mtd.clear()

    nxs_path = MantidDataHelper.find_file("CNCS_51936_event.nxs")
    dataset = mantidcompat.load(nxs_path, mantid_args={"LoadMonitors": True})

    # The ADS must be empty after loading (no leaked workspaces).
    self.assertEqual(len(mtd), 0, mtd.getObjectNames())

    attr_names = dataset.attrs.keys()
    wanted = {"monitor2", "monitor3"}
    assert wanted.issubset(attr_names)
    for mon_name in wanted:
        mon = dataset.attrs[mon_name].value
        assert isinstance(mon, sc.DataArray)
        assert mon.shape == [200001]
        assert 'position' in mon.coords
        assert 'source_position' in mon.coords
        # This is essential, otherwise unit conversion assumes scattering
        # from sample:
        assert 'sample_position' not in mon.coords
        # Absence of the following is not crucial, but currently there is
        # no need for these, and it avoid duplication:
        assert 'detector_info' not in mon.coords
        assert 'run' not in mon.attrs
        assert 'sample' not in mon.attrs
def int3samples(runs, name, masks, binning='0.5, 0.05, 8.0'):
    """
    Finds the polarisation versus wavelength for a set of detector tubes.

    Parameters
    ----------
    runs: list of RunData objects
      The runs whose polarisation we are interested in.
    name: string
      The name of this set of runs
    masks: list of string
      The file names of the masks for the sequential tubes that are being
      used for the SEMSANS measurements.
    binning: string
      The binning values to use for the wavelength bins.  The default value is
      '0.5, 0.05, 8.0'
    """
    # Remove any per-tube/per-state workspaces left over from a previous call,
    # since the accumulation below would otherwise add onto stale data.
    for tube, _ in enumerate(masks):
        for i in [1, 2]:
            final_state = "{}_{}_{}".format(name, tube, i)
            if final_state in mtd.getObjectNames():
                DeleteWorkspace(final_state)
    # Accumulate monitor-normalised wavelength spectra for every tube over
    # all runs.
    for rnum in runs:
        w1 = Load(BASE.format(rnum.number), LoadMonitors=True)
        w1mon = ExtractSingleSpectrum('w1_monitors', 0)
        w1 = ConvertUnits('w1', 'Wavelength', AlignBins=1)
        w1mon = ConvertUnits(w1mon, 'Wavelength')
        w1 = Rebin(w1, binning, PreserveEvents=False)
        w1mon = Rebin(w1mon, binning)
        # Normalise the detector data by the monitor spectrum.
        w1 = w1 / w1mon
        for tube, mask in enumerate(masks):
            Mask_Tube = LoadMask('LARMOR', mask)
            w1temp = CloneWorkspace(w1)
            MaskDetectors(w1temp, MaskedWorkspace="Mask_Tube")
            Tube_Sum = SumSpectra(w1temp)
            # NOTE(review): "Tube_Sum_1"/"Tube_Sum_2" presumably appear
            # because multi-period data makes SumSpectra's output a
            # workspace group -- confirm against the LARMOR data layout.
            for i in [1, 2]:
                final_state = "{}_{}_{}".format(name, tube, i)
                if final_state in mtd.getObjectNames():
                    mtd[final_state] += mtd["Tube_Sum_{}".format(i)]
                else:
                    mtd[final_state] = mtd["Tube_Sum_{}".format(i)]

    # Wavelength bin centres of the accumulated spectra.
    x = mtd["{}_0_1".format(name)].extractX()[0]
    dx = (x[1:] + x[:-1]) / 2

    # Sample the modelled 3He analyser polarisation at ten time points over
    # each run, then average into a single correction spectrum.
    pols = []
    for run in runs:
        he_stat = he3_stats(run)
        start = (run.start-he_stat.dt).seconds/3600/he_stat.t1
        end = (run.end-he_stat.dt).seconds/3600/he_stat.t1
        for time in np.linspace(start, end, 10):
            temp = he3pol(he_stat.scale, time)(dx)
            pols.append(temp)
    wpol = CreateWorkspace(x, np.mean(pols, axis=0),
                           # and the blank
                           UnitX="Wavelength",
                           YUnitLabel="Counts")

    # Turn each tube's up/down spectra into a polarisation and correct it by
    # the helium analyser polarisation.
    for tube, _ in enumerate(masks):
        up = mtd["{}_{}_2".format(name, tube)]
        dn = mtd["{}_{}_1".format(name, tube)]
        pol = (up - dn) / (up + dn)
        pol /= wpol
        DeleteWorkspaces(["{}_{}_{}".format(name, tube, i)
                          for i in range(1, 3)])
        RenameWorkspace("pol", OutputWorkspace="{}_{}".format(name, tube))
    DeleteWorkspaces(["Tube_Sum_1", "Tube_Sum_2"])
    # NOTE(review): the inner "for i" loop makes every tube name appear twice
    # in this list -- looks like a leftover from the per-state naming; confirm
    # GroupWorkspaces tolerates the duplicates before simplifying.
    GroupWorkspaces(["{}_{}".format(name, tube)
                     for tube, _ in enumerate(masks)
                     for i in range(1, 3)],
                    OutputWorkspace=str(name))
def cleanup(self):
    """Delete every workspace currently held in the analysis data service."""
    for workspace_name in mtd.getObjectNames():
        DeleteWorkspace(Workspace=workspace_name)
def int3samples(runs, name, masks, binning='0.5, 0.05, 8.0'):
    """
    Finds the polarisation versus wavelength for a set of detector tubes.

    Parameters
    ----------
    runs: list of RunData objects
      The runs whose polarisation we are interested in.
    name: string
      The name of this set of runs
    masks: list of string
      The file names of the masks for the sequential tubes that are being
      used for the SEMSANS measurements.
    binning: string
      The binning values to use for the wavelength bins.  The default value is
      '0.5, 0.05, 8.0'
    """
    # Remove any per-tube/per-state workspaces left over from a previous call,
    # since the accumulation below would otherwise add onto stale data.
    for tube, _ in enumerate(masks):
        for i in [1, 2]:
            final_state = "{}_{}_{}".format(name, tube, i)
            if final_state in mtd.getObjectNames():
                DeleteWorkspace(final_state)
    # Accumulate monitor-normalised wavelength spectra for every tube over
    # all runs.
    for rnum in runs:
        w1 = Load(BASE.format(rnum.number), LoadMonitors=True)
        w1mon = ExtractSingleSpectrum('w1_monitors', 0)
        w1 = ConvertUnits('w1', 'Wavelength', AlignBins=1)
        w1mon = ConvertUnits(w1mon, 'Wavelength')
        w1 = Rebin(w1, binning, PreserveEvents=False)
        w1mon = Rebin(w1mon, binning)
        # Normalise the detector data by the monitor spectrum.
        w1 = w1 / w1mon
        for tube, mask in enumerate(masks):
            Mask_Tube = LoadMask('LARMOR', mask)
            w1temp = CloneWorkspace(w1)
            MaskDetectors(w1temp, MaskedWorkspace="Mask_Tube")
            Tube_Sum = SumSpectra(w1temp)
            # NOTE(review): "Tube_Sum_1"/"Tube_Sum_2" presumably appear
            # because multi-period data makes SumSpectra's output a
            # workspace group -- confirm against the LARMOR data layout.
            for i in [1, 2]:
                final_state = "{}_{}_{}".format(name, tube, i)
                if final_state in mtd.getObjectNames():
                    mtd[final_state] += mtd["Tube_Sum_{}".format(i)]
                else:
                    mtd[final_state] = mtd["Tube_Sum_{}".format(i)]

    # Wavelength bin centres of the accumulated spectra.
    x = mtd["{}_0_1".format(name)].extractX()[0]
    dx = (x[1:] + x[:-1]) / 2

    # Sample the modelled 3He analyser polarisation at ten time points over
    # each run, then average into a single correction spectrum.
    pols = []
    for run in runs:
        he_stat = he3_stats(run)
        start = (run.start - he_stat.dt).seconds / 3600 / he_stat.t1
        end = (run.end - he_stat.dt).seconds / 3600 / he_stat.t1
        for time in np.linspace(start, end, 10):
            temp = he3pol(he_stat.scale, time)(dx)
            pols.append(temp)
    wpol = CreateWorkspace(
        x,
        np.mean(pols, axis=0),
        # and the blank
        UnitX="Wavelength",
        YUnitLabel="Counts")

    # Turn each tube's up/down spectra into a polarisation and correct it by
    # the helium analyser polarisation.
    for tube, _ in enumerate(masks):
        up = mtd["{}_{}_2".format(name, tube)]
        dn = mtd["{}_{}_1".format(name, tube)]
        pol = (up - dn) / (up + dn)
        pol /= wpol
        DeleteWorkspaces(
            ["{}_{}_{}".format(name, tube, i) for i in range(1, 3)])
        RenameWorkspace("pol", OutputWorkspace="{}_{}".format(name, tube))
    DeleteWorkspaces(["Tube_Sum_1", "Tube_Sum_2"])
    # NOTE(review): the inner "for i" loop makes every tube name appear twice
    # in this list -- looks like a leftover from the per-state naming; confirm
    # GroupWorkspaces tolerates the duplicates before simplifying.
    GroupWorkspaces([
        "{}_{}".format(name, tube) for tube, _ in enumerate(masks)
        for i in range(1, 3)
    ],
                    OutputWorkspace=str(name))
def run(self, sample):
    """
    Run the export algorithms on a sample.

    For each export algorithm, the function will try to validate the
    criteria (using _validCriteria()) on the output workspace that
    corresponds to the sample. If the criteria are valid, the export will
    be run on all workspaces whose name contains the sample name.

    Args:
        sample (DrillSample): sample to be exported
    """
    exportPath = config.getString("defaultsave.directory")
    if not exportPath:
        logger.warning("Default save directory is not defined. Please "
                       "specify one in the data directories dialog to "
                       "enable exports.")
        return
    workspaceName = sample.getOutputName()

    # Criteria are validated against the first member of a workspace group,
    # or against the workspace itself otherwise.
    try:
        outputWs = mtd[workspaceName]
        if isinstance(outputWs, WorkspaceGroup):
            names = outputWs.getNames()
            outputWs = names[0]
        else:
            outputWs = workspaceName
    except KeyError:
        # Workspace is not in the ADS: nothing to export for this sample.
        # (Was a bare `except:`, which also hid programming errors.)
        return

    tasks = list()
    for algo, active in self._exportAlgorithms.items():
        if not active:
            continue
        if not self._validCriteria(outputWs, algo):
            logger.notice("Export of sample {} with {} was skipped "
                          "because workspaces are not compatible."
                          .format(outputWs, algo))
            continue
        # Export every non-group workspace whose name contains the sample
        # output name.
        for wsName in mtd.getObjectNames(contain=workspaceName):
            if isinstance(mtd[wsName], WorkspaceGroup):
                continue
            filename = os.path.join(
                exportPath,
                wsName + RundexSettings.EXPORT_ALGO_EXTENSION[algo])
            name = wsName + ":" + filename
            # Remember the pending export so callbacks can tick it off.
            if wsName not in self._exports:
                self._exports[wsName] = set()
            self._exports[wsName].add(filename)
            kwargs = {}
            if 'Ascii' in algo:
                log_list = mtd[wsName].getInstrument()\
                    .getStringParameter('log_list_to_save')
                if log_list:
                    log_list = log_list[0].split(',')
                    # removes white spaces
                    kwargs['LogList'] = [log.strip() for log in log_list]
                if 'Reflectometry' in algo:
                    kwargs['WriteHeader'] = True
                    kwargs['FileExtension'] = 'custom'
                else:
                    kwargs['WriteXError'] = True
            task = DrillTask(name, algo, InputWorkspace=wsName,
                             FileName=filename, **kwargs)
            # Default arguments bind the current wsName/filename, avoiding
            # the late-binding closure pitfall.
            task.addSuccessCallback(
                lambda wsName=wsName, filename=filename:
                    self._onTaskSuccess(wsName, filename))
            task.addErrorCallback(
                lambda msg, wsName=wsName, filename=filename:
                    self._onTaskError(wsName, filename, msg))
            tasks.append(task)
    self._pool.addProcesses(tasks)
def __del__(self):
    """Fail loudly if any workspace was left behind in the ADS."""
    leftovers = mtd.getObjectNames()
    if leftovers:
        raise RuntimeError('Workspaces are not cleaned')
def execute(self, source=None, hidden=False, interactive=False):
    """
    Override super's execute() in order to emit customized signals to
    the main application.

    Parameters
    ----------
    source
    hidden
    interactive

    Returns
    -------
    None
    """
    # Record the workspaces present before execution so that the workspace
    # change can be computed afterwards.
    if self._mainApplication is not None:
        prev_workspace_names = set(mtd.getObjectNames())
    else:
        prev_workspace_names = None

    # interpret command: command is in self.input_buffer
    script = str(self.input_buffer).strip()

    # convert previous command "Run: vbin, ipts=18420, runs=139148, tag='C', output='\tmp'" to a property command
    if script.startswith('"Run: '):
        # strip "Run: and " away
        script = script.split('Run: ')[1]
        if script[-1] == '"':
            script = script[:-1]
    elif script.startswith('Run: '):
        # strip Run: away
        script = script.split('Run: ')[1]

    # main application is workspace viewer
    is_reserved = False
    # FIX: guard against _mainApplication being None, consistent with the
    # checks above and below; previously this line raised AttributeError
    # when no main application was attached.
    if self._mainApplication is not None and \
            self._mainApplication.is_reserved_command(script):
        # reserved command: main application executes the command and
        # returns the message
        is_reserved = True
        # call main app/parent to execute the reserved command
        exec_message = self._mainApplication.execute_reserved_command(
            script)
        # create a fake command for IPython console (a do-nothing string)
        script_transformed = script[:]
        script_transformed = script_transformed.replace('"', "'")
        source = '\"Run: %s\"' % script_transformed
    else:
        exec_message = None

    # call base class to execute
    super(RichIPythonWidget, self).execute(source, hidden, interactive)

    # result message: append plain text to the console
    if is_reserved:
        self._append_plain_text('\n%s\n' % exec_message)

    # update workspaces for inline workspace operation
    if self._mainApplication is not None:
        post_workspace_names = set(mtd.getObjectNames())
        diff_set = post_workspace_names - prev_workspace_names
        self._mainApplication.process_workspace_change(diff_set)

    return