def processData(self, filename, wsName):
    """Load a run file into *wsName* and apply the standard conditioning chain.

    Chain: Load -> FindDetectorsPar -> FilterBadPulses -> RemovePromptPulse ->
    LoadDiffCal -> MaskDetectors -> AlignDetectors -> ConvertUnits (wavelength).
    All algorithm parameters come from the instance's ``self._*`` attributes.
    Nothing is done when *filename* is empty.

    :param filename: path of the run file to load; '' skips processing
    :param wsName: name of the workspace, processed in-place
    """
    if filename != '':
        if self._SystemTest:
            # system-test mode loads a single bank to keep runtime short
            Load(Filename=filename, OutputWorkspace=wsName, BankName='bank22')
        else:
            Load(Filename=filename, OutputWorkspace=wsName)
        FindDetectorsPar(InputWorkspace=wsName,
                         ReturnLinearRanges=self._returnLinearRanges,
                         ParFile=self._parFile,
                         OutputParTable=self._outputParTable)
        # FIX: keyword was spelled 'Outputworkspace' (wrong case).  Mantid
        # keyword arguments are case-sensitive, so the filtered result was
        # never written back to wsName.
        FilterBadPulses(InputWorkspace=wsName, OutputWorkspace=wsName,
                        LowerCutoff=self._lowerCutoff)
        RemovePromptPulse(InputWorkspace=wsName, OutputWorkspace=wsName,
                          Width=self._width, Frequency=self._frequency)
        LoadDiffCal(InputWorkspace=wsName,
                    InstrumentName=self._instrumentName,
                    InstrumentFilename=self._instrumentFilename,
                    Filename=self._filename,
                    MakeGroupingWorkspace=self._makeGroupingWorkspace,
                    MakeCalWorkspace=self._makeCalWorkspace,
                    MakeMaskWorkspace=self._makeMaskWorkspace,
                    WorkspaceName=self._workspaceName,
                    TofMin=self._tofMin,
                    TofMax=self._tofMax,
                    FixConversionIssues=self._fixConversionIssues)
        MaskDetectors(Workspace=wsName,
                      SpectraList=self._spectraList,
                      DetectorList=self._detectorList,
                      WorkspaceIndexList=self._workspaceIndexList,
                      MaskedWorkspace=self._maskedWorkspace,
                      ForceInstrumentMasking=self._forceInstrumentMasking,
                      StartWorkspaceIndex=self._startWorkspaceIndex,
                      EndWorkspaceIndex=self._endWorkspaceIndex,
                      ComponentList=self._componentList)
        AlignDetectors(InputWorkspace=wsName, OutputWorkspace=wsName,
                       CalibrationFile=self._calibrationFile)
        ConvertUnits(InputWorkspace=wsName, OutputWorkspace=wsName,
                     Target='Wavelength')
def _calibrate_runs_in_map(self, drange_map):
    # Calibrate every run in the map in-place: normalise by proton current,
    # align with the .cal file, diffraction-focus, then crop to the d-range
    # the entry is keyed by.  The map's values are replaced with the
    # calibrated workspaces (keys are untouched, so in-loop assignment is safe).
    for d_range, run_ws in drange_map.items():
        ws = NormaliseByCurrent(InputWorkspace=run_ws,
                                OutputWorkspace="normalised_sample",
                                StoreInADS=False, EnableLogging=False)
        ws = AlignDetectors(InputWorkspace=ws,
                            CalibrationFile=self._cal,
                            OutputWorkspace="aligned_sample",
                            StoreInADS=False, EnableLogging=False)
        ws = DiffractionFocussing(InputWorkspace=ws,
                                  GroupingFileName=self._cal,
                                  OutputWorkspace="focussed_sample",
                                  StoreInADS=False, EnableLogging=False)
        d_min, d_max = d_range
        drange_map[d_range] = CropWorkspace(InputWorkspace=ws,
                                            XMin=d_min, XMax=d_max,
                                            OutputWorkspace="calibrated_sample",
                                            StoreInADS=False,
                                            EnableLogging=False)
def _alignAndFocus(self, params, calib, cal_File, group):
    """Convert the 'WS' workspace to d-spacing, rebin and diffraction-focus it.

    :param params: rebin parameters passed straight to Rebin
    :param calib: calibration mode: 'Convert Units', 'DetCal File', or a
                  mode that uses *cal_File* with AlignDetectors
    :param cal_File: path of the ISAW DetCal / calibration file
    :param group: grouping workspace for DiffractionFocussing
    """
    # loading the ISAW detcal file will override the default instrument
    if calib == 'DetCal File':
        LoadIsawDetCal(InputWorkspace='WS', Filename=cal_File)
    if calib in ['Convert Units', 'DetCal File']:
        ConvertUnits(InputWorkspace='WS', Target='dSpacing',
                     OutputWorkspace='WS_d')
    else:
        self.log().notice("\n calibration file : %s" % cal_File)
        # FIX: keyword was 'Outputworkspace' (wrong case) here and in Rebin
        # below; Mantid keyword arguments are case-sensitive.
        AlignDetectors(InputWorkspace='WS', CalibrationFile=cal_File,
                       OutputWorkspace='WS_d')
    Rebin(InputWorkspace='WS_d', Params=params, OutputWorkspace='WS_d')
    DiffractionFocussing(InputWorkspace='WS_d', GroupingWorkspace=group,
                         PreserveEvents=False, OutputWorkspace='WS_red')
def PyExec(self):
    """Load, align and diffraction-focus a run, then set the output property.

    FIX: DeleteWorkspace was called at the end but never imported, which
    raised NameError at runtime; it is now included in the local import.
    """
    from mantid.simpleapi import Load, AlignDetectors, DiffractionFocussing, DeleteWorkspace
    # Load file to workspace
    _tmpws = Load(Filename=self.getPropertyValue("Filename"))
    # AlignDetectors
    calfile = self.getProperty("CalFilename").value
    _tmpws = AlignDetectors(InputWorkspace=_tmpws, CalibrationFile=calfile)
    # Focus
    _tmpws = DiffractionFocussing(InputWorkspace=_tmpws,
                                  GroupingFileName=calfile)
    # Store reference after algorithm has gone
    self.setProperty("OutputWorkspace", _tmpws)
    # the output property holds its own reference, so the ADS copy can go
    DeleteWorkspace(_tmpws)
def calibrator(d_range, workspace):
    """Normalise, align, focus and crop *workspace* to *d_range*.

    Uses ``calibration_file`` from the enclosing scope for both the
    detector alignment and the focussing grouping.  Returns the cropped,
    calibrated workspace (kept out of the ADS).
    """
    ws = NormaliseByCurrent(InputWorkspace=workspace,
                            OutputWorkspace="normalised_sample",
                            StoreInADS=False, EnableLogging=False)
    ws = AlignDetectors(InputWorkspace=ws,
                        CalibrationFile=calibration_file,
                        OutputWorkspace="aligned_sample",
                        StoreInADS=False, EnableLogging=False)
    ws = DiffractionFocussing(InputWorkspace=ws,
                              GroupingFileName=calibration_file,
                              OutputWorkspace="focussed_sample",
                              StoreInADS=False, EnableLogging=False)
    d_min, d_max = d_range
    return CropWorkspace(InputWorkspace=ws, XMin=d_min, XMax=d_max,
                         OutputWorkspace="calibrated_sample",
                         StoreInADS=False, EnableLogging=False)
def slice_focus_event_workspace(self, event_ws_name, geometry_calib_ws_name, group_ws_name,
                                split_ws_name, info_ws_name, output_ws_base,
                                binning_parameters, chop_overlap_mode, gsas_info_dict,
                                gsas_writer, gsas_file_index_start, fullprof):
    """ Slice and diffraction focus event workspace with option to write the reduced data
    to GSAS file with SaveGSS().
    Each workspace is
    1. sliced from original event workspace
    2. diffraction focused
    3. optionally rebinned to IDL binning and read for SaveGSS()
    :param event_ws_name: name of EventWorkspace that has been masked if there is a mask
    :param geometry_calib_ws_name: DIFC calibration Table workspace
    :param group_ws_name: name of Grouping workspace
    :param split_ws_name: name of the splitters workspace fed to FilterEvents
    :param info_ws_name: name of the splitting information workspace
    :param output_ws_base: base name of the sliced output workspaces
    :param chop_overlap_mode: whether the chopped workspace will have overlapped events (in time)
    :param binning_parameters: None for IDL binning; otherwise, use defined binning
    :param gsas_info_dict: required for writing GSAS files
                           keys (IPTS, 'parm file' = 'vulcan.prm', 'vanadium')
    :param gsas_writer: GSASWriter instance to export to VULCAN GSAS file
    :param gsas_file_index_start: starting index of GSAS file (1.gda, 2.gda.. whether 0.gda?)
    :param fullprof: Flag to write reduced data to Fullprof (along with GSAS)
    :return: tuple: [1] slicing information, [2] output workspace names
    """
    # check inputs
    if binning_parameters is not None:
        datatypeutility.check_list('Binning parameters', binning_parameters)
    datatypeutility.check_dict('GSAS information', gsas_info_dict)

    # starting time
    t0 = time.time()

    # Align detectors: OpenMP
    AlignDetectors(InputWorkspace=event_ws_name,
                   OutputWorkspace=event_ws_name,
                   CalibrationWorkspace=geometry_calib_ws_name)
    t1 = time.time()

    # Filter events: OpenMP
    # is relative or not? TableWorkspace has to be relative!
    split_ws = mantid_helper.retrieve_workspace(split_ws_name, raise_if_not_exist=True)
    if split_ws.__class__.__name__.count('TableWorkspace'):
        is_relative_time = True
    else:
        is_relative_time = False
    result = FilterEvents(InputWorkspace=event_ws_name,
                          SplitterWorkspace=split_ws_name,
                          InformationWorkspace=info_ws_name,
                          OutputWorkspaceBaseName=output_ws_base,
                          FilterByPulseTime=False,
                          GroupWorkspaces=True,
                          OutputWorkspaceIndexedFrom1=True,
                          SplitSampleLogs=True,
                          RelativeTime=is_relative_time)

    # get output workspaces' names
    output_names = mantid_helper.get_filter_events_outputs(result)
    if output_names is None:
        raise RuntimeError(
            'There is no workspace found in the result of FilterEvents (vulcan_slice_reduce)')
    t2 = time.time()

    # construct output GSAS names; an empty sliced name maps to an empty GSAS name
    gsas_names = list()
    for out_ws_name in output_names:
        if len(out_ws_name) == 0:
            gsas_name = ''
        else:
            gsas_name = out_ws_name + '_gsas_not_binned'
        gsas_names.append(gsas_name)
    # END-FOR

    # Now start to use multi-threading to diffraction focus the sliced event data
    num_outputs = len(output_names)
    number_ws_per_thread = int(num_outputs / self._number_threads)
    extra = num_outputs % self._number_threads
    print('[DB...IMPORTANT] Output workspace number = {0}, workspace per thread = {1}\n'
          'Output workspaces names: {2}'.format(num_outputs, number_ws_per_thread, output_names))

    thread_pool = dict()
    # create threads and start
    end_sliced_ws_index = 0  # exclusive last
    for thread_id in range(self._number_threads):
        start_sliced_ws_index = end_sliced_ws_index
        # the first 'extra' threads each take one additional workspace
        end_sliced_ws_index = min(start_sliced_ws_index + number_ws_per_thread +
                                  int(thread_id < extra), num_outputs)
        # call method self.focus_workspace_list() in multiple threading
        # Note: Tread(target=[method name], args=(method argument 0, method argument 1, ...,)
        workspace_names_i = output_names[start_sliced_ws_index:end_sliced_ws_index]
        gsas_workspace_name_list = gsas_names[start_sliced_ws_index:end_sliced_ws_index]
        thread_pool[thread_id] = threading.Thread(
            target=self.focus_workspace_list,
            args=(workspace_names_i, gsas_workspace_name_list, group_ws_name,))
        thread_pool[thread_id].start()
        print('[DB] thread {0}: [{1}: {2}) ---> {3} workspaces'
              .format(thread_id, start_sliced_ws_index, end_sliced_ws_index,
                      end_sliced_ws_index - start_sliced_ws_index))
    # END-FOR

    # join the threads after the diffraction focus is finished
    for thread_id in range(self._number_threads):
        thread_pool[thread_id].join()

    # FIX: the original checked thread_i.isAlive() (removed in Python 3.9;
    # the method is is_alive()) and then called thread_i._Thread_stop(),
    # which does not exist in any Python version and would raise
    # AttributeError.  Python offers no supported way to force-kill a
    # thread, and join() above already waited for completion, so we only
    # warn about any thread that is unexpectedly still alive.
    for thread_id in range(self._number_threads):
        thread_i = thread_pool[thread_id]
        if thread_i is not None and thread_i.is_alive():
            print('[WARNING] thread {0} is still alive after join()'.format(thread_id))

    t3 = time.time()

    # process overlapping chop
    if chop_overlap_mode:
        # FIXME - Shan't be used anymore unless an optimized algorithm developed for DT option
        output_names = self.process_overlap_chopped_data(output_names)
    # END-IF

    # save ONE python script for future reference
    if len(output_names) > 0:
        python_name = os.path.join(self._output_dir,
                                   '{}_{}.py'.format(self._run_number, split_ws_name))
        GeneratePythonScript(InputWorkspace=output_names[0], Filename=python_name)
    else:
        print('[ERROR] No output workspace to export to GSAS!')

    # write all the processed workspaces to GSAS: IPTS number and parm_file_name shall be passed
    run_date_time = vulcan_util.get_run_date(event_ws_name, '')
    self.write_to_gsas(output_names, ipts_number=gsas_info_dict['IPTS'],
                       parm_file_name=gsas_info_dict['parm file'],
                       vanadium_gda_name=gsas_info_dict['vanadium'],
                       gsas_writer=gsas_writer, run_start_date=run_date_time,
                       # ref_tof_sets=binning_parameters,
                       gsas_file_index_start=gsas_file_index_start)

    if fullprof:
        output_dir = self._output_dir
        # FIXME TODO - TOMORROW 0 - Vanadium workspace for Fullprof?
        self.write_to_fullprof_files(output_names, None, output_dir)

    # TODO - TONIGHT 1 - put this section to a method
    # TODO FIXME - TODAY 0 - Debug disable
    if True:
        pc_time0 = mantid_helper.get_workspace_property(event_ws_name, 'proton_charge').times[0]
        # user does not want to HDF5 in same directory. Need to write to a special directory
        if self._output_dir.startswith('/SNS/VULCAN/IPTS-'):
            # on the SNS server, do it in a different way
            output_dir = vulcan_util.generate_chopped_log_dir(self._output_dir, True)
        else:
            output_dir = self._output_dir
        self.export_split_logs(output_names, gsas_file_index_start=gsas_file_index_start,
                               run_start_time=pc_time0, output_dir=output_dir)
    # END-IF

    # write to logs
    self.write_log_records(output_names, log_type='loadframe')
    tf = time.time()

    # processing time output
    process_info = '{0}: Runtime = {1} Total output workspaces = {2}' \
                   ''.format(event_ws_name, tf - t0, len(output_names))
    process_info += 'Details for thread = {4}:\n\tLoading = {0}\n\tChopping = {1}\n\tFocusing = {2}\n\t' \
                    'SaveGSS = {3}'.format(t1 - t0, t2 - t1, t3 - t2, tf - t3,
                                           self._number_threads)
    print('[INFO] {}'.format(process_info))

    # FIXME - FUTURE - Whether this for-loop is useful?
    end_sliced_ws_index = 0
    for thread_id in range(self._number_threads):
        start_sliced_ws_index = end_sliced_ws_index
        end_sliced_ws_index = min(start_sliced_ws_index + number_ws_per_thread +
                                  int(thread_id < extra), num_outputs)
        print('thread {0}: [{1}: {2}) ---> {3} workspaces'
              .format(thread_id, start_sliced_ws_index, end_sliced_ws_index,
                      end_sliced_ws_index - start_sliced_ws_index))

    return process_info, output_names