from mantid import simpleapi as api


def merge_reflectivity(reduction_list, xs, q_min=0.001, q_step=-0.01):
    """
    Combine the workspaces for a given cross-section into a single workspace.

    TODO: trim workspaces
        trim_first = [item.cross_sections[pol_state].configuration.cut_first_n_points
                      for item in self.data_manager.reduction_list]
        trim_last = [item.cross_sections[pol_state].configuration.cut_last_n_points
                     for item in self.data_manager.reduction_list]
    """
    ws_list = []
    scaling_factors = []
    q_max = q_min

    for i in range(len(reduction_list)):
        # If we couldn't calculate the reflectivity, we won't have a workspace available
        if reduction_list[i].cross_sections[xs].reflectivity_workspace is None:
            continue
        _, _q_max = reduction_list[i].get_q_range()
        q_max = max(q_max, _q_max)
        ws_name = str(reduction_list[i].cross_sections[xs].reflectivity_workspace)
        # Stitch1DMany only scales workspaces relative to the first one
        if i == 0:
            api.Scale(InputWorkspace=ws_name, OutputWorkspace=ws_name + '_histo',
                      Factor=reduction_list[i].cross_sections[xs].configuration.scaling_factor,
                      Operation='Multiply')
            api.ConvertToHistogram(InputWorkspace=ws_name + '_histo',
                                   OutputWorkspace=ws_name + '_histo')
        else:
            scaling_factors.append(reduction_list[i].cross_sections[xs].configuration.scaling_factor)
            api.ConvertToHistogram(InputWorkspace=ws_name,
                                   OutputWorkspace=ws_name + '_histo')
        ws_list.append(ws_name + '_histo')

    params = "%s, %s, %s" % (q_min, q_step, q_max)
    if len(ws_list) > 1:
        merged_ws, _ = api.Stitch1DMany(InputWorkspaces=ws_list, Params=params,
                                        UseManualScaleFactors=True,
                                        ManualScaleFactors=scaling_factors,
                                        OutputWorkspace=ws_name + "_merged")
    elif len(ws_list) == 1:
        merged_ws = api.CloneWorkspace(ws_list[0], OutputWorkspace=ws_name + "_merged")
    else:
        return None

    # Remove temporary workspaces
    for ws in ws_list:
        api.DeleteWorkspace(ws)

    # Debugging leftover: dumps the merged curve to a fixed temporary file
    api.SaveAscii(InputWorkspace=merged_ws, Filename="/tmp/test.txt")
    return merged_ws
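# A minimal, self-contained sketch of the Stitch1DMany pattern used above. The
# workspace names and the synthetic 1/q^4 curves are illustrative stand-ins
# for real reflectivity data, not part of the original module.
import numpy as np
from mantid import simpleapi as api

for i, q0 in enumerate([0.001, 0.012]):
    q = np.linspace(q0, q0 + 0.02, 20)
    api.CreateWorkspace(DataX=q, DataY=1.0 / q ** 4, DataE=0.01 / q ** 2,
                        UnitX='MomentumTransfer', OutputWorkspace='r_%d' % i)
    # Stitch1DMany expects histogram data, so convert the point data first
    api.ConvertToHistogram(InputWorkspace='r_%d' % i,
                           OutputWorkspace='r_%d_histo' % i)

# One manual factor per workspace after the first; a negative q step requests
# logarithmic output binning
merged, scale_factors = api.Stitch1DMany(InputWorkspaces=['r_0_histo', 'r_1_histo'],
                                         Params='0.001, -0.01, 0.032',
                                         UseManualScaleFactors=True,
                                         ManualScaleFactors=[1.0],
                                         OutputWorkspace='merged')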
from mantid import simpleapi as api


def stitch_reflectivity(reduction_list, xs=None, normalize_to_unity=True, q_cutoff=0.01):
    """
    Stitch and normalize data sets

    :param list reduction_list: list of reduced data sets to stitch
    :param string xs: name of the cross-section to use
    :param bool normalize_to_unity: if True, the specular ridge will be normalized to 1
    :param float q_cutoff: Q value below which the data is considered part of the specular ridge
    """
    if not reduction_list:
        return []

    # Select the cross-section we will use to determine the scaling factors
    if xs is None:
        xs = list(reduction_list[0].cross_sections.keys())[0]

    # First, determine the overall scaling factor as needed
    scaling_factor = 1.0
    if normalize_to_unity:
        idx_list = reduction_list[0].cross_sections[xs].q < q_cutoff
        total = 0
        weights = 0
        for i in range(len(reduction_list[0].cross_sections[xs]._r)):
            if idx_list[i]:
                w = 1.0 / float(reduction_list[0].cross_sections[xs]._dr[i]) ** 2
                total += w * float(reduction_list[0].cross_sections[xs]._r[i])
                weights += w
        if weights > 0 and total > 0:
            scaling_factor = weights / total
        reduction_list[0].set_parameter("scaling_factor", scaling_factor)
    else:
        scaling_factor = reduction_list[0].cross_sections[xs].configuration.scaling_factor

    # Stitch the data sets together
    _previous_ws = None
    running_scale = scaling_factor
    scaling_factors = [running_scale]

    for i in range(len(reduction_list)):
        n_total = len(reduction_list[i].cross_sections[xs].q)
        p_0 = reduction_list[i].cross_sections[xs].configuration.cut_first_n_points
        p_n = n_total - reduction_list[i].cross_sections[xs].configuration.cut_last_n_points
        ws = api.CreateWorkspace(DataX=reduction_list[i].cross_sections[xs].q[p_0:p_n],
                                 DataY=reduction_list[i].cross_sections[xs]._r[p_0:p_n],
                                 DataE=reduction_list[i].cross_sections[xs]._dr[p_0:p_n])
        ws.setDistribution(True)
        ws = api.ConvertToHistogram(ws)
        if _previous_ws is not None:
            _, scale = api.Stitch1D(_previous_ws, ws)
            running_scale *= scale
            scaling_factors.append(running_scale)
            reduction_list[i].set_parameter("scaling_factor", running_scale)
        _previous_ws = api.CloneWorkspace(ws)

    return scaling_factors
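# The normalize-to-unity factor computed above is just the reciprocal of the
# inverse-variance weighted mean of the reflectivity below q_cutoff. A
# vectorized equivalent, using made-up arrays for illustration:
import numpy as np

q = np.linspace(0.002, 0.02, 10)
r = np.full(10, 2.0)      # critical-edge plateau at R = 2 -> factor 0.5
dr = np.full(10, 0.1)

mask = q < 0.01
w = 1.0 / dr[mask] ** 2
scaling_factor = w.sum() / (w * r[mask]).sum()  # 1 / weighted mean of r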
import mantid.simpleapi as mantid
from six import iteritems

# get_detector_num_from_ws, create_merged_workspace, spectrum_index and
# num_files_per_detector are helpers/constants defined elsewhere in this module


def merge_workspaces(run, workspaces):
    """ where workspaces is a tuple of form:
            (filepath, ws name)
    """
    d_string = "{}; Detector {}"
    # detectors is a dictionary of {detector_name : [names_of_workspaces]}
    detectors = {d_string.format(run, x): [] for x in range(1, 5)}
    # fill dictionary
    for workspace in workspaces:
        detector_number = get_detector_num_from_ws(workspace)
        detectors[d_string.format(run, detector_number)].append(workspace)
    # initialise a group workspace
    tmp = mantid.CreateSampleWorkspace()
    overall_ws = mantid.GroupWorkspaces(tmp, OutputWorkspace=str(run))
    # merge each workspace list in detectors into a single workspace
    for detector, workspace_list in iteritems(detectors):
        if workspace_list:
            # sort workspace list according to type_index
            sorted_workspace_list = [None] * num_files_per_detector
            for workspace in workspace_list:
                data_type = workspace.rsplit("_")[1]
                sorted_workspace_list[spectrum_index[data_type] - 1] = workspace
            workspace_list = sorted_workspace_list
            # create merged workspace
            merged_ws = create_merged_workspace(workspace_list)
            # add merged ws to ADS
            mantid.mtd.add(detector, merged_ws)
            mantid.ConvertToHistogram(InputWorkspace=detector, OutputWorkspace=detector)
            overall_ws.add(detector)
    mantid.AnalysisDataService.remove("tmp")
    # return list of [run; Detector detectorNumber], in ascending order of detector number
    detector_list = sorted(list(detectors))
    return detector_list
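# A small sketch of the grouping trick used above: GroupWorkspaces needs at
# least one member, so a throwaway sample workspace seeds the group and real
# workspaces are attached afterwards by name. All names here are illustrative.
import mantid.simpleapi as mantid

seed = mantid.CreateSampleWorkspace(OutputWorkspace='seed')
group = mantid.GroupWorkspaces('seed', OutputWorkspace='run_1234')

mantid.CreateSampleWorkspace(OutputWorkspace='1234; Detector 1')
group.add('1234; Detector 1')            # attach an ADS workspace by name
mantid.AnalysisDataService.remove('seed')  # drop the seed workspace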
import numpy
from mantid import simpleapi as api
from mantid.api import AnalysisDataService as ADS

# align_to_vdrive_bin is a helper defined elsewhere in this module


def save_mantid_gsas(gsas_ws_name, gda_file_name, binning_parameters):
    """
    Save a temporary GSAS file
    :param gsas_ws_name: name of the workspace to save
    :param gda_file_name: path of the output GSAS file
    :param binning_parameters: numpy array of VDRIVE bin boundaries, Rebin parameters, or None
    :return: path of the saved GSAS file
    """
    temp1_ws = ADS.retrieve(gsas_ws_name)
    print('[DB...BAT] Before alignment {0}.. vec x: {1}... is histogram? {2}'
          ''.format(type(temp1_ws), temp1_ws.readX(2)[0], temp1_ws.isHistogramData()))

    aligned_gss_ws_name = '{0}_temp'.format(gsas_ws_name)

    if isinstance(binning_parameters, numpy.ndarray):
        # align to VDRIVE binning
        align_to_vdrive_bin(gsas_ws_name, binning_parameters, aligned_gss_ws_name)
    elif binning_parameters is not None:
        api.Rebin(InputWorkspace=gsas_ws_name, OutputWorkspace=aligned_gss_ws_name,
                  Params=binning_parameters)
    else:
        # no rebinning requested: work on a copy so the in-place histogram
        # conversion below does not modify the input workspace
        api.CloneWorkspace(InputWorkspace=gsas_ws_name,
                           OutputWorkspace=aligned_gss_ws_name)
    # END-IF (rebin)

    aws = ADS.retrieve(aligned_gss_ws_name)
    print('[DB...INFO] Save Mantid GSS: {} is histogram: {}'.format(
        aligned_gss_ws_name, aws.isHistogramData()))

    # Convert from PointData to Histogram
    api.ConvertToHistogram(InputWorkspace=aligned_gss_ws_name,
                           OutputWorkspace=aligned_gss_ws_name)

    # Save
    print('[DB...VERY IMPORTANT] Save to GSAS file {0} as a temporary output'.format(gda_file_name))
    curr_ws = ADS.retrieve(aligned_gss_ws_name)
    print('[DB...INFO] Into SaveGSS: number of histograms = {}, bank 1/2 size = {}, bank 3 size = {}'
          ''.format(curr_ws.getNumberHistograms(), len(curr_ws.readX(0)), len(curr_ws.readX(2))))
    print('[DB...INFO] B1[0] = {}, B1[-1] = {}, B3[0] = {}, B3[-1] = {}'
          ''.format(curr_ws.readX(0)[0], curr_ws.readX(0)[-1],
                    curr_ws.readX(2)[0], curr_ws.readX(2)[-1]))
    api.SaveGSS(InputWorkspace=aligned_gss_ws_name, Filename=gda_file_name,
                SplitFiles=False, Append=False,
                Format="SLOG", MultiplyByBinWidth=False, ExtendedHeader=False,
                UseSpectrumNumberAsBankID=True)

    return gda_file_name
def _saveGSAS(self, gsaws, gdafilename):
    """ Save file
    """
    # Convert from PointData to Histogram
    gsaws = api.ConvertToHistogram(InputWorkspace=gsaws, OutputWorkspace=str(gsaws))

    # Save
    api.SaveGSS(InputWorkspace=gsaws, Filename=gdafilename, SplitFiles=False, Append=False,
                Format="SLOG", MultiplyByBinWidth=False, ExtendedHeader=False,
                UseSpectrumNumberAsBankID=True)

    return gsaws
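# A runnable sketch of the point-data -> histogram -> SaveGSS sequence shared
# by the two save functions above, on a synthetic workspace. The workspace
# name and file path are placeholders, and the logarithmic Rebin is an
# assumption to match the SLOG output format.
from mantid import simpleapi as api

api.CreateSampleWorkspace(OutputWorkspace='gsas_demo', NumBanks=1, BankPixelWidth=1)
# SLOG output expects logarithmic binning, so rebin with a negative (log) step
api.Rebin(InputWorkspace='gsas_demo', OutputWorkspace='gsas_demo',
          Params='100, -0.01, 20000')
api.ConvertToHistogram(InputWorkspace='gsas_demo', OutputWorkspace='gsas_demo')
api.SaveGSS(InputWorkspace='gsas_demo', Filename='/tmp/gsas_demo.gda',
            SplitFiles=False, Format='SLOG', UseSpectrumNumberAsBankID=True)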
import mantid.simpleapi as mantid
from isis_powder.routines import common  # ISIS powder helper module


def attenuate_workspace(attenuation_file_path, ws_to_correct):
    original_units = ws_to_correct.getAxis(0).getUnit().unitID()
    wc_attenuated = mantid.PearlMCAbsorption(attenuation_file_path)
    wc_attenuated = mantid.ConvertToHistogram(InputWorkspace=wc_attenuated,
                                              OutputWorkspace=wc_attenuated)
    ws_to_correct = mantid.ConvertUnits(InputWorkspace=ws_to_correct,
                                        OutputWorkspace=ws_to_correct,
                                        Target=wc_attenuated.getAxis(0).getUnit().unitID())
    wc_attenuated = mantid.RebinToWorkspace(WorkspaceToRebin=wc_attenuated,
                                            WorkspaceToMatch=ws_to_correct,
                                            OutputWorkspace=wc_attenuated)
    pearl_attenuated_ws = mantid.Divide(LHSWorkspace=ws_to_correct,
                                        RHSWorkspace=wc_attenuated)
    common.remove_intermediate_workspace(workspaces=wc_attenuated)
    pearl_attenuated_ws = mantid.ConvertUnits(InputWorkspace=pearl_attenuated_ws,
                                              OutputWorkspace=pearl_attenuated_ws,
                                              Target=original_units)
    return pearl_attenuated_ws
def _run_attenuate_workspace(self, input_workspace):
    if self._old_atten_file is None:  # For old API support
        attenuation_path = self._attenuation_full_path
    else:
        attenuation_path = self._old_atten_file

    wc_attenuated = mantid.PearlMCAbsorption(attenuation_path)
    wc_attenuated = mantid.ConvertToHistogram(InputWorkspace=wc_attenuated,
                                              OutputWorkspace=wc_attenuated)
    wc_attenuated = mantid.RebinToWorkspace(WorkspaceToRebin=wc_attenuated,
                                            WorkspaceToMatch=input_workspace,
                                            OutputWorkspace=wc_attenuated)
    pearl_attenuated_ws = mantid.Divide(LHSWorkspace=input_workspace,
                                        RHSWorkspace=wc_attenuated)
    common.remove_intermediate_workspace(workspace_name=wc_attenuated)
    return pearl_attenuated_ws
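# The core of both attenuation routines is "rebin the correction onto the
# data's grid, then divide". A self-contained sketch with sample workspaces
# standing in for the measured data and the PearlMCAbsorption output:
import mantid.simpleapi as mantid

data = mantid.CreateSampleWorkspace(OutputWorkspace='data', BinWidth=200)
attenuation = mantid.CreateSampleWorkspace(OutputWorkspace='attenuation', BinWidth=500)

# put the correction on the same binning as the data before dividing
attenuation = mantid.RebinToWorkspace(WorkspaceToRebin=attenuation,
                                      WorkspaceToMatch=data,
                                      OutputWorkspace='attenuation')
corrected = mantid.Divide(LHSWorkspace=data, RHSWorkspace=attenuation,
                          OutputWorkspace='corrected')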
from mantid import simpleapi as api


def _prepare_workspace_for_stitching(cross_section, ws_name):
    """
    Create a workspace from a CrossSectionData object that we can call Stitch1D on.

    :param CrossSectionData cross_section: cross section data object
    :param str ws_name: name to give the output workspace
    """
    n_total = len(cross_section.q)
    p_0 = cross_section.configuration.cut_first_n_points
    p_n = n_total - cross_section.configuration.cut_last_n_points
    ws = api.CreateWorkspace(DataX=cross_section.q[p_0:p_n],
                             DataY=cross_section._r[p_0:p_n],
                             DataE=cross_section._dr[p_0:p_n],
                             OutputWorkspace=ws_name)
    ws.setDistribution(True)
    ws = api.ConvertToHistogram(ws, OutputWorkspace=ws_name)
    return ws
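# Quick usage sketch for this preparation step feeding Stitch1D; the arrays
# are synthetic and the workspace names hypothetical:
import numpy as np
from mantid import simpleapi as api


def _make(name, q0):
    q = np.linspace(q0, q0 + 0.02, 20)
    ws = api.CreateWorkspace(DataX=q, DataY=1.0 / q ** 3, DataE=0.05 / q,
                             OutputWorkspace=name)
    ws.setDistribution(True)  # mark as distribution before stitching
    return api.ConvertToHistogram(ws, OutputWorkspace=name)


lhs = _make('xs_a', 0.001)
rhs = _make('xs_b', 0.012)
stitched, scale = api.Stitch1D(lhs, rhs)  # scale applied to the RHS curve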
def get_bragg_data(self, ws_group_name, bank_id, x_unit):
    """ Get Bragg diffraction data of 1 bank
    Args:
        ws_group_name: name of the workspace group containing the bank workspaces
        bank_id: bank ID (integer, starting at 1)
        x_unit: target unit for the X axis
    Returns:
        3-tuple of numpy 1D arrays for X, Y and E
    """
    # check
    assert isinstance(bank_id, int) and bank_id > 0
    msg = 'Workspace group {} does not exist in controller. '.format(ws_group_name)
    msg += 'Currently existing are {}.'.format(self._braggDataDict.keys())
    assert ws_group_name in self._braggDataDict, msg

    ws_name = '%s_bank%d' % (ws_group_name.split('_group')[0], bank_id)
    error_message = 'Bank %d is not found in group %s. Available bank IDs are %s.' % (
        bank_id, ws_group_name, str(self._braggDataDict[ws_group_name][1]))
    assert ws_name in self._braggDataDict[ws_group_name][1], error_message

    # FIXME - It is quite messy here! Using dictionary or forming workspace name?
    # construct bank workspace name
    # ws_name = self._braggDataDict[ws_group_name][1][bank_id]
    assert AnalysisDataService.doesExist(ws_name), 'Workspace %s does not exist.' % ws_name

    # convert units if necessary
    bank_ws = AnalysisDataService.retrieve(ws_name)
    curr_unit = bank_ws.getAxis(0).getUnit().unitID()
    if curr_unit != x_unit:
        simpleapi.ConvertToHistogram(InputWorkspace=ws_name, OutputWorkspace=ws_name)
        simpleapi.ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name,
                               Target=x_unit, EMode='Elastic')

    # convert to point data for plotting
    simpleapi.ConvertToPointData(InputWorkspace=ws_name, OutputWorkspace=ws_name)

    # get workspace
    bank_ws = AnalysisDataService.retrieve(ws_name)

    return bank_ws.readX(0), bank_ws.readY(0), bank_ws.readE(0)
def get_bragg_data(self, ws_name, wkspindex, x_unit):
    """ Get Bragg diffraction data of 1 bank
    """
    # check
    assert isinstance(wkspindex, int) and wkspindex >= 0
    bank_ws = addie.utilities.workspaces.get_ws(ws_name)

    # convert units if necessary
    curr_unit = bank_ws.getAxis(0).getUnit().unitID()
    if curr_unit != x_unit:
        simpleapi.ConvertToHistogram(InputWorkspace=ws_name, OutputWorkspace=ws_name)
        simpleapi.ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name,
                               Target=x_unit, EMode='Elastic')

    return addie.utilities.workspaces.get_ws_data(ws_name, wkspindex)
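# Both get_bragg_data variants share the same unit-conversion idiom: ensure
# histogram data before ConvertUnits, change units, then go back to point data
# for plotting. A standalone sketch on a sample workspace ('bragg_demo' is a
# placeholder name):
from mantid import simpleapi

ws = simpleapi.CreateSampleWorkspace(OutputWorkspace='bragg_demo')  # X in TOF
simpleapi.ConvertToHistogram(InputWorkspace='bragg_demo', OutputWorkspace='bragg_demo')
simpleapi.ConvertUnits(InputWorkspace='bragg_demo', OutputWorkspace='bragg_demo',
                       Target='dSpacing', EMode='Elastic')
simpleapi.ConvertToPointData(InputWorkspace='bragg_demo', OutputWorkspace='bragg_demo')
x = simpleapi.mtd['bragg_demo'].readX(0)  # now in dSpacing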