def create_solid_angle_corrections(self, vanadium, run_details):
    """
    Creates the solid angle corrections from a vanadium run. Only applicable
    on HRPD; otherwise this is a no-op returning None.
    :param vanadium: The vanadium workspace used to create the corrections
    :param run_details: The run details of the run to use
    """
    settings = self._inst_settings
    if not settings.do_solid_angle:
        return
    solid_angle = mantid.SolidAngle(InputWorkspace=vanadium)
    solid_angle = mantid.Scale(InputWorkspace=solid_angle, Factor=100, Operation='Multiply')

    eff = mantid.Divide(LHSWorkspace=vanadium, RHSWorkspace=solid_angle)
    eff = mantid.ConvertUnits(InputWorkspace=eff, Target='Wavelength')
    integration_range = settings.eff_integration_range
    # use full range if no range is supplied
    integration_range = integration_range if integration_range is not None else (None, None)
    eff = mantid.Integration(InputWorkspace=eff,
                             RangeLower=integration_range[0],
                             RangeUpper=integration_range[1])

    correction = mantid.Multiply(LHSWorkspace=solid_angle, RHSWorkspace=eff)
    correction = mantid.Scale(InputWorkspace=correction, Factor=1e-5, Operation='Multiply')
    name = "sac" + common.generate_splined_name(run_details.run_number, [])
    path = run_details.van_paths

    mantid.SaveNexus(InputWorkspace=correction, Filename=os.path.join(path, name))
    common.remove_intermediate_workspace(eff)
    common.remove_intermediate_workspace(correction)
def calculate_scaled_hab_output(self, shift, scale, sample_count_secondary, sample_norm_secondary,
                                can_count_secondary, can_norm_secondary):
    scaled_norm_front = mantid_api.Scale(InputWorkspace=sample_norm_secondary,
                                         Factor=1.0 / scale,
                                         Operation='Multiply',
                                         StoreInADS=False)
    shifted_norm_front = mantid_api.Scale(InputWorkspace=sample_norm_secondary,
                                          Factor=shift,
                                          Operation='Multiply',
                                          StoreInADS=False)
    numerator = mantid_api.Plus(LHSWorkspace=sample_count_secondary,
                                RHSWorkspace=shifted_norm_front,
                                StoreInADS=False)
    hab_sample = mantid_api.Divide(LHSWorkspace=numerator,
                                   RHSWorkspace=scaled_norm_front,
                                   StoreInADS=False)

    if can_count_secondary is not None and can_norm_secondary is not None:
        scaled_norm_front_can = mantid_api.Scale(InputWorkspace=can_norm_secondary,
                                                 Factor=1.0 / scale,
                                                 Operation='Multiply',
                                                 StoreInADS=False)
        hab_can = mantid_api.Divide(LHSWorkspace=can_count_secondary,
                                    RHSWorkspace=scaled_norm_front_can,
                                    StoreInADS=False)
        hab_sample = mantid_api.Minus(LHSWorkspace=hab_sample,
                                      RHSWorkspace=hab_can,
                                      StoreInADS=False)
    return hab_sample
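
# Illustration (not part of the reduction code): the workspace algebra above
# reduces to (counts + shift * norm) / (norm / scale), with an optional can
# term subtracted. A minimal numpy sketch with toy arrays:
import numpy as np

def _scaled_hab_sketch(counts, norm, shift, scale, can_counts=None, can_norm=None):
    """Plain-array version of the calculate_scaled_hab_output arithmetic."""
    sample = (counts + shift * norm) / (norm / scale)
    if can_counts is not None and can_norm is not None:
        sample = sample - can_counts / (can_norm / scale)
    return sample

print(_scaled_hab_sketch(np.array([4.0, 6.0]), np.array([2.0, 2.0]),
                         shift=0.1, scale=0.9))  # [1.89, 2.79]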
def subtract_summed_runs(ws_to_correct, empty_sample_ws_string, instrument, scale_factor=None):
    """
    Loads the list of empty runs specified by empty_sample_ws_string and
    subtracts them from the specified workspace. Returns the subtracted workspace.
    :param ws_to_correct: The workspace to subtract the empty instrument runs from
    :param empty_sample_ws_string: The empty run numbers to subtract from the workspace
    :param instrument: The instrument object these runs belong to
    :param scale_factor: The percentage to scale the loaded runs by
    :return: The workspace with the empty runs subtracted
    """
    # Skip this step if no empty runs were specified, or if the workspace has
    # no current, as subtracting an empty would then give negative counts
    if empty_sample_ws_string is None or not workspace_has_current(ws_to_correct):
        return ws_to_correct

    empty_sample = load_current_normalised_ws_list(run_number_string=empty_sample_ws_string,
                                                   instrument=instrument,
                                                   input_batching=INPUT_BATCHING.Summed)
    empty_sample = empty_sample[0]
    if scale_factor:
        empty_sample = mantid.Scale(InputWorkspace=empty_sample, OutputWorkspace=empty_sample,
                                    Factor=scale_factor, Operation="Multiply")
    try:
        mantid.Minus(LHSWorkspace=ws_to_correct, RHSWorkspace=empty_sample,
                     OutputWorkspace=ws_to_correct)
    except ValueError:
        raise ValueError("The empty run(s) specified for this file do not have matching binning."
                         " Do the TOF windows of the empty and sample match?")

    remove_intermediate_workspace(empty_sample)
    return ws_to_correct
def subtract_summed_runs(ws_to_correct, empty_sample_ws_string, instrument, scale_factor=None):
    """
    Loads the list of empty runs specified by the empty_sample_ws_string and subtracts
    them from the workspace specified. Returns the subtracted workspace.
    :param ws_to_correct: The workspace to subtract the empty instrument runs from
    :param empty_sample_ws_string: The empty run numbers to subtract from the workspace
    :param instrument: The instrument object these runs belong to
    :param scale_factor: The percentage to scale the loaded runs by
    :return: The workspace with the empty runs subtracted
    """
    # If an empty run was not specified just return to skip this step
    if empty_sample_ws_string is None:
        return ws_to_correct

    empty_sample = load_current_normalised_ws_list(run_number_string=empty_sample_ws_string,
                                                   instrument=instrument,
                                                   input_batching=INPUT_BATCHING.Summed)
    empty_sample = empty_sample[0]
    if scale_factor:
        empty_sample = mantid.Scale(InputWorkspace=empty_sample, OutputWorkspace=empty_sample,
                                    Factor=scale_factor, Operation="Multiply")
    mantid.Minus(LHSWorkspace=ws_to_correct, RHSWorkspace=empty_sample,
                 OutputWorkspace=ws_to_correct)
    remove_intermediate_workspace(empty_sample)

    return ws_to_correct
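
# Illustration (not part of the production code): with matching binning, the
# empty-run handling above is just "scale, then subtract". Numpy sketch:
import numpy as np

sample = np.array([10.0, 12.0, 9.0])
empty = np.array([2.0, 2.5, 1.5])
scale_factor = 0.8  # optional scaling applied to the empty runs

print(sample - scale_factor * empty)  # [8.4, 10.0, 7.8]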
def _calibData(self, sam_ws, mon_ws):
    sapi.MaskDetectors(Workspace=sam_ws, DetectorList=self._dMask)
    sapi.ModeratorTzeroLinear(InputWorkspace=sam_ws, OutputWorkspace=sam_ws)
    sapi.LoadParameterFile(Workspace=sam_ws,
                           Filename=pjoin(DEFAULT_CONFIG_DIR, self._reflection["parameter_file"]))
    sapi.ConvertUnits(InputWorkspace=sam_ws, OutputWorkspace=sam_ws,
                      Target='Wavelength', EMode='Indirect')
    if self._MonNorm:
        sapi.ModeratorTzeroLinear(InputWorkspace=mon_ws, OutputWorkspace=mon_ws)
        sapi.Rebin(InputWorkspace=mon_ws, OutputWorkspace=mon_ws, Params='10')
        sapi.ConvertUnits(InputWorkspace=mon_ws, OutputWorkspace=mon_ws, Target='Wavelength')
        sapi.OneMinusExponentialCor(InputWorkspace=mon_ws, OutputWorkspace=mon_ws,
                                    C='0.20749999999999999', C1='0.001276')
        sapi.Scale(InputWorkspace=mon_ws, OutputWorkspace=mon_ws, Factor='1e-06')
        sapi.RebinToWorkspace(WorkspaceToRebin=sam_ws, WorkspaceToMatch=mon_ws,
                              OutputWorkspace=sam_ws)
        sapi.Divide(LHSWorkspace=sam_ws, RHSWorkspace=mon_ws, OutputWorkspace=sam_ws)
def _sum_groups_of_three_ws(calibrated_spectra, output_file_names):
    workspace_list = []
    output_list = []
    for outer_loop_count in range(0, 3):
        # First clone workspaces 1/4/7
        pass_multiplier = (outer_loop_count * 3)
        workspace_names = "focus_mode_groups-" + str(pass_multiplier + 1)
        workspace_list.append(mantid.CloneWorkspace(InputWorkspace=calibrated_spectra[pass_multiplier],
                                                    OutputWorkspace=workspace_names))
        # Then add workspaces 1+2+3 / 4+5+6 / 7+8+9
        for i in range(1, 3):
            input_ws_index = i + pass_multiplier  # Workspaces 2/3 * n
            inner_workspace_names = "focus_mode_groups-" + str(input_ws_index)
            workspace_list[outer_loop_count] = mantid.Plus(LHSWorkspace=workspace_list[outer_loop_count],
                                                           RHSWorkspace=calibrated_spectra[input_ws_index],
                                                           OutputWorkspace=inner_workspace_names)

        # Finally scale the output workspaces
        mod_first_number = str((outer_loop_count * 3) + 1)  # Generates 1/4/7
        mod_last_number = str((outer_loop_count + 1) * 3)  # Generates 3/6/9
        workspace_names = output_file_names["output_name"] + "_mod" + mod_first_number + '-' + mod_last_number
        output_list.append(mantid.Scale(InputWorkspace=workspace_list[outer_loop_count],
                                        OutputWorkspace=workspace_names,
                                        Factor=0.333333333333))
    for ws in workspace_list:
        remove_intermediate_workspace(ws)
    return output_list
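
# Illustration (not part of the production code): the index bookkeeping above
# averages banks in blocks of three (1-3, 4-6, 7-9). The same grouping in
# plain numpy, using toy banks:
import numpy as np

banks = [np.full(4, float(i + 1)) for i in range(9)]  # toy banks valued 1..9
groups = [sum(banks[i * 3:(i + 1) * 3]) / 3.0 for i in range(3)]
print([g[0] for g in groups])  # [2.0, 5.0, 8.0]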
def PyExec(self):
    input_ws = self.getProperty("InputWorkspace").value

    # Determine whether we should use the input thickness or try
    # to read it from the run properties
    thickness = self.getProperty("SampleThickness").value
    if thickness <= 0:
        if input_ws.getRun().hasProperty("sample-thickness"):
            thickness = input_ws.getRun().getProperty("sample-thickness").value
            if thickness <= 0:
                Logger("NormaliseByThickness").error(
                    "NormaliseByThickness could not get the sample thickness")
                return
        else:
            Logger("NormaliseByThickness").error(
                "NormaliseByThickness could not get the sample thickness")
            return

    output_ws_name = self.getPropertyValue("OutputWorkspace")
    api.Scale(InputWorkspace=input_ws, OutputWorkspace=output_ws_name,
              Factor=1.0 / thickness, Operation="Multiply")

    self.setProperty("OutputWorkspace", output_ws_name)
    self.setProperty("OutputMessage", "Normalised by thickness [%g cm]" % thickness)
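
# Illustration (not part of the algorithm): the thickness-resolution logic
# above, sketched with a plain dict standing in for the Mantid run object:
def _resolve_thickness(user_thickness, run_properties):
    if user_thickness > 0:
        return user_thickness
    thickness = run_properties.get("sample-thickness", -1.0)
    return thickness if thickness > 0 else None  # None maps to the error path

print(_resolve_thickness(0.0, {"sample-thickness": 0.1}))  # 0.1
print(_resolve_thickness(0.0, {}))                         # None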
def _calibData(self, sam_ws, mon_ws):
    api.MaskDetectors(Workspace=sam_ws, DetectorList=self._dMask)
    # MaskedWorkspace='BASIS_MASK')
    api.ModeratorTzeroLinear(InputWorkspace=sam_ws, OutputWorkspace=sam_ws)
    api.LoadParameterFile(Workspace=sam_ws,
                          Filename=config.getInstrumentDirectory() + 'BASIS_silicon_111_Parameters.xml')
    api.ConvertUnits(InputWorkspace=sam_ws, OutputWorkspace=sam_ws,
                     Target='Wavelength', EMode='Indirect')
    if not self._noMonNorm:
        api.ModeratorTzeroLinear(InputWorkspace=mon_ws, OutputWorkspace=mon_ws)
        api.Rebin(InputWorkspace=mon_ws, OutputWorkspace=mon_ws, Params='10')
        api.ConvertUnits(InputWorkspace=mon_ws, OutputWorkspace=mon_ws, Target='Wavelength')
        api.OneMinusExponentialCor(InputWorkspace=mon_ws, OutputWorkspace=mon_ws,
                                   C='0.20749999999999999', C1='0.001276')
        api.Scale(InputWorkspace=mon_ws, OutputWorkspace=mon_ws, Factor='1e-06')
        api.RebinToWorkspace(WorkspaceToRebin=sam_ws, WorkspaceToMatch=mon_ws,
                             OutputWorkspace=sam_ws)
        api.Divide(LHSWorkspace=sam_ws, RHSWorkspace=mon_ws, OutputWorkspace=sam_ws)
def get_scaled_workspaces(reduction_list, xs):
    """
    Return a list of scaled workspaces
    :param list reduction_list: list of NexusData objects
    :param str xs: cross-section name
    """
    ws_list = []

    for i in range(len(reduction_list)):
        # If we couldn't calculate the reflectivity, we won't have a workspace available
        if reduction_list[i].cross_sections[xs].reflectivity_workspace is None:
            continue

        ws_name = str(reduction_list[i].cross_sections[xs].reflectivity_workspace)
        ws_tmp = api.Scale(InputWorkspace=ws_name, OutputWorkspace=ws_name + '_scaled',
                           factor=reduction_list[i].cross_sections[xs].configuration.scaling_factor,
                           Operation='Multiply')
        api.AddSampleLog(Workspace=ws_tmp, LogName='scaling_factor',
                         LogText=str(reduction_list[i].cross_sections[xs].configuration.scaling_factor),
                         LogType='Number', LogUnit='')
        ws_list.append(ws_tmp)

    return ws_list
def _focus_mode_groups(cycle_information, output_file_paths, save_range, calibrated_spectra):
    output_list = []
    to_save = _sum_groups_of_three_ws(calibrated_spectra, output_file_paths)

    workspaces_4_to_9_name = output_file_paths["output_name"] + "_mods4-9"
    workspaces_4_to_9 = mantid.Plus(LHSWorkspace=to_save[1], RHSWorkspace=to_save[2])
    workspaces_4_to_9 = mantid.Scale(InputWorkspace=workspaces_4_to_9, Factor=0.5,
                                     OutputWorkspace=workspaces_4_to_9_name)
    to_save.append(workspaces_4_to_9)
    append = False
    index = 1
    for ws in to_save:
        if cycle_information["instrument_version"] == "new":
            mantid.SaveGSS(InputWorkspace=ws, Filename=output_file_paths["gss_filename"],
                           Append=append, Bank=index)
        elif cycle_information["instrument_version"] == "new2":
            mantid.SaveGSS(InputWorkspace=ws, Filename=output_file_paths["gss_filename"],
                           Append=False, Bank=index)

        workspace_names = ws.name()
        dspacing_ws = mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=workspace_names,
                                          Target="dSpacing")
        remove_intermediate_workspace(ws)
        output_list.append(dspacing_ws)

        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=dspacing_ws,
                         Append=append)
        append = True
        index += 1

    for i in range(0, save_range):
        monitor_ws_name = output_file_paths["output_name"] + "_mod" + str(i + 10)
        monitor_ws = calibrated_spectra[i + 9]
        to_save = mantid.CloneWorkspace(InputWorkspace=monitor_ws, OutputWorkspace=monitor_ws_name)

        mantid.SaveGSS(InputWorkspace=to_save, Filename=output_file_paths["gss_filename"],
                       Append=True, Bank=i + 5)
        to_save = mantid.ConvertUnits(InputWorkspace=to_save, OutputWorkspace=monitor_ws_name,
                                      Target="dSpacing")
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=to_save,
                         Append=True)

        output_list.append(to_save)
    return output_list
def _focus_mode_trans(output_file_paths, atten, instrument, calibrated_spectra):
    summed_ws = mantid.CloneWorkspace(InputWorkspace=calibrated_spectra[0])
    for i in range(1, 9):  # Add workspaces 2-9 to workspace 1
        summed_ws = mantid.Plus(LHSWorkspace=summed_ws, RHSWorkspace=calibrated_spectra[i])

    summed_ws = mantid.Scale(InputWorkspace=summed_ws, Factor=0.111111111111111)
    if atten:
        # Clone a workspace which is not attenuated
        no_att = output_file_paths["output_name"] + "_noatten"
        mantid.CloneWorkspace(InputWorkspace=summed_ws, OutputWorkspace=no_att)

        summed_ws = mantid.ConvertUnits(InputWorkspace=summed_ws, Target="dSpacing")
        summed_ws = instrument._attenuate_workspace(summed_ws)
        summed_ws = mantid.ConvertUnits(InputWorkspace=summed_ws, Target="TOF")

    mantid.SaveGSS(InputWorkspace=summed_ws, Filename=output_file_paths["gss_filename"],
                   Append=False, Bank=1)
    mantid.SaveFocusedXYE(InputWorkspace=summed_ws,
                          Filename=output_file_paths["tof_xye_filename"],
                          Append=False, IncludeHeader=False)

    summed_ws = mantid.ConvertUnits(InputWorkspace=summed_ws, Target="dSpacing")

    # Rename to user friendly name:
    summed_ws_name = output_file_paths["output_name"] + "_mods1-9"
    summed_ws = mantid.RenameWorkspace(InputWorkspace=summed_ws, OutputWorkspace=summed_ws_name)

    mantid.SaveFocusedXYE(InputWorkspace=summed_ws,
                          Filename=output_file_paths["dspacing_xye_filename"],
                          Append=False, IncludeHeader=False)
    mantid.SaveNexus(InputWorkspace=summed_ws, Filename=output_file_paths["nxs_filename"],
                     Append=False)

    output_list = [summed_ws]

    for i in range(0, 9):
        workspace_name = output_file_paths["output_name"] + "_mod" + str(i + 1)
        to_save = mantid.ConvertUnits(InputWorkspace=calibrated_spectra[i], Target="dSpacing",
                                      OutputWorkspace=workspace_name)
        output_list.append(to_save)
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=to_save,
                         Append=True)

    return output_list
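
# Illustration (not part of the production code): the 0.111111111111111
# factor above is 1/9 -- the nine focused banks are summed and averaged.
# Numpy sketch:
import numpy as np

banks = [np.full(3, float(i + 1)) for i in range(9)]  # toy banks valued 1..9
print(sum(banks) * 0.111111111111111)  # ~[5.0, 5.0, 5.0]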
def merge_reflectivity(reduction_list, xs, q_min=0.001, q_step=-0.01):
    """
    Combine the workspaces for a given cross-section into a single workspace.

    TODO: trim workspaces
        trim_first = [item.cross_sections[pol_state].configuration.cut_first_n_points for item in self.data_manager.reduction_list]
        trim_last = [item.cross_sections[pol_state].configuration.cut_last_n_points for item in self.data_manager.reduction_list]
    """
    ws_list = []
    scaling_factors = []
    q_max = q_min

    for i in range(len(reduction_list)):
        # If we couldn't calculate the reflectivity, we won't have a workspace available
        if reduction_list[i].cross_sections[xs].reflectivity_workspace is None:
            continue

        _, _q_max = reduction_list[i].get_q_range()
        q_max = max(q_max, _q_max)
        ws_name = str(reduction_list[i].cross_sections[xs].reflectivity_workspace)
        # Stitch1DMany only scales workspaces relative to the first one
        if i == 0:
            api.Scale(InputWorkspace=ws_name, OutputWorkspace=ws_name + '_histo',
                      factor=reduction_list[i].cross_sections[xs].configuration.scaling_factor,
                      Operation='Multiply')
            api.ConvertToHistogram(InputWorkspace=ws_name + '_histo',
                                   OutputWorkspace=ws_name + '_histo')
        else:
            scaling_factors.append(
                reduction_list[i].cross_sections[xs].configuration.scaling_factor)
            api.ConvertToHistogram(InputWorkspace=ws_name, OutputWorkspace=ws_name + '_histo')
        ws_list.append(ws_name + '_histo')

    params = "%s, %s, %s" % (q_min, q_step, q_max)

    if len(ws_list) > 1:
        merged_ws, _ = api.Stitch1DMany(InputWorkspaces=ws_list, Params=params,
                                        UseManualScaleFactors=True,
                                        ManualScaleFactors=scaling_factors,
                                        OutputWorkspace=ws_name + "_merged")
    elif len(ws_list) == 1:
        merged_ws = api.CloneWorkspace(ws_list[0], OutputWorkspace=ws_name + "_merged")
    else:
        return None

    # Remove temporary workspaces
    for ws in ws_list:
        api.DeleteWorkspace(ws)

    api.SaveAscii(InputWorkspace=merged_ws, Filename="/tmp/test.txt")
    return merged_ws
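
# Illustration (not part of the production code): the Params string handed to
# Stitch1DMany above is "q_min, q_step, q_max", following the usual Mantid
# rebin convention where a negative step requests logarithmic binning; q_max
# is taken from the widest input Q range:
q_min, q_step, q_max = 0.001, -0.01, 0.18  # toy values
print("%s, %s, %s" % (q_min, q_step, q_max))  # "0.001, -0.01, 0.18"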
def _generate_flux_spectrum(self, run_set, sam_ws):
    r"""
    Retrieve the aggregate flux and create a spectrum of intensities
    versus wavelength such that intensities will be similar for any of
    the possible flux normalization types.

    Parameters
    ----------
    run_set: list
        Run numbers making up the aggregated sample workspace
    sam_ws: str
        Name of aggregated sample workspace

    Returns
    -------
    str
        Name of aggregated flux workspace (output workspace)
    """
    flux_binning = [1.5, 0.0005, 7.5]  # wavelength binning
    suffix = re.sub('[^0-9a-zA-Z]+', '_', self._flux_normalization_type)
    flux_ws = tws(self._make_run_name(run_set[0]) + '_' + suffix)
    if self._MonNorm:
        self._sum_monitors(run_set, flux_ws)
        rpf = self._elucidate_reflection_parameter_file(sam_ws)
        sapi.LoadParameterFile(Workspace=flux_ws, Filename=rpf)
        sapi.ModeratorTzeroLinear(InputWorkspace=flux_ws, OutputWorkspace=flux_ws)
        sapi.Rebin(InputWorkspace=flux_ws, OutputWorkspace=flux_ws,
                   Params='10',  # 10 microseconds TOF bin width
                   PreserveEvents=False)
        sapi.ConvertUnits(InputWorkspace=flux_ws, OutputWorkspace=flux_ws,
                          Target='Wavelength')
        sapi.OneMinusExponentialCor(InputWorkspace=flux_ws, OutputWorkspace=flux_ws,
                                    C='0.20749999999999999', C1='0.001276')
        sapi.Scale(InputWorkspace=flux_ws, OutputWorkspace=flux_ws, Factor='1e-06')
        sapi.Rebin(InputWorkspace=flux_ws, OutputWorkspace=flux_ws, Params=flux_binning)
    else:
        ws = mtd[sam_ws].getRun()
        if self._flux_normalization_type == 'Proton Charge':
            aggregate_flux = ws.getProtonCharge()
        elif self._flux_normalization_type == 'Duration':
            aggregate_flux = ws.getProperty('duration').value
        # These factors ensure intensities typical of flux workspaces
        # derived from monitor data
        f = {'Proton Charge': 0.00874, 'Duration': 0.003333}
        x = np.arange(flux_binning[0], flux_binning[2], flux_binning[1])
        y = f[self._flux_normalization_type] * aggregate_flux * np.ones(len(x) - 1)
        _flux_ws = sapi.CreateWorkspace(OutputWorkspace=flux_ws, DataX=x, DataY=y,
                                        UnitX='Wavelength')
        _flux_ws.setYUnit(mtd[sam_ws].YUnit())
    return flux_ws
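
# Illustration (not part of the production code): the monitor-free branch
# above builds a flat wavelength spectrum whose height mimics a monitor-
# derived flux. The 0.00874 / 0.003333 factors are the empirical ones from
# the function; the aggregate flux value here is a toy number:
import numpy as np

flux_binning = [1.5, 0.0005, 7.5]
aggregate_flux = 1000.0  # e.g. proton charge read from the run logs
f = {'Proton Charge': 0.00874, 'Duration': 0.003333}
x = np.arange(flux_binning[0], flux_binning[2], flux_binning[1])
y = f['Proton Charge'] * aggregate_flux * np.ones(len(x) - 1)
print(x.shape, y.shape)  # bin boundaries vs. counts (histogram convention)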
def _focus_mode_trans(output_file_paths, attenuation_filepath, calibrated_spectra):
    summed_ws = mantid.MergeRuns(InputWorkspaces=calibrated_spectra[:9])
    xList = summed_ws.readX(0)

    summed_ws = mantid.CropWorkspace(InputWorkspace=summed_ws, XMin=xList[1], XMax=xList[-2])
    summed_ws = mantid.Scale(InputWorkspace=summed_ws, Factor=0.111111111111111)

    if attenuation_filepath:
        summed_ws = _attenuate_workspace(output_file_paths=output_file_paths,
                                         attenuated_ws=summed_ws,
                                         attenuation_filepath=attenuation_filepath)

    summed_ws = mantid.ConvertUnits(InputWorkspace=summed_ws, Target="TOF")
    mantid.SaveGSS(InputWorkspace=summed_ws, Filename=output_file_paths["gss_filename"],
                   Append=False, Bank=1)
    mantid.SaveFocusedXYE(InputWorkspace=summed_ws,
                          Filename=output_file_paths["tof_xye_filename"],
                          Append=False, IncludeHeader=False)

    summed_ws = mantid.ConvertUnits(InputWorkspace=summed_ws, Target="dSpacing")

    # Rename to user friendly name:
    summed_ws_name = output_file_paths["output_name"] + "_mods1-9"
    summed_ws = mantid.RenameWorkspace(InputWorkspace=summed_ws, OutputWorkspace=summed_ws_name)

    mantid.SaveFocusedXYE(InputWorkspace=summed_ws,
                          Filename=output_file_paths["dspacing_xye_filename"],
                          Append=False, IncludeHeader=False)
    mantid.SaveNexus(InputWorkspace=summed_ws, Filename=output_file_paths["nxs_filename"],
                     Append=False)

    output_list = [summed_ws]

    for i in range(0, 9):
        workspace_name = output_file_paths["output_name"] + "_mod" + str(i + 1)
        to_save = mantid.ConvertUnits(InputWorkspace=calibrated_spectra[i], Target="dSpacing",
                                      OutputWorkspace=workspace_name)
        output_list.append(to_save)
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=to_save,
                         Append=True)

    return output_list
def _focus_mode_all(output_file_paths, processed_spectra, attenuation_filepath):
    summed_spectra_name = output_file_paths["output_name"] + "_mods1-9"
    summed_spectra = mantid.MergeRuns(InputWorkspaces=processed_spectra[:9],
                                      OutputWorkspace=summed_spectra_name)
    summed_spectra = mantid.Scale(InputWorkspace=summed_spectra, Factor=0.111111111111111,
                                  OutputWorkspace=summed_spectra_name)
    if attenuation_filepath:
        summed_spectra = _attenuate_workspace(output_file_paths=output_file_paths,
                                              attenuated_ws=summed_spectra,
                                              attenuation_filepath=attenuation_filepath)

    summed_spectra = mantid.ConvertUnits(InputWorkspace=summed_spectra, Target="TOF",
                                         OutputWorkspace=summed_spectra_name)
    mantid.SaveGSS(InputWorkspace=summed_spectra, Filename=output_file_paths["gss_filename"],
                   Append=False, Bank=1)

    summed_spectra = mantid.ConvertUnits(InputWorkspace=summed_spectra, Target="dSpacing",
                                         OutputWorkspace=summed_spectra_name)
    mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=summed_spectra,
                     Append=False)

    output_list = [summed_spectra]
    for i in range(0, 5):
        spectra_index = (i + 9)  # Compensate for 0 based index
        ws_to_save = processed_spectra[spectra_index]  # Save out workspaces 10-14
        output_name = output_file_paths["output_name"] + "_mod" + str(spectra_index + 1)
        ws_to_save = mantid.ConvertUnits(InputWorkspace=ws_to_save, OutputWorkspace=ws_to_save,
                                         Target="TOF")
        mantid.SaveGSS(InputWorkspace=ws_to_save, Filename=output_file_paths["gss_filename"],
                       Append=True, Bank=i + 2)
        ws_to_save = mantid.ConvertUnits(InputWorkspace=ws_to_save, OutputWorkspace=output_name,
                                         Target="dSpacing")
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=ws_to_save,
                         Append=True)
        output_list.append(ws_to_save)

    return output_list
def _ScaleY(self, wsName):
    """
    Scale all spectra by a number so that the maximum of the first spectrum
    is rescaled to 1
    @param wsName: name of the workspace to rescale
    """
    workspace = sapi.mtd[wsName]
    maximumYvalue = workspace.dataY(0).max()
    sapi.Scale(InputWorkspace=wsName, OutputWorkspace=wsName,
               Factor=1. / maximumYvalue, Operation="Multiply")
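
# Illustration (not part of the production code): the same normalisation in
# plain numpy -- scale so the first spectrum's maximum becomes 1:
import numpy as np

spectrum = np.array([0.2, 0.5, 2.0, 1.0])
print(spectrum / spectrum.max())  # [0.1, 0.25, 1.0, 0.5]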
def final_fit(fit_ws_name, constraints, y_range, correct_for_offsets, masses, g_log):
    function = """
    composite=Convolution,FixResolution=true,NumDeriv=true;
    name=Resolution,Workspace=resolution,WorkspaceIndex=0,X=(),Y=();
    name=UserFunction,Formula=exp( -x^2/2./sigma1^2)*(1.+c4/32.*(16.*(x/sqrt(2)/sigma1)^4-48.*(x/sqrt(2)/sigma1)^2+12)+c6/384.*( 64.*(x/sqrt(2)/sigma1)^6 -480.*(x/sqrt(2)/sigma1)^4 +720.*(x/sqrt(2)/sigma1)^2 -120.) )*A + B0,
    sigma1=3.0,c4=0.0, c6=0.0,A=0.08, B0=0.00,
    ties = (c6=0. )
    """
    function += constraints
    minimiser = "Simplex"
    sapi.Fit(Function=function, InputWorkspace=fit_ws_name, MaxIterations=2000,
             Minimizer=minimiser, Output=fit_ws_name, OutputCompositeMembers=True,
             StartX=y_range[0], EndX=y_range[1])

    ws = sapi.mtd[fit_ws_name + "_Parameters"]
    g_log.notice("\n Final parameters \n")
    g_log.notice("width: %s +/- %s A-1" % (ws.cell(0, 1), ws.cell(0, 2)))
    g_log.notice("c4: %s +/- %s A-1" % (ws.cell(1, 1), ws.cell(1, 2)))
    sigma_to_energy = 1.5 * 2.0445 ** 2 / masses[0]
    g_log.notice("mean kinetic energy: %s +/- %s meV" %
                 (sigma_to_energy * ws.cell(0, 1) ** 2,
                  2. * sigma_to_energy * ws.cell(0, 2) * ws.cell(0, 1)))

    if correct_for_offsets:
        sapi.Scale(InputWorkspace=fit_ws_name, Factor=-ws.cell(4, 1), Operation="Add",
                   OutputWorkspace=fit_ws_name + '_cor')
        sapi.Scale(InputWorkspace=fit_ws_name + '_cor',
                   Factor=(2. * np.pi) ** (-0.5) / ws.cell(0, 1) / ws.cell(3, 1),
                   Operation="Multiply", OutputWorkspace=fit_ws_name + '_cor')
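
# Illustration (not part of the production code): the mean-kinetic-energy
# conversion used above scales the squared fitted width by
# 1.5 * 2.0445**2 / mass. Toy values for a hydrogen-like mass:
mass = 1.0079   # a.m.u.
sigma = 4.5     # fitted width, A^-1
sigma_to_energy = 1.5 * 2.0445 ** 2 / mass
print(sigma_to_energy * sigma ** 2, "meV")  # ~125.9 meV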
def _sum_groups_of_three_ws(calibrated_spectra, output_file_names):
    output_list = []
    for i in range(3):
        ws_name = output_file_names["output_name"] + "_mods{}-{}".format(i * 3 + 1, (i + 1) * 3)
        summed_spectra = mantid.MergeRuns(InputWorkspaces=calibrated_spectra[i * 3:(i + 1) * 3],
                                          OutputWorkspace=ws_name)
        scaled = mantid.Scale(InputWorkspace=summed_spectra, Factor=1. / 3,
                              OutputWorkspace=ws_name)
        output_list.append(scaled)
    return output_list
def _focus_mode_all(output_file_paths, calibrated_spectra):
    first_spectrum = calibrated_spectra[0]
    summed_spectra = mantid.CloneWorkspace(InputWorkspace=first_spectrum)

    for i in range(1, 9):  # TODO why is this 1-8 -- it adds workspaces 2-9 to the clone of workspace 1
        summed_spectra = mantid.Plus(LHSWorkspace=summed_spectra,
                                     RHSWorkspace=calibrated_spectra[i])

    summed_spectra_name = output_file_paths["output_name"] + "_mods1-9"
    summed_spectra = mantid.Scale(InputWorkspace=summed_spectra, Factor=0.111111111111111,
                                  OutputWorkspace=summed_spectra_name)
    mantid.SaveGSS(InputWorkspace=summed_spectra, Filename=output_file_paths["gss_filename"],
                   Append=False, Bank=1)

    summed_spectra = mantid.ConvertUnits(InputWorkspace=summed_spectra, Target="dSpacing",
                                         OutputWorkspace=summed_spectra_name)
    mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=summed_spectra,
                     Append=False)

    output_list = [summed_spectra]
    for i in range(0, 3):
        spectra_index = (i + 9)  # We want workspaces 10/11/12 so compensate for 0 based index
        ws_to_save = calibrated_spectra[spectra_index]  # Save out workspaces 10/11/12
        output_name = output_file_paths["output_name"] + "_mod" + str(spectra_index + 1)
        mantid.SaveGSS(InputWorkspace=ws_to_save, Filename=output_file_paths["gss_filename"],
                       Append=True, Bank=i + 2)
        ws_to_save = mantid.ConvertUnits(InputWorkspace=ws_to_save, OutputWorkspace=output_name,
                                         Target="dSpacing")
        output_list.append(ws_to_save)
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=ws_to_save,
                         Append=True)

    return output_list
def generate_summed_runs(empty_sample_ws_string, instrument, scale_factor=None):
    """
    Loads the list of empty runs specified by the empty_sample_ws_string
    and sums them (and optionally scales). Returns the summed workspace.
    :param empty_sample_ws_string: The empty run numbers to sum
    :param instrument: The instrument object these runs belong to
    :param scale_factor: The percentage to scale the loaded runs by
    :return: The summed and normalised empty runs
    """
    empty_sample = load_current_normalised_ws_list(run_number_string=empty_sample_ws_string,
                                                   instrument=instrument,
                                                   input_batching=INPUT_BATCHING.Summed)
    empty_sample = empty_sample[0]

    if scale_factor:
        empty_sample = mantid.Scale(InputWorkspace=empty_sample, OutputWorkspace=empty_sample,
                                    Factor=scale_factor, Operation="Multiply")
    return empty_sample
def calc_calibration_with_vanadium(focused_ws, index, vanadium_ws, instrument):
    data_ws = mantid.ExtractSingleSpectrum(InputWorkspace=focused_ws, WorkspaceIndex=index)
    data_ws = mantid.ConvertUnits(InputWorkspace=data_ws, Target="TOF")
    data_ws = mantid.Rebin(InputWorkspace=data_ws, Params=instrument._get_focus_tof_binning())

    data_processed = "van_processed" + str(index)  # Workaround for Mantid overwriting the WS in a loop

    mantid.Divide(LHSWorkspace=data_ws, RHSWorkspace=vanadium_ws, OutputWorkspace=data_processed)
    mantid.CropWorkspace(InputWorkspace=data_processed, XMin=0.1, OutputWorkspace=data_processed)
    mantid.Scale(InputWorkspace=data_processed, Factor=10, OutputWorkspace=data_processed)

    remove_intermediate_workspace(data_ws)

    return data_processed
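
# Illustration (not part of the production code): the vanadium normalisation
# above is divide, crop, then rescale by 10. The same chain on toy arrays:
import numpy as np

data = np.array([5.0, 10.0, 15.0])
vanadium = np.array([1.0, 2.0, 3.0])
print(10.0 * (data / vanadium))  # [50.0, 50.0, 50.0]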
def PyExec(self):
    ms.ExtractSingleSpectrum(InputWorkspace=self._input_ws,
                             OutputWorkspace=self._output_ws,
                             WorkspaceIndex=self._spec_idx)

    # Performs corrections
    self._define_corrections()

    # The workspaces to fit for correction scale factors
    fit_corrections = [wks for wks in self._correction_workspaces
                       if 'MultipleScattering' not in wks]

    # Perform fitting of corrections
    fixed_params = {}

    fixed_gamma_factor = self.getProperty("GammaBackgroundScale").value
    if fixed_gamma_factor != 0.0 and not self._back_scattering:
        fixed_params['GammaBackground'] = fixed_gamma_factor

    fixed_container_scale = self.getProperty("ContainerScale").value
    if fixed_container_scale != 0.0:
        fixed_params['Container'] = fixed_container_scale

    params_ws = self._fit_corrections(fit_corrections, self._linear_fit_table, **fixed_params)
    self.setProperty("LinearFitResult", params_ws)

    # Scale gamma background
    if self.getProperty("GammaBackground").value and not self._back_scattering:
        gamma_correct_ws = self._get_correction_workspace('GammaBackground')[1]
        gamma_factor = self._get_correction_scale_factor('GammaBackground',
                                                         fit_corrections, params_ws)
        ms.Scale(InputWorkspace=gamma_correct_ws, OutputWorkspace=gamma_correct_ws,
                 Factor=gamma_factor)

    # Scale multiple scattering
    if self.getProperty("MultipleScattering").value:
        # Use factor of total scattering as this includes single and multiple scattering
        multi_scatter_correct_ws = self._get_correction_workspace('MultipleScattering')[1]
        total_scatter_correct_ws = self._get_correction_workspace('TotalScattering')[1]
        total_scatter_factor = self._get_correction_scale_factor('TotalScattering',
                                                                 fit_corrections, params_ws)
        ms.Scale(InputWorkspace=multi_scatter_correct_ws,
                 OutputWorkspace=multi_scatter_correct_ws,
                 Factor=total_scatter_factor)
        ms.Scale(InputWorkspace=total_scatter_correct_ws,
                 OutputWorkspace=total_scatter_correct_ws,
                 Factor=total_scatter_factor)

    # Scale by container
    if self._container_ws != "":
        container_correct_ws = self._get_correction_workspace('Container')[1]
        container_factor = self._get_correction_scale_factor('Container',
                                                             fit_corrections, params_ws)
        ms.Scale(InputWorkspace=container_correct_ws, OutputWorkspace=container_correct_ws,
                 Factor=container_factor)

    # Calculate and output corrected workspaces as a WorkspaceGroup
    if self._corrected_wsg != "":
        corrected_workspaces = [ws_name.replace(self._correction_wsg, self._corrected_wsg)
                                for ws_name in self._correction_workspaces]
        for corrected, correction in zip(corrected_workspaces, self._correction_workspaces):
            ms.Minus(LHSWorkspace=self._output_ws, RHSWorkspace=correction,
                     OutputWorkspace=corrected)
        ms.GroupWorkspaces(InputWorkspaces=corrected_workspaces,
                           OutputWorkspace=self._corrected_wsg)
        self.setProperty("CorrectedWorkspaces", self._corrected_wsg)

    # Apply corrections
    for correction in self._correction_workspaces:
        if 'TotalScattering' not in correction:
            ms.Minus(LHSWorkspace=self._output_ws, RHSWorkspace=correction,
                     OutputWorkspace=self._output_ws)

    self.setProperty("OutputWorkspace", self._output_ws)

    # Remove correction workspaces if they are no longer required
    if self._correction_wsg == "":
        for wksp in self._correction_workspaces:
            ms.DeleteWorkspace(wksp)
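
# Illustration (not part of the algorithm): the final application step above
# subtracts every fitted correction except TotalScattering from the data.
# Numpy sketch with toy correction workspaces:
import numpy as np

data = np.array([10.0, 10.0])
corrections = {'GammaBackground': np.array([1.0, 0.5]),
               'MultipleScattering': np.array([2.0, 1.0]),
               'TotalScattering': np.array([3.0, 2.0])}  # fitted but excluded
for name, corr in corrections.items():
    if 'TotalScattering' not in name:
        data = data - corr
print(data)  # [7.0, 8.5]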
def _focus_mode_all(output_file_paths, processed_spectra, attenuation_filepath):
    summed_spectra_name = output_file_paths["output_name"] + "_mods1-9"
    summed_spectra = mantid.MergeRuns(InputWorkspaces=processed_spectra[:9],
                                      OutputWorkspace=summed_spectra_name)
    xList = summed_spectra.readX(0)

    summed_spectra = mantid.CropWorkspace(InputWorkspace=summed_spectra,
                                          XMin=xList[1], XMax=xList[-2])
    summed_spectra = mantid.Scale(InputWorkspace=summed_spectra, Factor=0.111111111111111,
                                  OutputWorkspace=summed_spectra_name)
    if attenuation_filepath:
        summed_spectra = _attenuate_workspace(output_file_paths=output_file_paths,
                                              attenuated_ws=summed_spectra,
                                              attenuation_filepath=attenuation_filepath)

    summed_spectra = mantid.ConvertUnits(InputWorkspace=summed_spectra, Target="TOF",
                                         OutputWorkspace=summed_spectra_name)
    mantid.SaveGSS(InputWorkspace=summed_spectra, Filename=output_file_paths["gss_filename"],
                   Append=False, Bank=1)
    summed_spectra = mantid.ConvertUnits(InputWorkspace=summed_spectra, Target="dSpacing",
                                         OutputWorkspace=summed_spectra_name)
    mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=summed_spectra,
                     Append=False)
    mantid.SaveFocusedXYE(InputWorkspace=summed_spectra_name,
                          Filename=output_file_paths["tof_xye_filename"],
                          Append=False, IncludeHeader=False)

    output_list = [summed_spectra]
    for i in range(0, 5):
        spectra_index = (i + 9)  # Compensate for 0 based index
        ws_to_save = processed_spectra[spectra_index]  # Save out workspaces 10-14
        output_name = output_file_paths["output_name"] + "_mod" + str(spectra_index + 1)
        ws_to_save = mantid.ConvertUnits(InputWorkspace=ws_to_save, OutputWorkspace=ws_to_save,
                                         Target="TOF")
        mantid.SaveGSS(InputWorkspace=ws_to_save, Filename=output_file_paths["gss_filename"],
                       Append=True, Bank=i + 2)
        splits = output_file_paths["tof_xye_filename"].split(".")
        tof_xye_name = splits[0] + "-" + str(i + 10) + "." + splits[1]
        mantid.SaveFocusedXYE(InputWorkspace=ws_to_save, Filename=tof_xye_name,
                              Append=False, IncludeHeader=False)
        ws_to_save = mantid.ConvertUnits(InputWorkspace=ws_to_save, OutputWorkspace=output_name,
                                         Target="dSpacing")
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=ws_to_save,
                         Append=True)
        output_list.append(ws_to_save)

    return output_list
def _focus_mode_groups(output_file_paths, calibrated_spectra):
    output_list = []
    to_save = _sum_groups_of_three_ws(calibrated_spectra=calibrated_spectra,
                                      output_file_names=output_file_paths)

    workspaces_4_to_9_name = output_file_paths["output_name"] + "_mods4-9"
    workspaces_4_to_9 = mantid.MergeRuns(InputWorkspaces=calibrated_spectra[3:9],
                                         OutputWorkspace=workspaces_4_to_9_name)
    xList = workspaces_4_to_9.readX(0)

    workspaces_4_to_9 = mantid.CropWorkspace(InputWorkspace=workspaces_4_to_9,
                                             XMin=xList[1], XMax=xList[-2])
    workspaces_4_to_9 = mantid.Scale(InputWorkspace=workspaces_4_to_9, Factor=0.5,
                                     OutputWorkspace=workspaces_4_to_9_name)
    to_save.append(workspaces_4_to_9)
    append = False
    index = 1
    for ws in to_save:
        ws = mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=ws, Target="TOF")
        mantid.SaveGSS(InputWorkspace=ws, Filename=output_file_paths["gss_filename"],
                       Append=False, Bank=index)
        mantid.SaveFocusedXYE(InputWorkspace=ws,
                              Filename=output_file_paths["tof_xye_filename"],
                              Append=False, IncludeHeader=False)
        workspace_names = ws.name()
        ws = mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=workspace_names,
                                 Target="dSpacing")
        output_list.append(ws)
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=ws,
                         Append=append)
        append = True
        index += 1

    save_range = 5
    for i in range(0, save_range):
        monitor_ws_name = output_file_paths["output_name"] + "_mod" + str(i + 10)
        monitor_ws = calibrated_spectra[i + 9]
        to_save = mantid.CloneWorkspace(InputWorkspace=monitor_ws,
                                        OutputWorkspace=monitor_ws_name)
        to_save = mantid.ConvertUnits(InputWorkspace=to_save, OutputWorkspace=to_save,
                                      Target="TOF")
        splits = output_file_paths["tof_xye_filename"].split(".")
        tof_xye_name = splits[0] + "-" + str(i + 10) + "." + splits[1]
        mantid.SaveGSS(InputWorkspace=to_save, Filename=output_file_paths["gss_filename"],
                       Append=True, Bank=i + 5)
        mantid.SaveFocusedXYE(InputWorkspace=to_save, Filename=tof_xye_name,
                              Append=False, IncludeHeader=False)
        to_save = mantid.ConvertUnits(InputWorkspace=to_save, OutputWorkspace=monitor_ws_name,
                                      Target="dSpacing")
        mantid.SaveNexus(Filename=output_file_paths["nxs_filename"], InputWorkspace=to_save,
                         Append=True)
        output_list.append(to_save)

    return output_list
def reflectivity(self, direct_beam=None, configuration=None):
    """
    Compute reflectivity
    """
    self.q = None
    self._r = None
    self._dr = None
    if configuration is not None:
        self.configuration = copy.deepcopy(configuration)
    if self.configuration is None:
        return
    # If a direct beam object was passed, use it.
    apply_norm = direct_beam is not None  # and not self.is_direct_beam
    if not apply_norm:
        direct_beam = CrossSectionData('none', self.configuration, 'none')

    logging.info("%s:%s Reduction with DB: %s [config: %s]",
                 self.number, self.entry_name,
                 direct_beam.number, self.configuration.normalization)
    angle_offset = 0  # Offset from dangle0, in radians

    def _as_ints(a):
        return [int(round(a[0])), int(round(a[1]))]

    output_ws = "r%s_%s" % (self.number, str(self.entry_name))

    ws_norm = None
    if apply_norm and direct_beam._event_workspace is not None:
        ws_norm = direct_beam._event_workspace

    logging.info("Calc: %s %s %s",
                 str(_as_ints(self.configuration.peak_roi)),
                 str(_as_ints(self.configuration.bck_roi)),
                 str(_as_ints(self.configuration.low_res_roi)))

    _dirpix = configuration.direct_pixel_overwrite if configuration.set_direct_pixel else None
    _dangle0 = configuration.direct_angle_offset_overwrite if configuration.set_direct_angle_offset else None

    ws = api.MagnetismReflectometryReduction(
        InputWorkspace=self._event_workspace,
        NormalizationWorkspace=ws_norm,
        SignalPeakPixelRange=_as_ints(self.configuration.peak_roi),
        SubtractSignalBackground=True,
        SignalBackgroundPixelRange=_as_ints(self.configuration.bck_roi),
        ApplyNormalization=apply_norm,
        NormPeakPixelRange=_as_ints(direct_beam.configuration.peak_roi),
        SubtractNormBackground=True,
        NormBackgroundPixelRange=_as_ints(direct_beam.configuration.bck_roi),
        CutLowResDataAxis=True,
        LowResDataAxisPixelRange=_as_ints(self.configuration.low_res_roi),
        CutLowResNormAxis=True,
        LowResNormAxisPixelRange=_as_ints(direct_beam.configuration.low_res_roi),
        CutTimeAxis=True,
        QMin=0.001,
        QStep=-0.01,
        AngleOffset=angle_offset,
        UseWLTimeAxis=False,
        TimeAxisStep=self.configuration.tof_bins,
        UseSANGLE=not self.configuration.use_dangle,
        TimeAxisRange=self.configuration.tof_range,
        SpecularPixel=self.configuration.peak_position,
        ConstantQBinning=self.configuration.use_constant_q,
        # EntryName=str(self.entry_name),
        ConstQTrim=0.1,
        ErrorWeightedBackground=False,
        SampleLength=self.configuration.sample_size,
        DAngle0Overwrite=_dangle0,
        DirectPixelOverwrite=_dirpix,
        OutputWorkspace=output_ws)

    ################## FOR COMPATIBILITY WITH QUICKNXS ##################
    run_object = ws.getRun()
    peak_min = run_object.getProperty("scatt_peak_min").value
    peak_max = run_object.getProperty("scatt_peak_max").value
    low_res_min = run_object.getProperty("scatt_low_res_min").value
    low_res_max = run_object.getProperty("scatt_low_res_max").value
    norm_x_min = run_object.getProperty("norm_peak_min").value
    norm_x_max = run_object.getProperty("norm_peak_max").value
    norm_y_min = run_object.getProperty("norm_low_res_min").value
    norm_y_max = run_object.getProperty("norm_low_res_max").value
    tth = ws.getRun().getProperty("SANGLE").getStatistics().mean * math.pi / 180.0

    quicknxs_scale = (float(norm_x_max) - float(norm_x_min)) * (float(norm_y_max) - float(norm_y_min))
    quicknxs_scale /= (float(peak_max) - float(peak_min)) * (float(low_res_max) - float(low_res_min))
    quicknxs_scale *= 0.005 / math.sin(tth)

    ws = api.Scale(InputWorkspace=output_ws, OutputWorkspace=output_ws,
                   factor=quicknxs_scale, Operation='Multiply')
    #####################################################################

    self.q = ws.readX(0)[:].copy()
    self._r = ws.readY(0)[:].copy()  # * self.configuration.scaling_factor
    self._dr = ws.readE(0)[:].copy()  # * self.configuration.scaling_factor
    # DeleteWorkspace(ws)
    self._reflectivity_workspace = str(ws)
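
# Illustration (not part of the production code): the QuickNXS compatibility
# factor above is the ratio of the normalisation ROI area to the scattering
# ROI area, times 0.005/sin(two-theta). Toy pixel ranges:
import math

norm_area = (200.0 - 150.0) * (220.0 - 60.0)  # norm peak x * low-res y extents
peak_area = (180.0 - 160.0) * (220.0 - 60.0)  # scattering peak * low-res extents
tth = 0.02  # radians
print((norm_area / peak_area) * 0.005 / math.sin(tth))  # ~0.625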
def PyExec(self):
    self._setup()

    if not self._use_corrections:
        logger.information('Not using corrections')
    if not self._use_can:
        logger.information('Not using container')

    prog_container = Progress(self, start=0.0, end=0.2, nreports=4)
    prog_container.report('Starting algorithm')

    # Units should be wavelength
    sample_unit = s_api.mtd[self._sample_ws_name].getAxis(0).getUnit().unitID()
    self._convert_units_wavelength(sample_unit, self._sample_ws_name,
                                   self._sample_ws_wavelength, "Wavelength")

    if self._use_can:
        # Apply container shift if needed
        if self._shift_can:
            # Use temp workspace so we don't modify data
            prog_container.report('Shifting can')
            s_api.ScaleX(InputWorkspace=self._can_ws_name,
                         OutputWorkspace=self._shifted_container,
                         Factor=self._can_shift_factor,
                         Operation='Add')
            logger.information('Container data shifted by %f' % self._can_shift_factor)
        else:
            prog_container.report('Cloning Workspace')
            s_api.CloneWorkspace(InputWorkspace=self._can_ws_name,
                                 OutputWorkspace=self._shifted_container)

        # Apply container scale factor if needed
        if self._scale_can:
            # Use temp workspace so we don't modify original data
            prog_container.report('Scaling can')
            s_api.Scale(InputWorkspace=self._shifted_container,
                        OutputWorkspace=self._scaled_container,
                        Factor=self._can_scale_factor,
                        Operation='Multiply')
            logger.information('Container scaled by %f' % self._can_scale_factor)
        else:
            prog_container.report('Cloning Workspace')
            s_api.CloneWorkspace(InputWorkspace=self._shifted_container,
                                 OutputWorkspace=self._scaled_container)

        # Units should be wavelength
        can_unit = s_api.mtd[self._scaled_container].getAxis(0).getUnit().unitID()
        self._convert_units_wavelength(can_unit, self._scaled_container,
                                       self._scaled_container_wavelength, "Wavelength")

    prog_corr = Progress(self, start=0.2, end=0.6, nreports=2)
    if self._use_corrections:
        prog_corr.report('Preprocessing corrections')
        self._pre_process_corrections()

        if self._use_can:
            # Use container factors
            prog_corr.report('Correcting sample and can')
            self._correct_sample_can()
            correction_type = 'sample_and_can_corrections'
        else:
            # Use sample factor only
            self._correct_sample()
            correction_type = 'sample_corrections_only'

        # Add corrections filename to log values
        prog_corr.report('Correcting sample')
        s_api.AddSampleLog(Workspace=self._output_ws_name,
                           LogName='corrections_filename',
                           LogType='String',
                           LogText=self._corrections_ws_name)
    else:
        # Do simple subtraction
        self._subtract()
        correction_type = 'can_subtraction'
        # Add container filename to log values
        can_cut = self._can_ws_name.index('_')
        can_base = self._can_ws_name[:can_cut]
        prog_corr.report('Adding container filename')
        s_api.AddSampleLog(Workspace=self._output_ws_name,
                           LogName='container_filename',
                           LogType='String',
                           LogText=can_base)

    prog_wrkflow = Progress(self, 0.6, 1.0, nreports=5)
    # Record the container scale factor
    if self._use_can and self._scale_can:
        prog_wrkflow.report('Adding container scaling')
        s_api.AddSampleLog(Workspace=self._output_ws_name,
                           LogName='container_scale',
                           LogType='Number',
                           LogText=str(self._can_scale_factor))

    # Record the container shift amount
    if self._use_can and self._shift_can:
        prog_wrkflow.report('Adding container shift')
        s_api.AddSampleLog(Workspace=self._output_ws_name,
                           LogName='container_shift',
                           LogType='Number',
                           LogText=str(self._can_shift_factor))

    # Record the type of corrections applied
    prog_wrkflow.report('Adding correction type')
    s_api.AddSampleLog(Workspace=self._output_ws_name,
                       LogName='corrections_type',
                       LogType='String',
                       LogText=correction_type)

    # Add original sample as log entry
    sam_cut = self._sample_ws_name.index('_')
    sam_base = self._sample_ws_name[:sam_cut]
    prog_wrkflow.report('Adding sample filename')
    s_api.AddSampleLog(Workspace=self._output_ws_name,
                       LogName='sample_filename',
                       LogType='String',
                       LogText=sam_base)

    # Convert Units back to original
    self._convert_units_wavelength(sample_unit, self._output_ws_name,
                                   self._output_ws_name, sample_unit)

    self.setPropertyValue('OutputWorkspace', self._output_ws_name)

    # Remove temporary workspaces
    prog_wrkflow.report('Deleting Workspaces')
    if self._corrections in s_api.mtd:
        s_api.DeleteWorkspace(self._corrections)
    if self._scaled_container in s_api.mtd:
        s_api.DeleteWorkspace(self._scaled_container)
    if self._shifted_container in s_api.mtd:
        s_api.DeleteWorkspace(self._shifted_container)
    if self._scaled_container_wavelength in s_api.mtd:
        s_api.DeleteWorkspace(self._scaled_container_wavelength)
    if self._sample_ws_wavelength in s_api.mtd:
        s_api.DeleteWorkspace(self._sample_ws_wavelength)
    prog_wrkflow.report('Algorithm Complete')
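
# Illustration (not part of the algorithm): the container pre-treatment above
# is an x-shift (ScaleX with Operation='Add') followed by a y-scale (Scale
# with Operation='Multiply'). Numpy sketch:
import numpy as np

can_x = np.array([1.0, 2.0, 3.0])
can_y = np.array([4.0, 5.0, 6.0])
shift, scale = 0.1, 0.8
print(can_x + shift)   # shifted bin positions
print(can_y * scale)   # scaled intensities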
def calculate_reflectivity(self, direct_beam=None, configuration=None):
    """
    Loop through the cross-section data sets and update the reflectivity.
    """
    if configuration is not None:
        self.configuration = copy.deepcopy(configuration)
    if self.configuration is None:
        return
    # If a direct beam object was passed, use it.
    apply_norm = direct_beam is not None  # and not self.is_direct_beam
    if not apply_norm:
        direct_beam = CrossSectionData('none', self.configuration, 'none')

    logging.info("%s Reduction with DB: %s [config: %s]",
                 self.number, direct_beam.number, self.configuration.normalization)
    angle_offset = 0  # Offset from dangle0, in radians

    def _as_ints(a):
        return [int(round(a[0])), int(round(a[1])) - 1]

    output_ws = "r%s" % self.number

    ws_norm = None
    if apply_norm and direct_beam._event_workspace is not None:
        ws_norm = direct_beam._event_workspace

    ws_list = [self.cross_sections[xs]._event_workspace for xs in self.cross_sections]
    conf = self.cross_sections[self.main_cross_section].configuration
    wsg = api.GroupWorkspaces(InputWorkspaces=ws_list)

    _dirpix = conf.direct_pixel_overwrite if conf.set_direct_pixel else None
    _dangle0 = conf.direct_angle_offset_overwrite if conf.set_direct_angle_offset else None

    ws = api.MagnetismReflectometryReduction(
        InputWorkspace=wsg,
        NormalizationWorkspace=ws_norm,
        SignalPeakPixelRange=_as_ints(conf.peak_roi),
        SubtractSignalBackground=conf.subtract_background,
        SignalBackgroundPixelRange=_as_ints(conf.bck_roi),
        ApplyNormalization=apply_norm,
        NormPeakPixelRange=_as_ints(direct_beam.configuration.peak_roi),
        SubtractNormBackground=conf.subtract_background,
        NormBackgroundPixelRange=_as_ints(direct_beam.configuration.bck_roi),
        CutLowResDataAxis=True,
        LowResDataAxisPixelRange=_as_ints(conf.low_res_roi),
        CutLowResNormAxis=True,
        LowResNormAxisPixelRange=_as_ints(direct_beam.configuration.low_res_roi),
        CutTimeAxis=True,
        FinalRebin=conf.do_final_rebin,
        QMin=0.001,
        QStep=conf.final_rebin_step,
        RoundUpPixel=False,
        AngleOffset=angle_offset,
        UseWLTimeAxis=False,
        TimeAxisStep=conf.tof_bins,
        UseSANGLE=not conf.use_dangle,
        TimeAxisRange=conf.tof_range,
        SpecularPixel=conf.peak_position,
        ConstantQBinning=conf.use_constant_q,
        ConstQTrim=0.1,
        CropFirstAndLastPoints=False,
        CleanupBadData=conf.do_final_rebin,
        ErrorWeightedBackground=False,
        SampleLength=conf.sample_size,
        DAngle0Overwrite=_dangle0,
        DirectPixelOverwrite=_dirpix,
        OutputWorkspace=output_ws)

    ################## FOR COMPATIBILITY WITH QUICKNXS ##################
    _ws = ws[0] if len(ws_list) > 1 else ws
    run_object = _ws.getRun()
    peak_min = run_object.getProperty("scatt_peak_min").value
    peak_max = run_object.getProperty("scatt_peak_max").value + 1.0
    low_res_min = run_object.getProperty("scatt_low_res_min").value
    low_res_max = run_object.getProperty("scatt_low_res_max").value + 1.0
    norm_x_min = run_object.getProperty("norm_peak_min").value
    norm_x_max = run_object.getProperty("norm_peak_max").value + 1.0
    norm_y_min = run_object.getProperty("norm_low_res_min").value
    norm_y_max = run_object.getProperty("norm_low_res_max").value + 1.0
    tth = run_object.getProperty("two_theta").value * math.pi / 360.0

    quicknxs_scale = (float(norm_x_max) - float(norm_x_min)) * (float(norm_y_max) - float(norm_y_min))
    quicknxs_scale /= (float(peak_max) - float(peak_min)) * (float(low_res_max) - float(low_res_min))
    logging.warning("Scale size = %s",
                    str((float(peak_max) - float(peak_min)) * (float(low_res_max) - float(low_res_min))))
    logging.warning("Alpha_i = %s", str(tth))
    _scale = 0.005 / math.sin(tth) if tth > 0.0002 else 1.0
    quicknxs_scale *= _scale

    ws = api.Scale(InputWorkspace=output_ws, OutputWorkspace=output_ws,
                   factor=quicknxs_scale, Operation='Multiply')
    #####################################################################

    _ws = ws if len(ws_list) > 1 else [ws]
    for xs in _ws:
        xs_id = xs.getRun().getProperty("cross_section_id").value
        self.cross_sections[xs_id].q = xs.readX(0)[:].copy()
        self.cross_sections[xs_id]._r = xs.readY(0)[:].copy()
        self.cross_sections[xs_id]._dr = xs.readE(0)[:].copy()
        self.cross_sections[xs_id]._reflectivity_workspace = str(xs)