def _pcolor_func(self, name, *args, **kwargs):
    """
    Dispatch a pcolor-style call (pcolor/pcolormesh/...) either to the mantid
    plotfunctions (workspace arguments) or to the plain matplotlib Axes.

    :param name: The name of the pcolor* method to invoke
    :param args: Positional arguments forwarded from the caller
    :param kwargs: Keyword arguments forwarded from the caller
    :return: The return value of the pcolor* function
    """
    ws_plot_func = getattr(plotfunctions, name)
    if not helperfunctions.validate_args(*args):
        # Plain array input: defer straight to the matplotlib implementation.
        return getattr(Axes, name)(self, *args, **kwargs)

    logger.debug('using plotfunctions')

    def _update_data(artists, workspace):
        # Redraw callback used when the underlying workspace data changes.
        return self._redraw_colorplot(ws_plot_func, artists, workspace, **kwargs)

    tracked = self.track_workspace_artist(args[0],
                                          ws_plot_func(self, *args, **kwargs),
                                          _update_data)
    # Return the last mesh so the return type is a single artist,
    # matching the behaviour of the standard Axes methods.
    try:
        return tracked[-1]
    except TypeError:
        return tracked
def isthere_dsfinterp(self):
    """
    Check whether the optional dsfinterp package can be imported.

    :return: True if dsfinterp is importable, False otherwise
    """
    try:
        import dsfinterp  # noqa: F401  (availability check only)
    except ImportError:
        # Catch only ImportError: the original bare `except:` also swallowed
        # unrelated errors such as KeyboardInterrupt and SystemExit.
        logger.debug('Python package dsfinterp is missing (https://pypi.python.org/pypi/dsfinterp)')
        return False
    return True
def tricontourf(self, *args, **kwargs):
    """
    If the **mantid** projection is chosen, it can be used the same as
    :py:meth:`matplotlib.axes.Axes.tricontourf` for arrays, or it can be
    used to plot :class:`mantid.api.MatrixWorkspace` or
    :class:`mantid.api.IMDHistoWorkspace`. You can have something like::

        import matplotlib.pyplot as plt
        from mantid import plots

        ...

        fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
        ax.tricontourf(workspace) #for workspaces
        ax.tricontourf(x,y,z)     #for arrays
        fig.show()

    For keywords related to workspaces, see :func:`plotfunctions.tricontourf`
    """
    if not helperfunctions.validate_args(*args):
        # Array input: fall back to the plain matplotlib implementation.
        return Axes.tricontourf(self, *args, **kwargs)

    logger.debug('using plotfunctions')
    source_ws = args[0]
    filled_contours = plotfunctions.tricontourf(self, *args, **kwargs)
    return self.track_workspace_artist(source_ws, filled_contours)
def recover_selected_checkpoint(self, selected): """ Recover the passed checkpoint :param selected: String; Checkpoint name to be recovered """ # If this is a valid file then it should only be the checkpoint here if os.path.exists(selected): selected = os.path.basename(selected) self.is_recovery_running = True self.presenter.change_start_mantid_to_cancel_label() ADS.clear() # Recover given the checkpoint selected pid_dir = self.project_recovery.get_pid_folder_to_load_a_checkpoint_from() selected = selected.replace(" ", "T") checkpoint = os.path.join(pid_dir, selected) self.selected_checkpoint = selected try: self._start_recovery_of_checkpoint(checkpoint) except Exception as e: # Fail "Silently" by setting failed run to true, setting checkpoint to tried and closing the view. logger.debug("Project Recovery: " + str(e)) self.has_failed_run = True self._update_checkpoint_tried(selected) self.presenter.close_view()
def _read_atomic_coordinates(self, file_obj=None):
    """
    Reads atomic coordinates from .out CRYSTAL file.
    :param file_obj: file object from which we read
    :returns: list with atomic coordinates
    """
    self._parser.find_first(file_obj=file_obj,
                            msg="ATOM X(ANGSTROM) Y(ANGSTROM) Z(ANGSTROM)")
    # Skip the separator line of asterisks that follows the header.
    file_obj.readline()

    atom_lines = []
    while not self._parser.file_end(file_obj=file_obj):
        # Strip the 'T' markers from the raw line.
        raw = file_obj.readline().replace(b"T", b"")
        if not raw.strip():
            # At the end of this section there is always an empty line.
            break
        atom_lines.append(raw.strip(b"\n"))

    for entry in atom_lines:
        # convert from unicode to str in case of Python 2
        logger.debug(str(entry.strip(b"\n")))

    return atom_lines
def _calculate_parameters(self):
    """
    Calculates the TransformToIqt parameters and saves in a table workspace.
    """
    CropWorkspace(InputWorkspace=self._sample,
                  OutputWorkspace='__TransformToIqt_sample_cropped',
                  Xmin=self._e_min,
                  Xmax=self._e_max)
    x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)
    number_input_points = len(x_data) - 1
    num_bins = int(number_input_points / self._number_points_per_bin)
    self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins

    try:
        # Prefer the resolution defined on the analyser component of the IPF.
        instrument = mtd[self._sample].getInstrument()
        analyserName = instrument.getStringParameter('analyser')[0]
        analyser = instrument.getComponentByName(analyserName)
        if analyser is not None:
            logger.debug('Found %s component in instrument %s, will look for resolution there'
                         % (analyserName, instrument))
            resolution = analyser.getNumberParameter('resolution')[0]
        else:
            logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
                         % (analyserName, instrument))
            resolution = instrument.getNumberParameter('resolution')[0]
        logger.information('Got resolution from IPF: %f' % resolution)
    except (AttributeError, IndexError):
        # No resolution in the IPF: fall back to a default value.
        resolution = 0.0175
        logger.warning('Could not get resolution from IPF, using default value: %f' % (resolution))

    resolution_bins = int(round((2 * resolution) / self._e_width))
    if resolution_bins < 5:
        # Fixed: this literal was broken across a line break in the source.
        logger.warning('Resolution curve has <5 points. Results may be unreliable.')

    param_table = CreateEmptyTableWorkspace(OutputWorkspace=self._parameter_table)
    param_table.addColumn('int', 'SampleInputBins')
    param_table.addColumn('float', 'BinReductionFactor')
    param_table.addColumn('int', 'SampleOutputBins')
    param_table.addColumn('float', 'EnergyMin')
    param_table.addColumn('float', 'EnergyMax')
    param_table.addColumn('float', 'EnergyWidth')
    param_table.addColumn('float', 'Resolution')
    param_table.addColumn('int', 'ResolutionBins')
    param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,
                        self._e_min, self._e_max, self._e_width,
                        resolution, resolution_bins])

    DeleteWorkspace('__TransformToIqt_sample_cropped')
    self.setProperty('ParameterWorkspace', param_table)
def errorbar(self, *args, **kwargs):
    """
    If the **mantid** projection is chosen, it can be used the same as
    :py:meth:`matplotlib.axes.Axes.errorbar` for arrays, or it can be used to plot
    :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`.
    You can have something like::

        import matplotlib.pyplot as plt
        from mantid import plots

        ...

        fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
        ax.errorbar(workspace,'rs',specNum=1) #for workspaces
        ax.errorbar(x,y,yerr,'bo')            #for arrays
        fig.show()

    For keywords related to workspaces, see :func:`plotfunctions.errorbar`
    """
    if helperfunctions.validate_args(*args):
        logger.debug('using plotfunctions')

        def _data_update(artists, workspace):
            # Replace the existing error-bar container with one drawn from
            # the updated workspace, preserving draw order and line styling.
            # errorbar with workspaces can only return a single container
            container_orig = artists[0]
            # It is not possible to simply reset the error bars so
            # we have to plot new lines but ensure we don't reorder them on the plot!
            orig_idx = self.containers.index(container_orig)
            container_orig.remove()
            # The container does not remove itself from the containers list
            # but protect this just in case matplotlib starts doing this
            try:
                self.containers.remove(container_orig)
            except ValueError:
                pass
            # this gets pushed back onto the containers list
            container_new = plotfunctions.errorbar(self, workspace, **kwargs)
            self.containers.insert(orig_idx, container_new)
            self.containers.pop()
            # update line properties to match original
            orig_flat, new_flat = cbook.flatten(container_orig), cbook.flatten(container_new)
            for artist_orig, artist_new in zip(orig_flat, new_flat):
                artist_new.update_from(artist_orig)
            # ax.relim does not support collections...
            self._update_line_limits(container_new[0])
            self.autoscale()
            return container_new

        workspace = args[0]
        spec_num = self._get_spec_number(workspace, kwargs)
        return self.track_workspace_artist(workspace,
                                           plotfunctions.errorbar(self, *args, **kwargs),
                                           _data_update, spec_num=spec_num)
    else:
        # Array input: plain matplotlib behaviour.
        return Axes.errorbar(self, *args, **kwargs)
def start_recovery_thread(self):
    """
    Starts the recovery thread if it is not already running
    """
    if not self.pr.recovery_enabled:
        logger.debug("Project Recovery: Recovery thread not started as recovery is disabled")
        return
    if self.pr.thread_on:
        # Thread already running; nothing to do.
        return
    self._timer_thread.start()
    self.pr.thread_on = True
def _do_slice_viewer(self, names):
    """
    Show the sliceviewer window for the given workspaces
    :param names: A list of workspace names
    """
    resolved = self._ads.retrieveWorkspaces(names, unrollGroups=True)
    for workspace in resolved:
        try:
            SliceViewer(ws=workspace, parent=self)
        except Exception as exception:
            # A failure on one workspace should not stop the others opening.
            logger.warning("Could not open slice viewer for workspace '{}'."
                           "".format(workspace.name()))
            logger.debug("{}: {}".format(type(exception).__name__, exception))
def convert(self, wavelength_min, wavelength_max, detector_workspace_indexes,
            monitor_workspace_index, correct_monitor=False, bg_min=None, bg_max=None):
    """
    Run the conversion

    Arguments:
        wavelength_min: min wavelength in x for monitor workspace
        wavelength_max: max wavelength in x for detector workspace
        detector_workspace_indexes: Tuple of workspace indexes (or tuple of tuple min, max ranges to keep)
        monitor_workspace_index: The index of the monitor workspace
        correct_monitor: Flag indicating that monitors should have a flat background correction applied
        bg_min: x min background in wavelength
        bg_max: x max background in wavelength

    Returns:
        (_monitor_ws, _detector_ws): the monitor and detector workspaces in wavelength
    """
    # Sanity check inputs.
    if wavelength_min >= wavelength_max:
        raise ValueError("Wavelength_min must be < wavelength_max min: %s, max: %s"
                         % (wavelength_min, wavelength_max))
    # NOTE(review): all((bg_min, bg_max)) treats 0.0 the same as "not provided"
    # — confirm a zero background bound is never a legitimate input.
    if correct_monitor and not all((bg_min, bg_max)):
        raise ValueError("Either provide ALL, monitors_to_correct, bg_min, bg_max or none of them")
    if all((bg_min, bg_max)) and bg_min >= bg_max:
        raise ValueError("Background min must be < Background max")

    # Renamed from 'sum' to stop shadowing the builtin of the same name.
    summed_ws = ConvertToWavelength.sum_workspaces(self.__ws_list)
    sum_wavelength = msi.ConvertUnits(InputWorkspace=summed_ws,
                                      Target="Wavelength", AlignBins='1')

    logger.debug("Monitor detector index %s" % str(monitor_workspace_index))

    # Crop out the monitor workspace
    _monitor_ws = msi.CropWorkspace(InputWorkspace=sum_wavelength,
                                    StartWorkspaceIndex=monitor_workspace_index,
                                    EndWorkspaceIndex=monitor_workspace_index)
    # Crop out the detector workspace then chop out the x-ranges of interest.
    _detector_ws = ConvertToWavelength.crop_range(sum_wavelength, detector_workspace_indexes)
    _detector_ws = msi.CropWorkspace(InputWorkspace=_detector_ws,
                                     XMin=wavelength_min, XMax=wavelength_max)

    # Apply a flat background to the monitor if requested.
    if correct_monitor and all((bg_min, bg_max)):
        _monitor_ws = msi.CalculateFlatBackground(InputWorkspace=_monitor_ws,
                                                  WorkspaceIndexList=0,
                                                  StartX=bg_min, EndX=bg_max)

    msi.DeleteWorkspace(Workspace=sum_wavelength.name())
    return (_monitor_ws, _detector_ws)
def test_input_exceptions(self):
    # Exercises the DSFinterp input-validation error paths.
    # Run the test only if dsfinterp package is present
    try:
        import dsfinterp
    except:
        logger.debug('Python package dsfinterp is missing (https://pypi.python.org/pypi/dsfinterp)')
        return
    nf = 9
    fvalues, workspaces = self.generateWorkspaces(nf)  # workspaces sim1 to sim9 (nine workpaces)
    # Try passing different number of workspaces and parameter values
    try:
        fvalueswrong = range(nf - 1)  # eight values
        mantid.simpleapi.DSFinterp(Workspaces=workspaces,
                                   ParameterValues=fvalueswrong,
                                   LocalRegression=False,
                                   TargetParameters=5.5,
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue('Number of Workspaces and ParameterValues should be the same' in str(e))
    else:
        assert False, "Didn't raise any exception"
    # Try passing an incompatible workspace
    try:
        mantid.simpleapi.CreateWorkspace(OutputWorkspace='sim10', DataX='1,2,3',
                                         DataY='1,1,1', DataE='0,0,0')
        fvalues2 = fvalues + [10, ]
        workspaces2 = workspaces + ['sim10', ]
        mantid.simpleapi.DSFinterp(Workspaces=workspaces2,
                                   ParameterValues=fvalues2,
                                   LocalRegression=False,
                                   TargetParameters=5.5,
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue('Workspace sim10 incompatible with sim1' in str(e))
    else:
        assert False, "Didn't raise any exception"
    mantid.api.AnalysisDataService.remove('sim10')
    # Try passing a target parameter outside range
    try:
        mantid.simpleapi.DSFinterp(Workspaces=workspaces, ParameterValues=fvalues,
                                   LocalRegression=False, TargetParameters=nf + 1,
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue('Target parameters should lie in' in str(e))
    else:
        assert False, "Didn't raise any exception"
    # Try passing a different number of target parameters and output workspaces
    try:
        mantid.simpleapi.DSFinterp(Workspaces=workspaces, ParameterValues=fvalues,
                                   LocalRegression=False, TargetParameters=[1, 2],
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue('Number of OutputWorkspaces and TargetParameters should be the same' in str(e))
    else:
        assert False, "Didn't raise any exception"
    self.cleanup(nf)
def transCorr(transrun, i_vs_lam, lambda_min, lambda_max, background_min, background_max,
              int_min, int_max, detector_index_ranges, i0_monitor_index,
              stitch_start_overlap, stitch_end_overlap, stitch_params):
    """
    Perform transmission corrections on i_vs_lam.
    return the corrected result.
    """
    reuse_existing = (isinstance(transrun, MatrixWorkspace)
                      and transrun.getAxis(0).getUnit().unitID() == "Wavelength")
    if reuse_existing:
        logger.debug("Using existing transmission workspace.")
        _transWS = transrun
    else:
        logger.debug("Creating new transmission correction workspace.")
        # Make the transmission correction workspace.
        _transWS = make_trans_corr(transrun, stitch_start_overlap, stitch_end_overlap,
                                   stitch_params, lambda_min, lambda_max,
                                   background_min, background_max,
                                   int_min, int_max, detector_index_ranges,
                                   i0_monitor_index,)

    # got sometimes very slight binning diferences, so do this again:
    _i_vs_lam_trans = RebinToWorkspace(WorkspaceToRebin=_transWS,
                                       WorkspaceToMatch=i_vs_lam,
                                       OutputWorkspace=_transWS.name())
    # Normalise by transmission run.
    return i_vs_lam / _i_vs_lam_trans
def recovery_save(self): """ The function to save a recovery checkpoint """ # Set that recovery thread is not running anymore self.pr.thread_on = False try: # Get the interfaces_list interfaces_list = find_all_windows_that_are_savable() # Check if there is anything to be saved or not if len(ADS.getObjectNames()) == 0 and len(interfaces_list) == 0: logger.debug("Project Recovery: Nothing to save") self._spin_off_another_time_thread() return logger.debug("Project Recovery: Saving started") # Create directory for save location recovery_dir = os.path.join(self.pr.recovery_directory_pid, datetime.datetime.now().strftime('%d-%m-%YT%H-%M-%S')) if not os.path.exists(recovery_dir): os.makedirs(recovery_dir) self._add_lock_file(directory=recovery_dir) # Save workspaces self._save_workspaces(directory=recovery_dir) # Save project self._save_project(directory=recovery_dir, interfaces_list=interfaces_list) self._remove_lock_file(directory=recovery_dir) # Clear the oldest checkpoints self.pr.remove_oldest_checkpoints() logger.debug("Project Recovery: Saving finished") except Exception as e: if isinstance(e, KeyboardInterrupt): raise # Fail and print to debugger logger.debug("Project Recovery: Failed to save error msg: " + str(e)) # Spin off another timer thread if not self.pr.closing_workbench: self._spin_off_another_time_thread()
def scatter(self, *args, **kwargs):
    """
    If the **mantid** projection is chosen, it can be used the same as
    :py:meth:`matplotlib.axes.Axes.scatter` for arrays, or it can be used to plot
    :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`.
    You can have something like::

        import matplotlib.pyplot as plt
        from mantid import plots

        ...

        fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
        ax.scatter(workspace,'rs',specNum=1) #for workspaces
        ax.scatter(x,y,'bo')                 #for arrays
        fig.show()

    For keywords related to workspaces, see :func:`plotfunctions.scatter`
    """
    if helperfunctions.validate_args(*args):
        logger.debug('using plotfunctions')
        # BUG FIX: this branch previously only logged and fell through,
        # returning None without ever plotting the workspace. Mirror the
        # pattern used by the other workspace-aware methods (e.g. tricontourf).
        workspace = args[0]
        return self.track_workspace_artist(workspace,
                                           plotfunctions.scatter(self, *args, **kwargs))
    else:
        return Axes.scatter(self, *args, **kwargs)
def plot(self, *args, **kwargs):
    """
    If the **mantid** projection is chosen, it can be used the same as
    :py:meth:`matplotlib.axes.Axes.plot` for arrays, or it can be used to plot
    :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`.
    You can have something like::

        import matplotlib.pyplot as plt
        from mantid import plots

        ...

        fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
        ax.plot(workspace,'rs',specNum=1) #for workspaces
        ax.plot(x,y,'bo')                 #for arrays
        fig.show()

    For keywords related to workspaces, see :func:`plotfunctions.plot`.
    """
    if not helperfunctions.validate_args(*args):
        # Array input: plain matplotlib behaviour.
        return Axes.plot(self, *args, **kwargs)

    logger.debug('using plotfunctions')

    def _data_update(artists, workspace):
        # It's only possible to plot 1 line at a time from a workspace
        xs, ys, _, __ = plotfunctions._plot_impl(self, workspace, args, kwargs)
        artists[0].set_data(xs, ys)
        self.relim()
        self.autoscale()
        return artists

    source_ws = args[0]
    spec_num = self._get_spec_number(source_ws, kwargs)
    lines = plotfunctions.plot(self, *args, **kwargs)
    return self.track_workspace_artist(source_ws, lines, _data_update, spec_num)
def _calculate_parameters(self):
    """
    Calculates the Fury parameters and saves in a table workspace.
    """
    CropWorkspace(InputWorkspace=self._sample,
                  OutputWorkspace='__Fury_sample_cropped',
                  Xmin=self._e_min, Xmax=self._e_max)
    x_data = mtd['__Fury_sample_cropped'].readX(0)
    number_input_points = len(x_data) - 1
    # BUG FIX: plain '/' yields a float on Python 3, which was then written
    # to the int 'SampleOutputBins' column below; the sibling TransformToIqt
    # implementations already wrap this in int().
    num_bins = int(number_input_points / self._number_points_per_bin)
    self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins

    try:
        # Prefer the resolution defined on the analyser component of the IPF.
        instrument = mtd[self._sample].getInstrument()
        analyserName = instrument.getStringParameter('analyser')[0]
        analyser = instrument.getComponentByName(analyserName)
        if analyser is not None:
            logger.debug('Found %s component in instrument %s, will look for resolution there'
                         % (analyserName, instrument))
            resolution = analyser.getNumberParameter('resolution')[0]
        else:
            logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
                         % (analyserName, instrument))
            resolution = instrument.getNumberParameter('resolution')[0]
        if self._verbose:
            logger.information('Got resolution from IPF: %f' % resolution)
    except (AttributeError, IndexError):
        # No resolution in the IPF: fall back to a default value.
        resolution = 0.0175
        logger.warning('Could not get resolution from IPF, using default value: %f' % resolution)

    resolution_bins = int(round((2 * resolution) / self._e_width))
    if resolution_bins < 5:
        # Fixed: this literal was broken across a line break in the source.
        logger.warning('Resolution curve has <5 points. Results may be unreliable.')

    param_table = CreateEmptyTableWorkspace(OutputWorkspace=self._parameter_table)
    param_table.addColumn('int', 'SampleInputBins')
    param_table.addColumn('int', 'NumberBins')
    param_table.addColumn('int', 'SampleOutputBins')
    param_table.addColumn('float', 'EnergyMin')
    param_table.addColumn('float', 'EnergyMax')
    param_table.addColumn('float', 'EnergyWidth')
    param_table.addColumn('float', 'Resolution')
    param_table.addColumn('int', 'ResolutionBins')
    param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,
                        self._e_min, self._e_max, self._e_width,
                        resolution, resolution_bins])

    DeleteWorkspace('__Fury_sample_cropped')
    self.setProperty('ParameterWorkspace', param_table)
def _calculate_parameters(self):
    """
    Calculates the TransformToIqt parameters and saves in a table workspace.
    """
    workflow_prog = Progress(self, start=0.0, end=0.3, nreports=8)
    workflow_prog.report('Cropping Workspace')  # typo fix: was 'Croping'
    CropWorkspace(InputWorkspace=self._sample,
                  OutputWorkspace='__TransformToIqt_sample_cropped',
                  Xmin=self._e_min, Xmax=self._e_max)
    workflow_prog.report('Calculating table properties')
    x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)
    number_input_points = len(x_data) - 1
    num_bins = int(number_input_points / self._number_points_per_bin)
    self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins

    workflow_prog.report('Attempting to Access IPF')  # typo fix: was 'Attemping'
    try:
        workflow_prog.report('Access IPF')
        # Prefer the resolution defined on the analyser component of the IPF.
        instrument = mtd[self._sample].getInstrument()
        analyserName = instrument.getStringParameter('analyser')[0]
        analyser = instrument.getComponentByName(analyserName)
        if analyser is not None:
            logger.debug('Found %s component in instrument %s, will look for resolution there'
                         % (analyserName, instrument))
            resolution = analyser.getNumberParameter('resolution')[0]
        else:
            logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
                         % (analyserName, instrument))
            resolution = instrument.getNumberParameter('resolution')[0]
        logger.information('Got resolution from IPF: %f' % resolution)
        workflow_prog.report('IPF resolution obtained')
    except (AttributeError, IndexError):
        workflow_prog.report('Resorting to Default')
        resolution = 0.0175
        logger.warning('Could not get resolution from IPF, using default value: %f' % (resolution))

    resolution_bins = int(round((2 * resolution) / self._e_width))
    if resolution_bins < 5:
        # Fixed: this literal was broken across a line break in the source.
        logger.warning('Resolution curve has <5 points. Results may be unreliable.')

    workflow_prog.report('Creating Parameter table')
    param_table = CreateEmptyTableWorkspace(OutputWorkspace=self._parameter_table)
    workflow_prog.report('Populating Parameter table')
    param_table.addColumn('int', 'SampleInputBins')
    param_table.addColumn('float', 'BinReductionFactor')
    param_table.addColumn('int', 'SampleOutputBins')
    param_table.addColumn('float', 'EnergyMin')
    param_table.addColumn('float', 'EnergyMax')
    param_table.addColumn('float', 'EnergyWidth')
    param_table.addColumn('float', 'Resolution')
    param_table.addColumn('int', 'ResolutionBins')
    param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,
                        self._e_min, self._e_max, self._e_width,
                        resolution, resolution_bins])

    workflow_prog.report('Deleting temp Workspace')
    DeleteWorkspace('__TransformToIqt_sample_cropped')
    self.setProperty('ParameterWorkspace', param_table)
def PyExec(self):
    """Azimuthally average the input 2D SANS data into I(Q)."""
    # Warn user if error-weighting was turned on
    error_weighting = self.getProperty("ErrorWeighting").value
    if error_weighting:
        msg = "The ErrorWeighting option is turned ON. "
        msg += "This option is NOT RECOMMENDED"
        Logger("SANSAzimuthalAverage").warning(msg)

    # Warn against sub-pixels
    n_subpix = self.getProperty("NumberOfSubpixels").value
    if n_subpix != 1:
        msg = "NumberOfSubpixels was set to %s: " % str(n_subpix)
        msg += "The recommended value is 1"
        Logger("SANSAzimuthalAverage").warning(msg)

    # Q binning options
    binning = self.getProperty("Binning").value
    binning_prop = self.getPropertyValue("Binning")
    workspace = self.getProperty("InputWorkspace").value
    output_ws_name = self.getPropertyValue("OutputWorkspace")

    # Q range
    pixel_size_x = workspace.getInstrument().getNumberParameter("x-pixel-size")[0]
    pixel_size_y = workspace.getInstrument().getNumberParameter("y-pixel-size")[0]

    # An empty or all-zero Binning property means "work out the binning".
    if len(binning) == 0 or (binning[0] == 0 and binning[1] == 0 and binning[2] == 0):
        # Wavelength. Read in the wavelength bins.
        # Skip the first one which is not set up properly for EQ-SANS
        x = workspace.dataX(1)
        x_length = len(x)
        if x_length < 2:
            raise RuntimeError("Azimuthal averaging expects at least one wavelength bin")
        wavelength_max = (x[x_length - 2] + x[x_length - 1]) / 2.0
        wavelength_min = (x[0] + x[1]) / 2.0
        if wavelength_min == 0 or wavelength_max == 0:
            raise RuntimeError("Azimuthal averaging needs positive wavelengths")
        qmin, qstep, qmax = self._get_binning(workspace, wavelength_min, wavelength_max)
        align = self.getProperty("AlignWithDecades").value
        log_binning = self.getProperty("LogBinning").value
        if log_binning and align:
            binning_prop = self._get_aligned_binning(qmin, qmax)
        else:
            binning_prop = "%g, %g, %g" % (qmin, qstep, qmax)
            workspace.getRun().addProperty("qstep", float(qstep), True)
        self.setPropertyValue("Binning", binning_prop)
    else:
        qmin = binning[0]
        qmax = binning[2]
    logger.debug("Qmin = %s" % qmin)
    logger.debug("Qmax = %s" % qmax)
    workspace.getRun().addProperty("qmin", float(qmin), True)
    workspace.getRun().addProperty("qmax", float(qmax), True)

    # If we kept the events this far, we need to convert the input workspace
    # to a histogram here
    if workspace.id() == "EventWorkspace":
        alg = AlgorithmManager.create("ConvertToMatrixWorkspace")
        alg.initialize()
        alg.setChild(True)
        alg.setProperty("InputWorkspace", workspace)
        alg.setPropertyValue("OutputWorkspace", "__tmp_matrix_workspace")
        alg.execute()
        workspace = alg.getProperty("OutputWorkspace").value

    # Weighted 1D azimuthal average, with optional wedge outputs.
    alg = AlgorithmManager.create("Q1DWeighted")
    alg.initialize()
    alg.setChild(True)
    alg.setProperty("InputWorkspace", workspace)
    alg.setPropertyValue("OutputBinning", binning_prop)
    alg.setProperty("NPixelDivision", n_subpix)
    alg.setProperty("PixelSizeX", pixel_size_x)
    alg.setProperty("PixelSizeY", pixel_size_y)
    alg.setProperty("ErrorWeighting", error_weighting)
    alg.setPropertyValue("OutputWorkspace", output_ws_name)
    #wedge_ws_name = self.getPropertyValue("WedgeWorkspace")
    n_wedges = self.getProperty("NumberOfWedges").value
    wedge_angle = self.getProperty("WedgeAngle").value
    wedge_offset = self.getProperty("WedgeOffset").value
    alg.setPropertyValue("WedgeWorkspace", output_ws_name + '_wedges')
    alg.setProperty("NumberOfWedges", n_wedges)
    alg.setProperty("WedgeAngle", wedge_angle)
    alg.setProperty("WedgeOffset", wedge_offset)
    alg.execute()
    output_ws = alg.getProperty("OutputWorkspace").value
    wedge_ws = alg.getProperty("WedgeWorkspace").value

    # Replace NaN/inf in the averaged output with zeros.
    alg = AlgorithmManager.create("ReplaceSpecialValues")
    alg.initialize()
    alg.setChild(True)
    alg.setProperty("InputWorkspace", output_ws)
    alg.setPropertyValue("OutputWorkspace", output_ws_name)
    alg.setProperty("NaNValue", 0.0)
    alg.setProperty("NaNError", 0.0)
    alg.setProperty("InfinityValue", 0.0)
    alg.setProperty("InfinityError", 0.0)
    alg.execute()
    output_ws = alg.getProperty("OutputWorkspace").value

    # Q resolution
    compute_resolution = self.getProperty("ComputeResolution").value
    if compute_resolution:
        alg = AlgorithmManager.create("ReactorSANSResolution")
        alg.initialize()
        alg.setChild(True)
        alg.setProperty("InputWorkspace", output_ws)
        alg.execute()

    # Clean up and publish each wedge workspace as its own output property.
    for i in range(wedge_ws.getNumberOfEntries()):
        wedge_i = wedge_ws.getItem(i)
        identifier = i
        if wedge_i.getRun().hasProperty("wedge_angle"):
            identifier = int(wedge_i.getRun().getProperty("wedge_angle").value)
        wedge_i_name = "%s_wedge_%s" % (output_ws_name, identifier)
        alg = AlgorithmManager.create("ReplaceSpecialValues")
        alg.initialize()
        alg.setChild(True)
        alg.setProperty("InputWorkspace", wedge_i)
        alg.setProperty("OutputWorkspace", wedge_i_name)
        alg.setProperty("NaNValue", 0.0)
        alg.setProperty("NaNError", 0.0)
        alg.setProperty("InfinityValue", 0.0)
        alg.setProperty("InfinityError", 0.0)
        alg.execute()
        wedge_i = alg.getProperty("OutputWorkspace").value
        if compute_resolution:
            alg = AlgorithmManager.create("ReactorSANSResolution")
            alg.initialize()
            alg.setChild(True)
            alg.setProperty("InputWorkspace", wedge_i)
            alg.execute()
        # Output properties for wedges are declared dynamically at run time.
        self.declareProperty(MatrixWorkspaceProperty("WedgeWorkspace_%s" % i, "",
                                                     direction=Direction.Output))
        self.setPropertyValue("WedgeWorkspace_%s" % i, wedge_i_name)
        self.setProperty("WedgeWorkspace_%s" % i, wedge_i)

    msg = "Performed radial averaging between Q=%g and Q=%g" % (qmin, qmax)
    self.setProperty("OutputMessage", msg)
    self.setProperty("OutputWorkspace", output_ws)
def _eulerToQuat(self, alpha, beta, gamma, convention):
    """
    Convert Euler angles to a quaternion

    :param alpha: first rotation angle (degrees)
    :param beta: second rotation angle (degrees)
    :param gamma: third rotation angle (degrees)
    :param convention: three-character axis order, e.g. 'ZXZ' or 'XYZ'
    :return: the composed rotation as a Quat
    """
    getV3D = {'X': V3D(1, 0, 0), 'Y': V3D(0, 1, 0), 'Z': V3D(0, 0, 1)}
    return (Quat(alpha, getV3D[convention[0]]) *
            Quat(beta, getV3D[convention[1]]) *
            Quat(gamma, getV3D[convention[2]]))

def _eulerToAngleAxis(self, alpha, beta, gamma, convention):
    """
    Convert Euler angles to a angle rotation around an axis

    :return: (angle in degrees, axis x, axis y, axis z)
    """
    quat = self._eulerToQuat(alpha, beta, gamma, convention)
    if quat[0] == 1:
        # Identity rotation: report zero angle about the z axis.
        return 0, 0, 0, 1
    deg = math.acos(quat[0])    # half-angle in radians (w = cos(theta/2))
    scale = math.sin(deg)       # normalisation factor for the vector part
    deg *= 360.0 / math.pi      # full rotation angle, converted to degrees
    ax0 = quat[1] / scale
    ax1 = quat[2] / scale
    ax2 = quat[3] / scale
    return deg, ax0, ax1, ax2

# Only register the algorithm when scipy's optimizer is available.
try:
    from scipy.optimize import minimize
    AlgorithmFactory.subscribe(AlignComponents)
except ImportError:
    logger.debug('Failed to subscribe algorithm AlignComponets; cannot import minimize from scipy.optimize')
windowlength = self.getProperty('RegressionWindow').value self.channelgroup.InitializeInterpolator(running_regr_type=regressiontype, windowlength=windowlength) else: self.channelgroup.InitializeInterpolator(windowlength=0) # Invoke the interpolator and generate the output workspaces targetfvalues = self.getProperty('TargetParameters').value for targetfvalue in targetfvalues: if targetfvalue < min(fvalues) or targetfvalue > max(fvalues): mesg = 'Target parameters should lie in [{0}, {1}]'.format(min(fvalues),max(fvalues)) logger.error(mesg) raise ValueError(mesg) outworkspaces = self.getProperty('OutputWorkspaces').value if len(targetfvalues) != len(outworkspaces): mesg = 'Number of OutputWorkspaces and TargetParameters should be the same' logger.error(mesg) raise IndexError(mesg) for i in range(len(targetfvalues)): dsf = self.channelgroup( targetfvalues[i] ) outws = mantid.simpleapi.CloneWorkspace( mantid.mtd[workspaces[0]], OutputWorkspace=outworkspaces[i]) dsf.Save(outws) # overwrite dataY and dataE ############################################################################################# #pylint: disable=unused-import try: import dsfinterp AlgorithmFactory.subscribe(DSFinterp) except ImportError: logger.debug('Failed to subscribe algorithm DSFinterp; Python package dsfinterp'\ 'may be missing (https://pypi.python.org/pypi/dsfinterp)')
Convert Euler angles to a quaternion """ getV3D = {'X': V3D(1, 0, 0), 'Y': V3D(0, 1, 0), 'Z': V3D(0, 0, 1)} return (Quat(alpha, getV3D[convention[0]]) * Quat(beta, getV3D[convention[1]]) * Quat(gamma, getV3D[convention[2]])) def _eulerToAngleAxis(self, alpha, beta, gamma, convention): """ Convert Euler angles to a angle rotation around an axis """ quat = self._eulerToQuat(alpha, beta, gamma, convention) if quat[0] == 1: return 0, 0, 0, 1 deg = math.acos(quat[0]) scale = math.sin(deg) deg *= 360.0 / math.pi ax0 = quat[1] / scale ax1 = quat[2] / scale ax2 = quat[3] / scale return deg, ax0, ax1, ax2 try: from scipy.optimize import minimize AlgorithmFactory.subscribe(AlignComponents) except ImportError: logger.debug( 'Failed to subscribe algorithm AlignComponets; cannot import minimize from scipy.optimize' )
def PyExec(self):
    """ Main execution body """
    #get parameter
    energy = self.getProperty("IncidentEnergy").value
    msd = 1800.0               # moderator-related distance term (mm)
    tail_length_us = 3000.0    # length of the prompt-pulse tail (us)
    dist_mm = 39000.0 + msd + 4500.0
    T0_moderator = 0.0
    # Times of flight (us) to the focus element, the sample and the detector.
    t_focEle_us = 39000.0 / self.e2v(energy) * 1000.0 + T0_moderator
    t_samp_us = (dist_mm - 4500.0) / self.e2v(energy) * 1000.0 + T0_moderator
    t_det_us = dist_mm / self.e2v(energy) * 1000 + T0_moderator
    # 16667 us is one source frame at 60 Hz.
    frame_start_us = t_det_us - 16667 / 2
    frame_end_us = t_det_us + 16667 / 2
    index_under_frame = int(numpy.divide(int(t_det_us), 16667))
    pre_lead_us = 16667 * index_under_frame
    pre_tail_us = pre_lead_us + tail_length_us
    post_lead_us = 16667 * (1 + index_under_frame)
    #post_tail_us = post_lead_us + tail_length_us
    #E_final_meV = -1
    #E_transfer_meV = -1
    # finding an ok TIB range
    MinTIB_us = 2000.0
    slop_frac = 0.2
    #print t_focEle_us,pre_lead_us,frame_start_us,MinTIB_us,slop_frac
    # Candidate windows are tried in order of preference; each later option
    # relaxes the required window width (full, /1.5, /2.0 of MinTIB_us).
    if (t_focEle_us < pre_lead_us) and (t_focEle_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before focus element-1')
        TIB_high_us = t_focEle_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif (frame_start_us > pre_tail_us) and (t_focEle_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before focus element-2')
        TIB_high_us = t_focEle_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif t_focEle_us - pre_tail_us > MinTIB_us * (slop_frac + 1.0) and (t_focEle_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before focus element-3')
        TIB_high_us = t_focEle_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif t_samp_us - pre_tail_us > MinTIB_us * (slop_frac + 1.0) and (t_samp_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before sample-1')
        TIB_high_us = t_samp_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif t_samp_us - pre_tail_us > MinTIB_us / 1.5 * (slop_frac + 1.0) and (t_samp_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before sample-2')
        TIB_high_us = t_samp_us - MinTIB_us / 1.5 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 1.5
    elif t_samp_us - pre_tail_us > MinTIB_us / 2.0 * (slop_frac + 1.0) and (t_samp_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before sample-3')
        TIB_high_us = t_samp_us - MinTIB_us / 2.0 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 2.0
    elif (pre_lead_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)) and (t_focEle_us > pre_lead_us):
        logger.debug('choosing TIB just before leading edge before elastic-1')
        TIB_high_us = pre_lead_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif (pre_lead_us - frame_start_us > MinTIB_us / 1.5 * (slop_frac + 1.0)) and (t_focEle_us > pre_lead_us):
        logger.debug('choosing TIB just before leading edge before elastic-2')
        TIB_high_us = pre_lead_us - MinTIB_us / 1.5 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 1.5
    elif (pre_lead_us - frame_start_us > MinTIB_us / 2.0 * (slop_frac + 1.0)) and (t_focEle_us > pre_lead_us):
        logger.debug('choosing TIB just before leading edge before elastic-3')
        TIB_high_us = pre_lead_us - MinTIB_us / 2.0 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 2.0
    # elif (pre_tail_us > frame_start_us) and (t_focEle_us - pre_tail_us > MinTIB_us * (slop_frac + 1.0)):
    #     logger.debug('choosing TIB just before focus element')
    #     TIB_low_us = pre_tail_us + MinTIB_us * slop_frac / 2.0
    #     TIB_high_us = TIB_low_us + MinTIB_us
    elif post_lead_us > frame_end_us:
        logger.debug('choosing TIB at end of frame')
        TIB_high_us = frame_end_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif post_lead_us - t_det_us > MinTIB_us * (slop_frac + 1.0):
        logger.debug('choosing TIB between elastic peak and later prompt pulse leading edge')
        TIB_high_us = post_lead_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    else:
        # No acceptable window found: signal this with a zero-width range.
        logger.debug('I cannot find a good TIB range')
        TIB_low_us = 0.0
        TIB_high_us = 0.0
    #return the result
    self.setProperty("TibMin", TIB_low_us)
    self.setProperty("TibMax", TIB_high_us)
    return
def _calculate_parameters(self):
    """
    Calculates the TransformToIqt parameters and saves in a table workspace.

    Crops the sample to the requested energy window, derives the output
    binning from the points-per-bin setting, obtains the detector
    resolution from the instrument parameter file (IPF) where possible,
    and writes everything into the 'ParameterWorkspace' table property.
    """
    from IndirectCommon import getEfixed

    # Error calculation (done elsewhere) uses the remaining progress range,
    # so stop this phase's progress early when errors are requested.
    end_prog = 0.3 if self._calculate_errors else 0.9
    workflow_prog = Progress(self, start=0.0, end=end_prog, nreports=8)

    workflow_prog.report('Cropping Workspace')
    CropWorkspace(InputWorkspace=self._sample,
                  OutputWorkspace='__TransformToIqt_sample_cropped',
                  Xmin=self._e_min,
                  Xmax=self._e_max)

    workflow_prog.report('Calculating table properties')
    x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)
    # X values are bin boundaries, so the bin count is one less.
    number_input_points = len(x_data) - 1
    num_bins = int(number_input_points / self._number_points_per_bin)
    self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins

    workflow_prog.report('Attempting to Access IPF')
    try:
        workflow_prog.report('Access IPF')
        instrument = mtd[self._sample].getInstrument()

        # Prefer a resolution defined on the analyser component; fall back
        # to the top-level instrument parameter if the component is absent.
        analyserName = instrument.getStringParameter('analyser')[0]
        analyser = instrument.getComponentByName(analyserName)

        if analyser is not None:
            logger.debug('Found %s component in instrument %s, will look for resolution there'
                         % (analyserName, instrument))
            resolution = analyser.getNumberParameter('resolution')[0]
        else:
            logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
                         % (analyserName, instrument))
            resolution = instrument.getNumberParameter('resolution')[0]

        logger.information('Got resolution from IPF: %f' % resolution)
        workflow_prog.report('IPF resolution obtained')
    except (AttributeError, IndexError):
        # No instrument/parameter available: default to 1% of E-fixed.
        workflow_prog.report('Resorting to Default')
        resolution = getEfixed(self._sample) * 0.01
        logger.warning('Could not get the resolution from the IPF, using 1% of the E Fixed value for the '
                       'resolution: {0}'.format(resolution))

    # Number of output bins spanned by the (symmetric) resolution width.
    resolution_bins = int(round((2 * resolution) / self._e_width))

    if resolution_bins < 5:
        logger.warning(
            'Resolution curve has <5 points. Results may be unreliable.')

    workflow_prog.report('Creating Parameter table')
    param_table = CreateEmptyTableWorkspace(
        OutputWorkspace=self._parameter_table)

    workflow_prog.report('Populating Parameter table')
    param_table.addColumn('int', 'SampleInputBins')
    param_table.addColumn('float', 'BinReductionFactor')
    param_table.addColumn('int', 'SampleOutputBins')
    param_table.addColumn('float', 'EnergyMin')
    param_table.addColumn('float', 'EnergyMax')
    param_table.addColumn('float', 'EnergyWidth')
    param_table.addColumn('float', 'Resolution')
    param_table.addColumn('int', 'ResolutionBins')
    param_table.addRow([number_input_points, self._number_points_per_bin,
                        num_bins, self._e_min, self._e_max, self._e_width,
                        resolution, resolution_bins])

    workflow_prog.report('Deleting temp Workspace')
    if mtd.doesExist('__TransformToIqt_sample_cropped'):
        DeleteWorkspace('__TransformToIqt_sample_cropped')

    self.setProperty('ParameterWorkspace', param_table)
def PyExec(self):
    """
    Perform an azimuthal (radial) average of a 2D SANS pattern into I(Q),
    optionally producing per-wedge averages and Q resolution.
    """
    # Warn user if error-weighting was turned on
    error_weighting = self.getProperty("ErrorWeighting").value
    if error_weighting:
        msg = "The ErrorWeighting option is turned ON. "
        msg += "This option is NOT RECOMMENDED"
        Logger("SANSAzimuthalAverage").warning(msg)

    # Warn against sub-pixels
    n_subpix = self.getProperty("NumberOfSubpixels").value
    if n_subpix != 1:
        msg = "NumberOfSubpixels was set to %s: " % str(n_subpix)
        msg += "The recommended value is 1"
        Logger("SANSAzimuthalAverage").warning(msg)

    # Q binning options
    binning = self.getProperty("Binning").value
    binning_prop = self.getPropertyValue("Binning")
    workspace = self.getProperty("InputWorkspace").value
    output_ws_name = self.getPropertyValue("OutputWorkspace")

    # Q range: pixel sizes come from the instrument parameters.
    pixel_size_x = workspace.getInstrument().getNumberParameter("x-pixel-size")[0]
    pixel_size_y = workspace.getInstrument().getNumberParameter("y-pixel-size")[0]

    # An empty or all-zero binning means "determine the binning automatically
    # from the wavelength range of the data".
    if len(binning) == 0 or (binning[0] == 0 and binning[1] == 0 and binning[2] == 0):
        # Wavelength. Read in the wavelength bins.
        # Skip the first one which is not set up properly for EQ-SANS
        x = workspace.dataX(1)
        x_length = len(x)
        if x_length < 2:
            raise RuntimeError("Azimuthal averaging expects at least one wavelength bin")
        # Bin centres of the last and first wavelength bins.
        wavelength_max = (x[x_length - 2] + x[x_length - 1]) / 2.0
        wavelength_min = (x[0] + x[1]) / 2.0
        if wavelength_min == 0 or wavelength_max == 0:
            raise RuntimeError("Azimuthal averaging needs positive wavelengths")
        qmin, qstep, qmax = self._get_binning(workspace, wavelength_min, wavelength_max)
        align = self.getProperty("AlignWithDecades").value
        log_binning = self.getProperty("LogBinning").value
        if log_binning and align:
            binning_prop = self._get_aligned_binning(qmin, qmax)
        else:
            binning_prop = "%g, %g, %g" % (qmin, qstep, qmax)
        workspace.getRun().addProperty("qstep", float(qstep), True)
        self.setPropertyValue("Binning", binning_prop)
    else:
        qmin = binning[0]
        qmax = binning[2]

    logger.debug("Qmin = %s" % qmin)
    logger.debug("Qmax = %s" % qmax)
    # Record the Q range on the run for downstream consumers.
    workspace.getRun().addProperty("qmin", float(qmin), True)
    workspace.getRun().addProperty("qmax", float(qmax), True)

    # If we kept the events this far, we need to convert the input workspace
    # to a histogram here
    if workspace.id() == "EventWorkspace":
        alg = AlgorithmManager.create("ConvertToMatrixWorkspace")
        alg.initialize()
        alg.setChild(True)
        alg.setProperty("InputWorkspace", workspace)
        alg.setPropertyValue("OutputWorkspace", "__tmp_matrix_workspace")
        alg.execute()
        workspace = alg.getProperty("OutputWorkspace").value

    # Perform the weighted 1D reduction (and wedge reductions) as a child
    # algorithm so intermediate workspaces stay out of the ADS.
    alg = AlgorithmManager.create("Q1DWeighted")
    alg.initialize()
    alg.setChild(True)
    alg.setProperty("InputWorkspace", workspace)
    alg.setPropertyValue("OutputBinning", binning_prop)
    alg.setProperty("NPixelDivision", n_subpix)
    alg.setProperty("PixelSizeX", pixel_size_x)
    alg.setProperty("PixelSizeY", pixel_size_y)
    alg.setProperty("ErrorWeighting", error_weighting)
    alg.setPropertyValue("OutputWorkspace", output_ws_name)
    #wedge_ws_name = self.getPropertyValue("WedgeWorkspace")
    n_wedges = self.getProperty("NumberOfWedges").value
    wedge_angle = self.getProperty("WedgeAngle").value
    wedge_offset = self.getProperty("WedgeOffset").value
    alg.setPropertyValue("WedgeWorkspace", output_ws_name + '_wedges')
    alg.setProperty("NumberOfWedges", n_wedges)
    alg.setProperty("WedgeAngle", wedge_angle)
    alg.setProperty("WedgeOffset", wedge_offset)
    alg.execute()
    output_ws = alg.getProperty("OutputWorkspace").value
    wedge_ws = alg.getProperty("WedgeWorkspace").value

    # Replace NaN/Inf in the main output with zeros.
    alg = AlgorithmManager.create("ReplaceSpecialValues")
    alg.initialize()
    alg.setChild(True)
    alg.setProperty("InputWorkspace", output_ws)
    alg.setPropertyValue("OutputWorkspace", output_ws_name)
    alg.setProperty("NaNValue", 0.0)
    alg.setProperty("NaNError", 0.0)
    alg.setProperty("InfinityValue", 0.0)
    alg.setProperty("InfinityError", 0.0)
    alg.execute()
    output_ws = alg.getProperty("OutputWorkspace").value

    # Q resolution
    compute_resolution = self.getProperty("ComputeResolution").value
    if compute_resolution:
        alg = AlgorithmManager.create("ReactorSANSResolution")
        alg.initialize()
        alg.setChild(True)
        alg.setProperty("InputWorkspace", output_ws)
        alg.execute()

    # Clean and publish each wedge workspace as a dynamically declared
    # output property ("WedgeWorkspace_<i>").
    for i in range(wedge_ws.getNumberOfEntries()):
        wedge_i = wedge_ws.getItem(i)
        # Prefer the wedge's own angle as the name suffix when available.
        identifier = i
        if wedge_i.getRun().hasProperty("wedge_angle"):
            identifier = int(wedge_i.getRun().getProperty("wedge_angle").value)
        wedge_i_name = "%s_wedge_%s" % (output_ws_name, identifier)
        alg = AlgorithmManager.create("ReplaceSpecialValues")
        alg.initialize()
        alg.setChild(True)
        alg.setProperty("InputWorkspace", wedge_i)
        alg.setProperty("OutputWorkspace", wedge_i_name)
        alg.setProperty("NaNValue", 0.0)
        alg.setProperty("NaNError", 0.0)
        alg.setProperty("InfinityValue", 0.0)
        alg.setProperty("InfinityError", 0.0)
        alg.execute()
        wedge_i = alg.getProperty("OutputWorkspace").value
        if compute_resolution:
            alg = AlgorithmManager.create("ReactorSANSResolution")
            alg.initialize()
            alg.setChild(True)
            alg.setProperty("InputWorkspace", wedge_i)
            alg.execute()
        self.declareProperty(MatrixWorkspaceProperty("WedgeWorkspace_%s" % i, "",
                                                     direction=Direction.Output))
        self.setPropertyValue("WedgeWorkspace_%s" % i, wedge_i_name)
        self.setProperty("WedgeWorkspace_%s" % i, wedge_i)

    msg = "Performed radial averaging between Q=%g and Q=%g" % (qmin, qmax)
    self.setProperty("OutputMessage", msg)
    self.setProperty("OutputWorkspace", output_ws)
def cleanup(): names = mtd.getObjectNames() for name in names: if re.search("^_", name) and mtd.doesExist(name): logger.debug("deleting " + name) DeleteWorkspace(name)
def convert(self, wavelength_min, wavelength_max, detector_workspace_indexes,
            monitor_workspace_index, correct_monitor=False, bg_min=None, bg_max=None):
    """
    Run the conversion to wavelength.

    Arguments:
    wavelength_min: min wavelength in x for monitor workspace
    wavelength_max: max wavelength in x for detector workspace
    detector_workspace_indexes: Tuple of workspace indexes (or tuple of tuple min, max ranges to keep)
    monitor_workspace_index: The index of the monitor workspace
    correct_monitor: Flag indicating that monitors should have a flat background correction applied
    bg_min: x min background in wavelength
    bg_max: x max background in wavelength

    Returns: tuple of (_monitor_ws, _detector_ws), both in wavelength

    Raises: ValueError on inconsistent wavelength or background limits.
    """
    # Sanity check inputs.
    if wavelength_min >= wavelength_max:
        raise ValueError(
            "Wavelength_min must be < wavelength_max min: %s, max: %s"
            % (wavelength_min, wavelength_max))

    # Bug fix: the previous all((bg_min, bg_max)) truthiness test treated a
    # perfectly valid limit of 0.0 as "not provided". Test for None
    # explicitly instead.
    have_background = bg_min is not None and bg_max is not None
    if correct_monitor and not have_background:
        raise ValueError(
            "Either provide ALL, monitors_to_correct, bg_min, bg_max or none of them"
        )
    if have_background and bg_min >= bg_max:
        raise ValueError("Background min must be < Background max")

    # 'summed' rather than 'sum' to avoid shadowing the builtin.
    summed = ConvertToWavelength.sum_workspaces(self.__ws_list)
    sum_wavelength = msi.ConvertUnits(InputWorkspace=summed,
                                      Target="Wavelength",
                                      AlignBins='1')

    logger.debug("Monitor detector index %s" % str(monitor_workspace_index))

    # Crop out the monitor workspace
    _monitor_ws = msi.CropWorkspace(
        InputWorkspace=sum_wavelength,
        StartWorkspaceIndex=monitor_workspace_index,
        EndWorkspaceIndex=monitor_workspace_index)

    # Crop out the detector workspace then chop out the x-ranges of interest.
    _detector_ws = ConvertToWavelength.crop_range(
        sum_wavelength, detector_workspace_indexes)
    _detector_ws = msi.CropWorkspace(InputWorkspace=_detector_ws,
                                     XMin=wavelength_min,
                                     XMax=wavelength_max)

    # Apply a flat background to the monitor if requested.
    if correct_monitor and have_background:
        _monitor_ws = msi.CalculateFlatBackground(
            InputWorkspace=_monitor_ws,
            WorkspaceIndexList=0,
            StartX=bg_min,
            EndX=bg_max)

    msi.DeleteWorkspace(Workspace=sum_wavelength.name())
    return (_monitor_ws, _detector_ws)
self._sum_contributions = self.getProperty("SumContributions").value # conversion from str to int self._num_quantum_order_events = int( self.getProperty("QuantumOrderEventsNumber").value) self._scale_by_cross_section = self.getPropertyValue( 'ScaleByCrossSection') self._out_ws_name = self.getPropertyValue('OutputWorkspace') self._calc_partial = (len(self._atoms) > 0) # user defined interval is exclusive with respect to # AbinsModules.AbinsParameters.min_wavenumber # AbinsModules.AbinsParameters.max_wavenumber # with bin width AbinsModules.AbinsParameters.bin_width step = self._bin_width start = AbinsModules.AbinsParameters.min_wavenumber + step / 2.0 stop = AbinsModules.AbinsParameters.max_wavenumber + step / 2.0 self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE) try: AlgorithmFactory.subscribe(Abins) except ImportError: logger.debug( 'Failed to subscribe algorithm SimulatedDensityOfStates; The python package may be missing.' )
def create_kpoints_data_helper(self,
                               atomic_displacements=None,
                               atomic_coordinates=None,
                               row=None,
                               column=None,
                               freq_num=None,
                               row_width=6):
    """
    Computes normalisation constant for displacements and builds a block of coordinates.

    :param atomic_displacements: list with atomic displacements (x, y, z components)
    :param atomic_coordinates: list with atomic coordinates (one text line per atom)
    :param row: number of atomic_displacements row to parse
    :param column: number of atomic_displacements column to parse
    :param freq_num: number of mode (frequency)
    :param row_width: current width of row to parse
    :return: list of [x, y, z] mass-weighted, normalised displacements per atom
    """
    xdisp = atomic_displacements[0]
    ydisp = atomic_displacements[1]
    zdisp = atomic_displacements[2]

    # Compute normalisation constant for displacements
    # and build block of normalised coordinates.
    # (idiom: enumerate replaces the manual atom_num counter)
    normalised_coordinates = []
    norm_const1 = 0.
    for atom_num, line in enumerate(atomic_coordinates):
        tokens = line.split()
        # NOTE(review): the stride uses a literal 6 rather than row_width;
        # presumably full rows always hold 6 entries -- confirm before
        # unifying with the row_width parameter.
        indx = row * len(atomic_coordinates) * 6 + atom_num * row_width + column
        if indx <= len(xdisp) - 1:
            x = xdisp[indx]
            y = ydisp[indx]
            z = zdisp[indx]
            norm_const1 += (x * x.conjugate() + y * y.conjugate() + z * z.conjugate()).real
            normalised_coordinates += [[atom_num + 1, tokens[2], int(tokens[1]),
                                        x, y, z]]

    # Normalise displacements and multiply displacements by sqrt(mass)-> xn, yn, zn
    xn = []
    yn = []
    zn = []
    norm_const1 = sqrt(norm_const1)
    norm = 0.0
    for item in normalised_coordinates:
        atom = Atom(symbol=str(item[1]).capitalize())
        mass = atom.mass
        x = item[3] / norm_const1 * sqrt(mass)
        y = item[4] / norm_const1 * sqrt(mass)
        z = item[5] / norm_const1 * sqrt(mass)
        xn += [x]
        yn += [y]
        zn += [z]
        norm += (x * x.conjugate() + y * y.conjugate() + z * z.conjugate()).real

    # Normalise displacements.
    # (idiom: zip over the parallel lists replaces the manual 'ii' counter)
    normf = 0.0
    local_displacements = []
    for xi, yi, zi in zip(xn, yn, zn):
        x = xi / sqrt(norm)
        y = yi / sqrt(norm)
        z = zi / sqrt(norm)
        normf += (x * x.conjugate() + y * y.conjugate() + z * z.conjugate()).real
        local_displacements.append([x, y, z])

    logger.debug("Mode {0} normalised to {1}".format(
        str(freq_num + 1), str(normf)))

    return local_displacements
def PyExec(self):
    """ Main execution body.

    Chooses a time-independent-background (TIB) window in microseconds for
    the given incident energy and stores it in the TibMin/TibMax output
    properties. A (0.0, 0.0) result means no acceptable window was found.
    """
    # get parameter
    energy = self.getProperty("IncidentEnergy").value
    # Fixed geometry/timing constants (distances in mm, times in us).
    # NOTE(review): 16667 us is presumably the 60 Hz source frame period
    # (1e6 / 60); confirm against the instrument definition.
    msd = 1800.0
    tail_length_us = 3000.0
    dist_mm = 39000.0 + msd + 4500.0
    T0_moderator = 0.0
    # Times of flight (us) at the focus element, sample and detector,
    # from distance / velocity (self.e2v converts energy to velocity).
    t_focEle_us = 39000.0 / self.e2v(energy) * 1000.0 + T0_moderator
    t_samp_us = (dist_mm - 4500.0) / self.e2v(energy) * 1000.0 + T0_moderator
    t_det_us = dist_mm / self.e2v(energy) * 1000 + T0_moderator
    # Frame window centred on the detector arrival time.
    frame_start_us = t_det_us - 16667 / 2
    frame_end_us = t_det_us + 16667 / 2
    # Prompt-pulse edges bracketing the detector time.
    index_under_frame = int(numpy.divide(int(t_det_us), 16667))
    pre_lead_us = 16667 * index_under_frame
    pre_tail_us = pre_lead_us + tail_length_us
    post_lead_us = 16667 * (1 + index_under_frame)
    #post_tail_us = post_lead_us + tail_length_us
    #E_final_meV = -1
    #E_transfer_meV = -1
    # finding an ok TIB range
    MinTIB_us = 2000.0
    slop_frac = 0.2
    #print t_focEle_us,pre_lead_us,frame_start_us,MinTIB_us,slop_frac
    # Cascade of candidate windows, from most to least preferred; the -2/-3
    # variants of each family progressively shrink the window (/1.5, /2.0).
    if (t_focEle_us < pre_lead_us) and (t_focEle_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before focus element-1')
        TIB_high_us = t_focEle_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif (frame_start_us > pre_tail_us) and (t_focEle_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before focus element-2')
        TIB_high_us = t_focEle_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif t_focEle_us - pre_tail_us > MinTIB_us * (slop_frac + 1.0) and (t_focEle_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before focus element-3')
        TIB_high_us = t_focEle_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif t_samp_us - pre_tail_us > MinTIB_us * (slop_frac + 1.0) and (t_samp_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before sample-1')
        TIB_high_us = t_samp_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif t_samp_us - pre_tail_us > MinTIB_us / 1.5 * (slop_frac + 1.0) and (t_samp_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before sample-2')
        TIB_high_us = t_samp_us - MinTIB_us / 1.5 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 1.5
    elif t_samp_us - pre_tail_us > MinTIB_us / 2.0 * (slop_frac + 1.0) and (t_samp_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)):
        logger.debug('choosing TIB just before sample-3')
        TIB_high_us = t_samp_us - MinTIB_us / 2.0 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 2.0
    elif (pre_lead_us - frame_start_us > MinTIB_us * (slop_frac + 1.0)) and (t_focEle_us > pre_lead_us):
        logger.debug('choosing TIB just before leading edge before elastic-1')
        TIB_high_us = pre_lead_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif (pre_lead_us - frame_start_us > MinTIB_us / 1.5 * (slop_frac + 1.0)) and (t_focEle_us > pre_lead_us):
        logger.debug('choosing TIB just before leading edge before elastic-2')
        TIB_high_us = pre_lead_us - MinTIB_us / 1.5 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 1.5
    elif (pre_lead_us - frame_start_us > MinTIB_us / 2.0 * (slop_frac + 1.0)) and (t_focEle_us > pre_lead_us):
        logger.debug('choosing TIB just before leading edge before elastic-3')
        TIB_high_us = pre_lead_us - MinTIB_us / 2.0 * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us / 2.0
    # elif (pre_tail_us > frame_start_us) and (t_focEle_us - pre_tail_us > MinTIB_us * (slop_frac + 1.0)):
    #     logger.debug('choosing TIB just before focus element')
    #     TIB_low_us = pre_tail_us + MinTIB_us * slop_frac / 2.0
    #     TIB_high_us = TIB_low_us + MinTIB_us
    elif post_lead_us > frame_end_us:
        logger.debug('choosing TIB at end of frame')
        TIB_high_us = frame_end_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    elif post_lead_us - t_det_us > MinTIB_us * (slop_frac + 1.0):
        logger.debug('choosing TIB between elastic peak and later prompt pulse leading edge')
        TIB_high_us = post_lead_us - MinTIB_us * slop_frac / 2.0
        TIB_low_us = TIB_high_us - MinTIB_us
    else:
        # No safe window found: signal failure with a zero-width range.
        logger.debug('I cannot find a good TIB range')
        TIB_low_us = 0.0
        TIB_high_us = 0.0
    # return the result
    self.setProperty("TibMin", TIB_low_us)
    self.setProperty("TibMax", TIB_high_us)
    return
def errorbar(self, *args, **kwargs):
    """
    If the **mantid** projection is chosen, it can be
    used the same as :py:meth:`matplotlib.axes.Axes.errorbar` for arrays,
    or it can be used to plot :class:`mantid.api.MatrixWorkspace`
    or :class:`mantid.api.IMDHistoWorkspace`. You can have something like::

        import matplotlib.pyplot as plt
        from mantid import plots

        ...

        fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
        ax.errorbar(workspace,'rs',specNum=1) #for workspaces
        ax.errorbar(x,y,yerr,'bo')            #for arrays
        fig.show()

    For keywords related to workspaces, see :func:`plotfunctions.errorbar`
    """
    if helperfunctions.validate_args(*args):
        logger.debug('using plotfunctions')

        autoscale_on_update = kwargs.pop("autoscale_on_update", True)

        def _data_update(artists, workspace, new_kwargs=None):
            # Replace the error-bar artists for `workspace` with a freshly
            # plotted container while keeping its position in self.containers.
            if self.lines:
                self.set_autoscaley_on(autoscale_on_update)

            # errorbar with workspaces can only return a single container
            container_orig = artists[0]
            # It is not possible to simply reset
            # the error bars so we have to plot new lines
            # but ensure we don't reorder them on the plot!
            orig_idx = self.containers.index(container_orig)
            container_orig.remove()
            # The container does not remove itself from the containers list
            # but protect this just in case matplotlib starts doing this
            try:
                self.containers.remove(container_orig)
            except ValueError:
                pass
            # this gets pushed back onto the containers list
            if new_kwargs:
                container_new = plotfunctions.errorbar(self, workspace, **new_kwargs)
            else:
                container_new = plotfunctions.errorbar(self, workspace, **kwargs)
            # Reinsert at the original index; the pop() drops the duplicate
            # entry that plotting appended to the end of the list.
            self.containers.insert(orig_idx, container_new)
            self.containers.pop()

            # Update joining line
            if container_new[0] and container_orig[0]:
                container_new[0].update_from(container_orig[0])
            # Update caps
            for orig_caps, new_caps in zip(container_orig[1], container_new[1]):
                new_caps.update_from(orig_caps)
            # Update bars
            for orig_bars, new_bars in zip(container_orig[2], container_new[2]):
                new_bars.update_from(orig_bars)

            # Re-plotting in the config dialog will assign this attr
            if hasattr(container_orig, 'errorevery'):
                setattr(container_new, 'errorevery', container_orig.errorevery)

            # ax.relim does not support collections...
            self._update_line_limits(container_new[0])
            self.set_autoscaley_on(True)
            return container_new

        workspace = args[0]
        spec_num = self.get_spec_number(workspace, kwargs)
        is_normalized, kwargs = get_normalize_by_bin_width(workspace, self, **kwargs)

        if self.lines:
            self.set_autoscaley_on(autoscale_on_update)

        artist = self.track_workspace_artist(
            workspace, plotfunctions.errorbar(self, *args, **kwargs),
            _data_update, spec_num, is_normalized)

        self.set_autoscaley_on(True)
        return artist
    else:
        return Axes.errorbar(self, *args, **kwargs)
self._instrument_name = instrument_name instrument_producer = AbinsModules.InstrumentProducer() self._instrument = instrument_producer.produce_instrument(name=self._instrument_name) else: raise ValueError("Unknown instrument %s" % instrument_name) self._atoms = self.getProperty("Atoms").value self._sum_contributions = self.getProperty("SumContributions").value # conversion from str to int self._num_quantum_order_events = int(self.getProperty("QuantumOrderEventsNumber").value) self._scale_by_cross_section = self.getPropertyValue('ScaleByCrossSection') self._out_ws_name = self.getPropertyValue('OutputWorkspace') self._calc_partial = (len(self._atoms) > 0) # user defined interval is exclusive with respect to # AbinsModules.AbinsParameters.min_wavenumber # AbinsModules.AbinsParameters.max_wavenumber # with bin width AbinsModules.AbinsParameters.bin_width step = self._bin_width start = AbinsModules.AbinsParameters.min_wavenumber + step / 2.0 stop = AbinsModules.AbinsParameters.max_wavenumber + step / 2.0 self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE) try: AlgorithmFactory.subscribe(Abins) except ImportError: logger.debug('Failed to subscribe algorithm SimulatedDensityOfStates; The python package may be missing.')
def test_input_exceptions(self):
    """Check that DSFinterp rejects inconsistent or out-of-range inputs."""
    # Run the test only if dsfinterp package is present
    try:
        import dsfinterp  # noqa: F401  (presence check only)
    except ImportError:
        # Catch only ImportError: the old bare 'except' would also have
        # swallowed genuine failures raised while importing dsfinterp.
        logger.debug(
            'Python package dsfinterp is missing (https://pypi.python.org/pypi/dsfinterp)'
        )
        return
    nf = 9
    fvalues, workspaces = self.generateWorkspaces(
        nf)  # workspaces sim1 to sim9 (nine workspaces)

    # Try passing different number of workspaces and parameter values
    try:
        fvalueswrong = range(nf - 1)  # eight values
        mantid.simpleapi.DSFinterp(Workspaces=workspaces,
                                   ParameterValues=fvalueswrong,
                                   LocalRegression=False,
                                   TargetParameters=5.5,
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue(
            'Number of Workspaces and ParameterValues should be the same'
            in str(e))
    else:
        # self.fail survives 'python -O'; a bare assert does not.
        self.fail("Didn't raise any exception")

    # Try passing an incompatible workspace
    try:
        mantid.simpleapi.CreateWorkspace(OutputWorkspace='sim10',
                                         DataX='1,2,3',
                                         DataY='1,1,1',
                                         DataE='0,0,0')
        fvalues2 = fvalues + [10, ]
        workspaces2 = workspaces + ['sim10', ]
        mantid.simpleapi.DSFinterp(Workspaces=workspaces2,
                                   ParameterValues=fvalues2,
                                   LocalRegression=False,
                                   TargetParameters=5.5,
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue('Workspace sim10 incompatible with sim1' in str(e))
    else:
        self.fail("Didn't raise any exception")
    mantid.api.AnalysisDataService.remove('sim10')

    # Try passing a target parameter outside range
    try:
        mantid.simpleapi.DSFinterp(Workspaces=workspaces,
                                   ParameterValues=fvalues,
                                   LocalRegression=False,
                                   TargetParameters=nf + 1,
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue('Target parameters should lie in' in str(e))
    else:
        self.fail("Didn't raise any exception")

    # Try passing a different number of target parameters and output workspaces
    try:
        mantid.simpleapi.DSFinterp(Workspaces=workspaces,
                                   ParameterValues=fvalues,
                                   LocalRegression=False,
                                   TargetParameters=[1, 2],
                                   OutputWorkspaces='outws')
    except Exception as e:
        self.assertTrue(
            'Number of OutputWorkspaces and TargetParameters should be the same'
            in str(e))
    else:
        self.fail("Didn't raise any exception")

    self.cleanup(nf)