def set_sample_log(self, log_name, sub_runs, log_value_array):
    """Store one sample log's per-sub-run (time-averaged) values.

    Parameters
    ----------
    log_name : str
        sample log name
    sub_runs : ndarray
        sub runs with same shape as log_value_array
    log_value_array : ndarray
        log values, one per sub run

    Returns
    -------
    None
    """
    # Validate inputs
    checkdatatypes.check_string_variable('Log name', log_name)
    checkdatatypes.check_numpy_arrays('Sub runs and log values',
                                      [sub_runs, log_value_array], 1, True)

    if len(self._sample_logs) == 0:
        # very first log registers the sub-run axis
        self._sample_logs.subruns = numpy.atleast_1d(sub_runs)
    else:
        # subsequent logs must align with the existing sub-run axis
        self._sample_logs.matching_subruns(sub_runs)

    # store the values keyed by log name
    self._sample_logs[log_name] = numpy.atleast_1d(log_value_array)
def get_peak_fit_parameter_vec(self, param_name, det_id):
    """Collect one fitted peak parameter for a detector into a vector.

    :param param_name: name of the fitted peak parameter
    :param det_id: detector ID (non-negative integer)
    :return: 1D float numpy array ordered by (sorted) log index
    """
    checkdatatypes.check_string_variable('Peak fitting parameter name', param_name)
    checkdatatypes.check_int_variable('Detector ID', det_id, (0, None))

    # all fit results for this detector, keyed by log index
    det_fit_dict = self._peak_fit_info_dict[det_id]
    param_vec = np.ndarray(shape=(len(det_fit_dict), ), dtype='float')

    for vec_index, log_index in enumerate(sorted(det_fit_dict.keys())):
        try:
            param_vec[vec_index] = det_fit_dict[log_index][param_name]
        except KeyError:
            raise RuntimeError(
                'Parameter {0} is not a key. Candidates are {1} ... {2}'
                ''.format(param_name, det_fit_dict.keys(),
                          det_fit_dict[log_index].keys()))

    return param_vec
def get_sample_log_values(self, sample_log_name, sub_runs=None):
    """Get ONE INDIVIDUAL sample log's values as a vector.

    Parameters
    ----------
    sample_log_name : str
        name of the sample log
    sub_runs : list or ndarray or None
        None for all log values, list/ndarray for selected sub runs

    Returns
    -------
    ndarray
        log values ordered by sub run number (all sub runs or the given subset)

    Raises
    ------
    RuntimeError
        if the sample log name is not registered
    """
    # The sub-run "log" may never have been stored explicitly; synthesize it
    # from the sub-run axis in that case.
    if sample_log_name == HidraConstants.SUB_RUNS and \
            sample_log_name not in self._sample_logs.keys():
        return self.get_sub_runs()

    checkdatatypes.check_string_variable('Sample log name', sample_log_name,
                                         list(self._sample_logs.keys()))

    return self._sample_logs[sample_log_name, sub_runs]
def generate_mantid_workspace(hidra_workspace, workspace_name, mask_id=None):
    """Generate a Mantid MatrixWorkspace from a HidraWorkspace.

    :param hidra_workspace: HidraWorkspace instance holding reduced diffraction data
    :param workspace_name: string for output workspace name; None to reuse the
                           HidraWorkspace's own name
    :param mask_id: Mask index for the reduced diffraction data in
                    HidraWorkspace/HidraProjectFile
    :return: Mantid MatrixWorkspace
    """
    # Check inputs
    checkdatatypes.check_type('Hidra workspace', hidra_workspace, workspaces.HidraWorkspace)

    # workspace name: default to the HidraWorkspace's name
    if workspace_name is None:
        workspace_name = hidra_workspace.name
    else:
        checkdatatypes.check_string_variable('Workspace name', workspace_name)

    # Get data from HiDRA Workspace
    two_theta_matrix, data_y_matrix, data_e_matrix = \
        hidra_workspace.get_reduced_diffraction_data_set(mask_id)

    # Mantid (2019.11) does not accept NaN: convert all NaN to zero.
    # No good peak will have NaN or zero data.
    # (boolean-mask indexing replaces the redundant np.where wrapper)
    data_y_matrix[np.isnan(data_y_matrix)] = 0.
    data_e_matrix[np.isnan(data_e_matrix)] = 0.

    # Create Mantid workspace
    matrix_ws = CreateWorkspace(DataX=two_theta_matrix,
                                DataY=data_y_matrix,
                                DataE=data_e_matrix,
                                NSpec=data_y_matrix.shape[0],
                                OutputWorkspace=workspace_name,
                                EnableLogging=False)

    return matrix_ws
def parse_rigorous_int_string(int_str):
    """Parse a string that must strictly represent an integer (optionally signed).

    Unlike plain int(), values such as '3.2' are rejected. Additionally,
    malformed signed strings such as '-5-3' are rejected instead of being
    silently truncated.

    :param int_str: string to parse
    :return: parsed integer including sign
    :raises ValueError: if the string is not rigorously an integer
    """
    checkdatatypes.check_string_variable('Integer in string', int_str)

    # Strip an optional leading minus sign.
    # BUG FIX: the previous implementation used int_str.split('-')[1], which
    # silently parsed inputs like '-5-3' as -5; slicing keeps the full
    # remainder so such inputs now fail the digit check below.
    if int_str.startswith('-'):
        sign = -1
        int_str = int_str[1:]
    else:
        sign = 1

    # every remaining character must be a digit
    if not int_str.isdigit():
        raise ValueError(
            '{} cannot be recognized as an integer rigorously'.format(int_str))

    # convert (int() cannot realistically fail after isdigit, kept for safety)
    try:
        int_number = sign * int(int_str)
    except ValueError as val_err:
        raise ValueError(
            'Unable to convert string {} to an integer: {}'.format(
                int_str, val_err))

    return int_number
def get_sample_log_value(self, sample_log_name, sub_run=None):
    """Get the time-averaged value of one sample log for a single sub run.

    Parameters
    ----------
    sample_log_name : str
        name of the sample log
    sub_run : int or None
        sub run number; None for the whole log

    Returns
    -------
    float
        time-averaged sample log value for this sub run

    Raises
    ------
    AssertionError
        if the log holds multiple distinct values so no single value exists
    """
    checkdatatypes.check_string_variable('Sample log name', sample_log_name,
                                         list(self._sample_logs.keys()))

    log_value = self._sample_logs[sample_log_name, sub_run]
    if isinstance(log_value, numpy.ndarray):
        if log_value.shape == (1, ):
            # single-item array: unwrap it
            log_value = log_value[0]
        else:
            # Multiple items are acceptable only when they are all identical;
            # this matches the sibling implementation of this method elsewhere
            # in the project.
            # BUG FIX: the previous bare 'assert' was stripped under python -O,
            # silently returning an ndarray to callers expecting a float.
            log_set = set(log_value)
            if len(log_set) == 1:
                log_value = log_set.pop()
            else:
                msg = 'Single log {} (= {}) is a numpy array with multiple items' \
                      '(shape = {})'.format(sample_log_name, log_value, log_value.shape)
                raise AssertionError(msg)

    return log_value
def save_mantid_mask(mask_vec, h5_name, two_theta, note):
    """Save a detector mask vector to an HDF5 mask file.

    :param mask_vec: 1D mask array
    :param h5_name: output HDF5 file name
    :param two_theta: 2-theta value stored as an attribute, or None to omit
    :param note: free-text note stored as an attribute, or None to omit
    :return: None
    """
    # Validate inputs
    checkdatatypes.check_numpy_arrays('Mask vector', [mask_vec],
                                      dimension=1, check_same_shape=False)
    checkdatatypes.check_file_name(h5_name, False, True, False,
                                   'PyRS masking file to export to')
    if two_theta is not None:
        checkdatatypes.check_float_variable('2-theta', two_theta, (-360., 360))
    if note is not None:
        checkdatatypes.check_string_variable('Mask note', note, None)

    # create file
    mask_file = h5py.File(h5_name, 'w')

    # add data set
    mask_data_set = mask_file.create_dataset('mask', data=mask_vec)

    # add optional attributes.
    # BUG FIX: the previous truthiness tests ('if two_theta:' / 'if note:')
    # silently dropped a legitimate 2theta of exactly 0.0 and an empty-string
    # note, inconsistent with the 'is not None' validation above.
    if two_theta is not None:
        mask_data_set.attrs['2theta'] = two_theta
    if note is not None:
        mask_data_set.attrs['note'] = note

    # close file
    mask_file.close()

    return
def get_sample_log_value(self, sample_log_name, sub_run=None):
    """Get the time-averaged value of a sample log for one sub run.

    Parameters
    ----------
    sample_log_name : str
        name of the sample log
    sub_run : int or None
        sub run number; None for the whole log

    Returns
    -------
    float
        time-averaged sample log value for this sub run

    Raises
    ------
    AssertionError
        if the log holds multiple distinct values
    """
    checkdatatypes.check_string_variable('Sample log name', sample_log_name,
                                         list(self._sample_logs.keys()))

    log_value = self._sample_logs[sample_log_name, sub_run]
    if not isinstance(log_value, numpy.ndarray):
        # scalar already: nothing to unwrap
        return log_value

    if log_value.shape == (1, ):
        # one-item array: unwrap it
        return log_value[0]

    unique_values = set(log_value)
    if len(unique_values) == 1:
        # every entry is identical, so a single representative value exists
        return unique_values.pop()

    raise AssertionError('Single log {} (= {}) is a numpy array with multiple items'
                         '(shape = {})'.format(sample_log_name, log_value,
                                               log_value.shape))
def set_attributes(h5_group, attribute_name, attribute_value):
    """Attach one named attribute (name -> value) to an HDF5 group."""
    # the attribute name must be a string; the value type is unrestricted here
    checkdatatypes.check_string_variable('Attribute name', attribute_name)
    h5_group.attrs[attribute_name] = attribute_value
def read_diffraction_intensity_vector(self, mask_id, sub_run):
    """Get the (reduced) diffraction data's intensity.

    :param mask_id: mask ID string; None falls back to the main (no-mask) data
    :param sub_run: sub run number; None returns all sub runs at once
    :return: 1D array (single sub run) or 2D array (all sub runs)
    """
    # Get default for mask/main
    if mask_id is None:
        mask_id = HidraConstants.REDUCED_MAIN
    checkdatatypes.check_string_variable(
        'Mask ID', mask_id,
        list(self._project_h5[HidraConstants.REDUCED_DATA].keys()))

    mask_data_set = self._project_h5[HidraConstants.REDUCED_DATA][mask_id]

    # NOTE: dataset[()] replaces the Dataset.value property, which was
    # deprecated and then removed in h5py 3.0.
    if sub_run is None:
        # all the sub runs
        reduced_diff_hist = mask_data_set[()]
    else:
        # specific one sub run: locate its row index.
        # (the former re-check of 'mask_id is None' here was dead code since
        # mask_id is already defaulted above)
        sub_run_list = self.read_sub_runs()
        sub_run_index = sub_run_list.index(sub_run)
        reduced_diff_hist = mask_data_set[()][sub_run_index]

    return reduced_diff_hist
def init_session(self, session_name, hidra_ws=None):
    """Start a new reduction session and register its workspace by name.

    :param session_name: non-empty name identifying the session
    :param hidra_ws: existing HidraWorkspace to attach, or None to create a fresh one
    :return: None
    """
    # Validate the name first
    checkdatatypes.check_string_variable('Reduction session name', session_name)
    if session_name == '':
        raise RuntimeError('Session name {} is empty'.format(session_name))
    elif session_name in self._session_dict:
        # replacing an existing session is allowed but worth flagging
        print('[WARNING] Session {} is previously taken. The HidraWorkspace associated '
              'will be replaced if new HidraWorkspace is not None ({})'
              ''.format(session_name, hidra_ws is None))

    if hidra_ws is None:
        # no workspace supplied: start the session with a brand new one
        self._curr_workspace = workspaces.HidraWorkspace()
    else:
        # attach the caller-provided workspace after a type check
        checkdatatypes.check_type('HidraWorkspace', hidra_ws, workspaces.HidraWorkspace)
        self._curr_workspace = hidra_ws

    self._session_dict[session_name] = self._curr_workspace
def do_chop(self):
    """Slice (chop) the current run using the currently selected slicer.

    Pops an error message if no run number is set; otherwise locates the raw
    file for the run, runs the controller's slicer, and reports the outcome
    to the user.

    :return: None
    """
    if self._currRunNumber is None:
        gui_helper.pop_message(self, 'Run number has not been set',
                               message_type='error')
        return

    # get the run and raw file
    raw_file_name = None
    try:
        run_number = self._currRunNumber
        status, info_tup = self.get_controller().get_run_info(run_number)
        if status:
            raw_file_name = info_tup[0]
    except ValueError as val_error:
        # chain the cause so the original failure is preserved in the traceback
        raise RuntimeError('Unable to find out run number due to {0}'
                           ''.format(val_error)) from val_error

    # Validate the slicer key. check_string_variable already guarantees a
    # string, so the former redundant assert was dropped; the large
    # commented-out QuickChopDialog block (dead code) was removed as well.
    checkdatatypes.check_string_variable('Event slicing key', self._currSlicerKey)

    # TODO - slice data has not been implemented yet
    status, message = self.get_controller().slice_data(
        raw_file_name, self._currSlicerKey)

    if status:
        gui_helper.pop_message(self, message, message_type='information')
    else:
        gui_helper.pop_message(self, message, message_type='error')

    return
def workspace_exists(ws_name):
    """Return True if a workspace with the given name is registered in Mantid's ADS.

    :param ws_name: workspace name to look up
    :return: bool
    """
    checkdatatypes.check_string_variable('Workspace name', ws_name)

    return mtd.doesExist(ws_name)
def get_mask_vector(self, mask_id):
    """Fetch the detector mask registered under an ID.

    :param mask_id: String as ID
    :return: a 1D array (0: mask, 1: keep)
    """
    checkdatatypes.check_string_variable('Mask ID', mask_id,
                                         list(self._loaded_mask_dict.keys()))

    # element 0 of the stored entry holds the mask vector itself
    return self._loaded_mask_dict[mask_id][0]
def get_sub_runs(self, session_name):
    """Get the sub runs of the workspace that belongs to a session.

    :param session_name: name of a registered reduction session
    :return: sub run numbers from the session's workspace
    """
    checkdatatypes.check_string_variable('Session name', session_name,
                                         list(self._session_dict.keys()))

    # delegate to the session's workspace
    return self._session_dict[session_name].get_sub_runs()
def get_sub_run_detector_counts(self, session_name, sub_run):
    """Get the detector counts of one sub run in a session.

    :param session_name: name of a registered reduction session
    :param sub_run: sub run number
    :return: detector counts from the session's workspace
    """
    checkdatatypes.check_string_variable('Session name', session_name,
                                         list(self._session_dict.keys()))

    # delegate to the session's workspace
    return self._session_dict[session_name].get_detector_counts(sub_run)
def get_sub_run_2theta(self, session_name, sub_run):
    """Get the detector arm's 2theta position of a sub run.

    :param session_name: name of the session for locating the workspace
    :param sub_run: sub run number
    :return: 2theta value from the session's workspace
    """
    checkdatatypes.check_string_variable('Session name', session_name,
                                         list(self._session_dict.keys()))

    # delegate to the session's workspace
    return self._session_dict[session_name].get_detector_2theta(sub_run)
def has_sample_log(self, sample_log_name):
    """Check whether a sample log exists in this workspace.

    :param sample_log_name: sample log name
    :return: True if the log is present, else False
    """
    # Check inputs
    checkdatatypes.check_string_variable('Sample log name', sample_log_name)

    return sample_log_name in self._sample_logs
def get_hidra_workspace(self, session_name):
    """Look up the HidraWorkspace registered under a session name.

    :param session_name: string as the session/workspace name
    :return: HidraWorkspace instance
    :raises RuntimeError: if no such session is registered
    """
    checkdatatypes.check_string_variable('Session name', session_name,
                                         list(self._session_dict.keys()))

    # Explicit availability check (kept in addition to the validation above)
    if session_name not in self._session_dict:
        raise RuntimeError(
            'Session/HidraWorkspace {} does not exist. Available sessions/workspaces are {}'
            ''.format(session_name, self._session_dict.keys()))

    return self._session_dict[session_name]
def parse_integers(int_list_string):
    """Parse a string of comma-separated integers and ranges into a sorted list.

    A range 'a:b' includes a and excludes b, e.g. '1:4, 6:12, 8:12'.

    :param int_list_string: string such as '1:4, 6, 8:12'
    :return: sorted list of unique integers
    :raises RuntimeError: if any term cannot be parsed
    """
    checkdatatypes.check_string_variable('Integer list (string)', int_list_string)

    # remove unnecessary spaces
    int_list_string = int_list_string.replace(' ', '')

    # split by ,
    int_range_list = int_list_string.split(',')

    # parse to integers
    int_list = list()
    try:
        for int_range in int_range_list:
            column_counts = int_range.count(':')
            if column_counts == 0:
                # single value
                int_list.append(parse_rigorous_int_string(int_range))
            elif column_counts == 1:
                # given a range (end exclusive, matching the docstring)
                int_str_list = int_range.split(':')
                start_int = parse_rigorous_int_string(int_str_list[0])
                end_int = parse_rigorous_int_string(int_str_list[1])
                int_list.extend(range(start_int, end_int))
            else:
                # bad inputs
                raise ValueError(
                    '{0} has too many : to recognize'.format(int_range))
    except ValueError as val_err:
        # chain the cause so the original parse failure stays in the traceback
        raise RuntimeError(
            'Unable to parse integer list "{}" due to {}'.format(
                int_list_string, val_err)) from val_err

    # de-duplicate and sort in one step (replaces the list/set/sort dance)
    return sorted(set(int_list))
def get_reduced_diffraction_data(self, session_name, sub_run=None, mask_id=None):
    """Get reduced diffraction data from a session's workspace.

    :param session_name: name of a registered reduction session
    :param sub_run: sub run number or None
    :param mask_id: mask ID or None for the main data
    :return: 2-vectors: 2theta and intensity
    """
    checkdatatypes.check_string_variable('Session name', session_name,
                                         list(self._session_dict.keys()))

    # delegate to the session's workspace
    return self._session_dict[session_name].get_reduced_diffraction_data(
        sub_run, mask_id)
def export_mask(self, mask_id, out_file, note):
    """Export one registered mask to HDF5 (PyRS format).

    :param mask_id: ID of the mask to export
    :param out_file: output HDF5 file name
    :param note: free-text note stored with the mask
    :return: None
    """
    checkdatatypes.check_file_name(out_file, False, True, False,
                                   'Output hdf5 file name')
    checkdatatypes.check_string_variable('Mask note', note)

    # write the mask vector along with the current 2theta and the note
    mask_util.save_mantid_mask(self._mask_array_dict[mask_id], out_file,
                               self._2theta, note)

    return
def get_reduced_diffraction_data_set(self, mask_id=None):
    """Get the full reduced diffraction data set in 2theta unit.

    Parameters
    ----------
    mask_id : str or None
        None (as default main) or ID as a String

    Returns
    -------
    ndarray, ndarray, ndarray
        2theta in 2D array, intensities in 2D array, variances in 2D array

    Raises
    ------
    RuntimeError
        if the mask ID is not found among the reduced data
    """
    # None denotes the default/main (no-mask) data; only a real ID is validated
    if mask_id is not None:
        checkdatatypes.check_string_variable('Mask ID', mask_id)

    # Vector 2theta
    matrix_2theta = self._2theta_matrix.copy()

    def _copy_for_mask(data_set):
        # copy the matrix registered under mask_id, translating a missing ID
        # into a RuntimeError listing the available masks
        try:
            return data_set[mask_id].copy()
        except KeyError:
            raise RuntimeError(
                'Mask ID {} does not exist in reduced diffraction pattern. '
                'The available masks are {}'
                ''.format(mask_id, data_set.keys()))

    intensity_matrix = _copy_for_mask(self._diff_data_set)
    variance_matrix = _copy_for_mask(self._var_data_set)

    return matrix_2theta, intensity_matrix, variance_matrix
def get_reduced_diffraction_data(self, sub_run: int,
                                 mask_id: Optional[str] = None
                                 ) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Get a single diffraction pattern as (2theta vector, intensity vector).

    Parameters
    ----------
    sub_run : int
        sub run number (may start from 0)
    mask_id : str or None
        None (as default main) or ID as a String

    Returns
    -------
    numpy.ndarray, numpy.ndarray
        vector 2theta, vector intensity

    Raises
    ------
    RuntimeError
        if the mask ID is not found among the reduced data
    """
    # sub run number might start from 0
    sub_run = to_int('Sub run number', sub_run, min_value=0)
    if mask_id is not None:
        # None stands for the default/main data and needs no validation
        checkdatatypes.check_string_variable('Mask ID', mask_id)

    # map the sub run to its spectrum/row index
    spec_index = self._sample_logs.get_subrun_indices(sub_run)[0]

    # Vector 2theta
    vec_2theta = self._2theta_matrix[spec_index][:]

    # Vector intensity
    try:
        vec_intensity = self._diff_data_set[mask_id][spec_index].copy()
    except KeyError:
        raise RuntimeError(
            'Mask ID {} does not exist in reduced diffraction pattern. '
            'The available masks are {}'
            ''.format(mask_id, self._diff_data_set.keys()))

    return vec_2theta, vec_intensity
def set_2theta(self, two_theta, unit='degree'):
    """Set the 2theta value together with its unit.

    :param two_theta: 2theta value, validated against the unit's range
    :param unit: 'degree' or 'radius' (NOTE(review): 'radius' presumably means
                 radian — the literal is part of the API and kept as-is)
    :return: None
    """
    checkdatatypes.check_string_variable('2theta unit', unit,
                                         ['degree', 'radius'])

    # pick the valid range matching the unit
    if unit == 'degree':
        allowed_range = (-180., 180)
    else:
        allowed_range = (-math.pi, math.pi)
    checkdatatypes.check_float_variable('2theta', two_theta, allowed_range)

    # store the value and its unit as a pair
    self._two_theta = two_theta, unit

    return
def parse_integer(int_str):
    """Parse an integer from a string or a QLineEdit widget.

    :param int_str: str or QLineEdit containing the text to parse
    :return: parsed integer
    :raises RuntimeError: if the text cannot be converted to an integer
    """
    if isinstance(int_str, QLineEdit):
        # widget input: pull the text out of it first
        int_str = str(int_str.text())
    else:
        # otherwise it must already be a string
        checkdatatypes.check_string_variable('Integer string', int_str)

    try:
        return int(int_str)
    except ValueError as value_error:
        raise RuntimeError('Unable to parse {0} to integer due to {1}'.format(
            int_str, value_error))
def parse_float(float_str):
    """Parse a float from a string or a QLineEdit widget.

    :param float_str: str or QLineEdit containing the text to parse
    :return: parsed float
    :raises RuntimeError: if the text cannot be converted to a float
    """
    if isinstance(float_str, QLineEdit):
        # Input is QLineEdit: pull the text out of it first
        float_str = str(float_str.text())
    else:
        # Input has to be a string.
        # BUG FIX: the label said 'Integer string' (copy-paste from parse_integer)
        checkdatatypes.check_string_variable('Float string', float_str)

    try:
        float_value = float(float_str)
    except ValueError as value_error:
        # BUG FIX: the message said 'to integer' for a float parse
        raise RuntimeError('Unable to parse {0} to float due to {1}'.format(
            float_str, value_error))

    return float_value
def export_pole_figure(self, detector_id_list, file_name, file_type, file_header=''):
    """Export the calculated pole figure to a file.

    :param detector_id_list: list of detector IDs to write; None for all detectors
    :param file_name: output file name
    :param file_type: 'ascii' or 'mtex' (.jul), case-insensitive
    :param file_header: header text, used by the MTEX format only
    :return: None
    :raises RuntimeError: if file_type is not a supported format
    """
    # process detector ID list: default to every known detector
    if detector_id_list is None:
        detector_id_list = self.get_detector_ids()
    else:
        checkdatatypes.check_list('Detector IDs', detector_id_list)

    # check inputs
    checkdatatypes.check_file_name(file_name, check_exist=False,
                                   check_writable=True)
    checkdatatypes.check_string_variable(
        'Output pole figure file type/format', file_type)

    if file_type.lower() == 'ascii':
        # export pole figure arrays as ascii column file
        export_arrays_to_ascii(self._pole_figure_dict, detector_id_list,
                               file_name)
    elif file_type.lower() == 'mtex':
        # export to MTEX format
        export_to_mtex(self._pole_figure_dict, detector_id_list, file_name,
                       header=file_header)
    else:
        # BUG FIX: an unknown file type previously fell through silently and
        # exported nothing; fail loudly instead
        raise RuntimeError('Pole figure file type {} is not supported. '
                           'Supported types are "ascii" and "mtex"'
                           ''.format(file_type))

    return
def pop_message(parent, message, detailed_message=None, message_type='error'):
    """Pop up a message box of a specified message type.

    :param parent: parent widget (unused by QMessageBox here but kept in the API)
    :param message: main message to show
    :param detailed_message: detailed message optionally shown to user
    :param message_type: one of 'error', 'warning', 'info' (NOT case sensitive);
                         'information' is accepted as an alias of 'info'
    :return: None
    :raises TypeError: for an unsupported message type
    """
    message_type = message_type.lower()
    # BUG FIX: callers in this code base (e.g. do_chop) pass 'information',
    # which previously raised TypeError; accept it as an alias of 'info'.
    if message_type == 'information':
        message_type = 'info'
    if message_type not in ['error', 'warning', 'info']:
        raise TypeError(
            'Message type {0} is not supported.'.format(message_type))

    # check types
    checkdatatypes.check_string_variable('(Main) message to show', message)
    if detailed_message is not None:
        checkdatatypes.check_string_variable('(Detailed) message to show',
                                             detailed_message)

    # create a QMessageBox
    msg_box = QMessageBox()

    # set icon according to message type
    if message_type == 'info':
        msg_box.setIcon(QMessageBox.Information)
    elif message_type == 'error':
        msg_box.setIcon(QMessageBox.Critical)
    elif message_type == 'warning':
        msg_box.setIcon(QMessageBox.Warning)

    # set text
    msg_box.setText(message)
    if detailed_message is not None:
        msg_box.setDetailedText(detailed_message)

    # window title and buttons
    msg_box.setWindowTitle('PyRS Message')
    msg_box.setStandardButtons(QMessageBox.Ok)

    ret_val = msg_box.exec_()
    print('Message box return value: {}'.format(ret_val))
def browse_dir(parent, caption, default_dir):
    """Browse for a directory via a file dialog.

    :param parent: parent widget for the dialog
    :param caption: dialog title/caption
    :param default_dir: directory the dialog opens in
    :return: non-empty string for selected directory; empty string for canceled operation
    """
    # check inputs. (the former 'assert isinstance(parent, object)' was a
    # tautology — everything is an object — and was removed)
    checkdatatypes.check_string_variable('File browsing title/caption', caption)
    checkdatatypes.check_file_name(default_dir, check_exist=False, is_dir=True)

    # get directory; str() covers older Qt bindings returning QString
    chosen_dir = QFileDialog.getExistingDirectory(parent, caption, default_dir)
    # BUG FIX: removed leftover '[DB...BAT]' debug print to stdout
    chosen_dir = str(chosen_dir).strip()

    return chosen_dir