def print_helper(arg_info_dict):
    """Print help information compiled from an argument-definition dictionary.

    :param arg_info_dict: dictionary (key = parameter name,
        value = (-a, --abcd, doc, mandatory, default))
    :return: the formatted help string that was printed
        (the original docstring said None, but the string is returned)
    """
    checkdatatypes.check_dict('Script argument information', arg_info_dict)

    # build per-argument lines, then join once (avoids quadratic +=)
    help_lines = list()
    for arg_name_i in sorted(arg_info_dict.keys()):
        short_opt, long_opt, doc, mandatory, default = arg_info_dict[arg_name_i]
        line_i = '{}, {}: {}. '.format(short_opt, long_opt, doc)
        if mandatory:
            line_i += 'This is mandatory\n'
        else:
            line_i += 'This is optional with default value {}\n'.format(default)
        help_lines.append(line_i)
    # END-FOR

    help_str = ''.join(help_lines)

    # print on screen
    print(help_str)

    return help_str
def get_strain_stress_grid_setup(parent, user_define_grid, grid_stat_dict, grid_setup_dict):
    """Launch a dialog for the user to specify how the sample grids are set up.

    Re-launches the dialog until the user either cancels or provides an
    acceptable set of inputs.

    :return: dictionary of the set up criteria
        ([Min/Max/'Resolution'][X/Y/Z] = value), or None if the user cancels
    """
    if grid_setup_dict is not None:
        checkdatatypes.check_dict('Grid setup parameters', grid_setup_dict)

    while True:
        # build and populate the dialog
        setup_dialog = StrainStressGridSetup(parent)
        setup_dialog.set_experimental_data_statistics(grid_stat_dict)
        setup_dialog.set_user_grids_flag(user_define_grid)
        setup_dialog.set_previous_inputs(grid_setup_dict)

        # launch dialog (modal) and wait for the user's action
        dialog_accepted = setup_dialog.exec_()

        if not dialog_accepted:
            # user cancelled: no setup to return
            grid_setup_dict = None
            break

        # user accepted: keep the inputs; stop looping once they are valid
        grid_setup_dict = setup_dialog.get_grid_setup()
        if setup_dialog.is_input_acceptable:
            break
    # END-WHILE

    return grid_setup_dict
def set_previous_inputs(self, grid_setup_dict):
    """Restore previously entered grid-setup values into the line edits.

    :param grid_setup_dict: dict of dicts ([Min/Max/Resolution][X/Y/Z]) or None
    :return: None
    """
    # doing nothing when there is no previous input
    if grid_setup_dict is None:
        return

    checkdatatypes.check_dict('Grid setup', grid_setup_dict)

    for param_name in ('Min', 'Max', 'Resolution'):
        for coord_i in ('X', 'Y', 'Z'):
            value_i = grid_setup_dict[param_name][coord_i]
            # skip entries with no recorded user input
            if value_i is None:
                continue
            widget = getattr(self.ui, 'lineEdit_grid{}{}'.format(param_name, coord_i))
            widget.setText('{}'.format(value_i))
        # END-FOR
    # END-FOR

    return
def set_experimental_data_statistics(self, stat_dict):
    """Set the statistics data for the experiments.

    [requirement] statistics dictionary
    level 1: type: min, max, num_indv_values
    level 2: direction: e11, e22(, e33)
    level 3: coordinate_dir: x, y, z
    :param stat_dict: nested statistics dictionary as described above
    :return: None
    """
    checkdatatypes.check_dict('Grids statistics', stat_dict)

    # set up
    self._exp_grid_dimensions = stat_dict
    for item in stat_dict:
        self._plot_grid_dimensions[item] = {'X': None, 'Y': None, 'Z': None}
    # END-FOR

    # set up the minimum values
    # (removed a leftover debug print of stat_dict['min'][dir_i])
    for dir_i in stat_dict['min'].keys():
        for coord_i in ['X', 'Y', 'Z']:
            line_edit_name = 'lineEdit_{}{}Min'.format(dir_i, coord_i)
            line_edit = getattr(self.ui, line_edit_name)
            line_edit.setText('{}'.format(stat_dict['min'][dir_i][coord_i]))
        # END-FOR
    # END-FOR

    # set up the maximum values
    for dir_i in stat_dict['max'].keys():
        for coord_i in ['X', 'Y', 'Z']:
            line_edit_name = 'lineEdit_{}{}Max'.format(dir_i, coord_i)
            line_edit = getattr(self.ui, line_edit_name)
            line_edit.setText('{}'.format(stat_dict['max'][dir_i][coord_i]))
        # END-FOR
    # END-FOR

    # set up the number of individual values
    for dir_i in stat_dict['num_indv_values'].keys():
        for coord_i in ['X', 'Y', 'Z']:
            line_edit_name = 'lineEdit_{}NumIndvPoints{}'.format(dir_i, coord_i)
            line_edit = getattr(self.ui, line_edit_name)
            line_edit.setText('{}'.format(stat_dict['num_indv_values'][dir_i][coord_i]))
        # END-FOR
    # END-FOR

    # set up the default values
    self.do_set_default_values()

    return
def _set_default_user_input(self, grid_setup_dict):
    """Fill the grid-setup line edits with default values.

    :param grid_setup_dict: dict keyed by 'min', 'max', 'num_indv_values',
        each mapping to a per-coordinate (X/Y/Z) dict
    :return: None
    """
    checkdatatypes.check_dict('Grid (for plot) dimension default values', grid_setup_dict)

    # widget-name fragment -> key used in the input dictionary
    name_map = {'Min': 'min', 'Max': 'max', 'Resolution': 'num_indv_values'}

    for param_name in ('Min', 'Max', 'Resolution'):
        dict_key = name_map[param_name]
        for coord_i in ('X', 'Y', 'Z'):
            widget = getattr(self.ui, 'lineEdit_grid{}{}'.format(param_name, coord_i))
            widget.setText('{}'.format(grid_setup_dict[dict_key][coord_i]))
        # END-FOR (coord_i)
    # END-FOR (param_name)

    return
def write_information(self, info_dict):
    """Write project information entries as attributes of the project HDF5 file.

    :param info_dict: dict mapping attribute name -> value
    """
    # validate inputs and the file state before touching the file
    checkdatatypes.check_dict('Project file general information', info_dict)
    self._validate_write_operation()

    for info_name, info_value in info_dict.items():
        self._project_h5.attrs[info_name] = info_value
def export_to_mtex(pole_figure_array_dict, detector_id_list, file_name, header):
    """Export pole-figure arrays to MTEX format.

    Output layout:
    line 1: NRSF2
    line 2: alpha beta intensity
    line 3: (optional header)
    line 4 and on: alpha\tbeta\tintensity

    :param pole_figure_array_dict: dict of (sample log index, pole figure array) per key
    :param detector_id_list: selected detector IDs for pole figure
    :param file_name: output file name
    :param header: user-supplied optional header line
    :return: None
    :raises NotImplementedError: if a pole-figure key is not in the detector ID list
    """
    # check input types
    checkdatatypes.check_dict('Pole figure array dictionary', pole_figure_array_dict)
    checkdatatypes.check_list('Detector ID list', detector_id_list)

    # MTEX head lines; collect all lines and join once (avoids quadratic +=)
    mtex_lines = ['NRSF2\n', 'alpha beta intensity\n']

    # user optional header
    mtex_lines.append('{0}\n'.format(header))

    # writing data (removed a leftover '[STUDY ]' debug print per key)
    for pf_key in sorted(pole_figure_array_dict.keys()):
        if pf_key not in detector_id_list:
            raise NotImplementedError('The data structure of pole figure array is not clear. '
                                      'Find out how detector IDs are involved.')

        sample_log_index, pole_figure_array = pole_figure_array_dict[pf_key]

        for i_pt in range(pole_figure_array.shape[0]):
            mtex_lines.append('{0:5.5f}\t{1:5.5f}\t{2:5.5f}\n'
                              ''.format(pole_figure_array[i_pt, 0],
                                        pole_figure_array[i_pt, 1],
                                        pole_figure_array[i_pt, 2]))
        # END-FOR (i_pt)
    # END-FOR

    # write file; context manager guarantees the handle is closed even on error
    with open(file_name, 'w') as p_file:
        p_file.write(''.join(mtex_lines))
def set_user_grid_parameter_values(self, user_grid_value_dict):
    """Set the parameter values on a user-defined grid.

    Note: each grid's value is given by a dict with keys 'e11', 'e22', 'e33'
    (the original docstring listed 'value'/'dir'/'scan-index', but the code
    reads the strain-direction keys).

    :param user_grid_value_dict: key = grid position (indexable as x, y, z),
        value = per-direction value dict as noted above
    :return: None
    """
    checkdatatypes.check_dict('Parameter values on user defined grid', user_grid_value_dict)

    # Python 3 fix: dict.keys() returns a view without .sort(); use sorted()
    grid_positions = sorted(user_grid_value_dict.keys())

    for grid_pos in grid_positions:
        grid_i = user_grid_value_dict[grid_pos]
        self.add_matched_grid(grid_pos[0], grid_pos[1], grid_pos[2],
                              grid_i['e11'], grid_i['e22'], grid_i['e33'])
def update_calibration_info_file(cal_info_file, cal_info_table, append):
    """Search archive in order to keep calibration up-to-date.

    If in append mode, the additional information will be written to an
    existing calibration information HDF5 file; otherwise a calibration
    information file will be created from scratch.

    :param cal_info_file: calibration information file name
    :param cal_info_table: calibration information to append to calibration information file
    :param append: flag whether the mode is append or new
    :return: None
    :raises RuntimeError: if an unknown wavelength entry is encountered
    """
    # check inputs; the append branch requires the file to already exist
    # (the original messages 'to create'/'to append' were swapped)
    if append:
        checkdatatypes.check_file_name(cal_info_file, True, True, False,
                                       'Calibration information file to append')
    else:
        checkdatatypes.check_file_name(cal_info_file, False, True, False,
                                       'Calibration information file to create')
    checkdatatypes.check_dict('Calibration information table', cal_info_table)
    checkdatatypes.check_bool_variable('Append mode', append)

    # open file: 'a' is h5py's read/write-or-create mode; the previous code
    # passed a misspelled keyword (mdoe='rw'), which raised a TypeError
    if append:
        cal_info_file = h5py.File(cal_info_file, mode='a')
    else:
        cal_info_file = h5py.File(cal_info_file, mode='w')

    try:
        # write to file
        for wavelength_entry in cal_info_table:
            if wavelength_entry not in cal_info_file:
                # TODO fix this
                # cal_info_file[wavelength_entry] = whatever
                raise RuntimeError('encountered unknown wavelength_entry: {}'
                                   ''.format(wavelength_entry))

            for cal_date in cal_info_table[wavelength_entry]:
                cal_file_name = cal_info_table[wavelength_entry][cal_date]
                # NOTE(review): plain h5py groups have no .append(); confirm the
                # entry is an object that supports it
                cal_info_file[wavelength_entry].append((cal_date, cal_file_name))
            # END-FOR
        # END-FOR
    finally:
        # close the file even if an unknown entry aborted the write
        cal_info_file.close()

    return
def set_raw_grid_parameter_values(self, raw_grid_value_dict):
    """Set the parameter values on the raw experimental grid.

    Note: each grid's value is given by a dict with keys
    'value', 'dir' and 'scan-index'.

    :param raw_grid_value_dict: key = grid position (indexable as x, y, z),
        value is described as above note
    :return: None
    """
    checkdatatypes.check_dict('Parameter values on raw experimental grid', raw_grid_value_dict)

    # Python 3 fix: dict.keys() returns a view without .sort(); use sorted()
    for grid_pos in sorted(raw_grid_value_dict.keys()):
        grid_i = raw_grid_value_dict[grid_pos]
        self.append_row([grid_i['scan-index'],
                         grid_pos[0], grid_pos[1], grid_pos[2],
                         grid_i['value'], grid_i['dir']])
def set_from_dict(self, geometry_shift_dict):
    """Set geometry shift parameters from a dictionary, which may miss some parameters.

    :param geometry_shift_dict: dict whose optional keys are
        'shift x/y/z' and 'rotation x/y/z'
    :return: None
    """
    checkdatatypes.check_dict('Geometry shift parameters', geometry_shift_dict)

    # only overwrite the attributes whose keys are actually present
    key_attr_pairs = (('shift x', '_center_shift_x'),
                      ('shift y', '_center_shift_y'),
                      ('shift z', '_center_shift_z'),
                      ('rotation x', '_rotation_x'),
                      ('rotation y', '_rotation_y'),
                      ('rotation z', '_rotation_z'))
    for dict_key, attr_name in key_attr_pairs:
        if dict_key in geometry_shift_dict:
            setattr(self, attr_name, geometry_shift_dict[dict_key])
def export_arrays_to_ascii(pole_figure_array_dict, detector_id_list, file_name):
    """Export a dictionary of pole-figure arrays to an ASCII file.

    :param pole_figure_array_dict: dict of (sample log index, pole figure array) per key
    :param detector_id_list: selected detector IDs for pole figure
    :param file_name: output file name
    :return: None
    :raises NotImplementedError: if a pole-figure key is not in the detector ID list
    """
    # check input types
    checkdatatypes.check_dict('Pole figure array dictionary', pole_figure_array_dict)
    checkdatatypes.check_list('Detector ID list', detector_id_list)

    # Python 3 fix: dict.values() returns a non-indexable view, so values()[0]
    # raised a TypeError; fetch the first value via next(iter(...))
    print('[INFO] Export Pole Figure Arrays To ASCII:\nKeys: {0}\nValues[0]: {1}'
          ''.format(pole_figure_array_dict.keys(),
                    next(iter(pole_figure_array_dict.values()))))

    # combine
    pole_figure_array_list = list()
    for pf_key in pole_figure_array_dict.keys():
        index_vec, pole_figure_vec = pole_figure_array_dict[pf_key]

        if pf_key not in detector_id_list:
            raise NotImplementedError('The data structure of pole figure array is not clear. '
                                      'Find out how detector IDs are involved.')

        pole_figure_array_list.append(pole_figure_vec)
    # END-FOR
    combined_array = np.concatenate(pole_figure_array_list, axis=0)

    # sort
    # NOTE(review): np.sort(..., axis=0) sorts each column independently, which
    # decouples the rows' alpha/beta/intensity triples -- confirm this is intended
    combined_array = np.sort(combined_array, axis=0)

    # save
    np.savetxt(file_name, combined_array)  # x,y,z equal sized 1D arrays

    return
def set_statistics(self, stat_dict, row_item_list):
    """Fill the statistics table from a statistics dictionary.

    :param stat_dict: dict of per-direction statistics (key = direction,
        value = dict keyed by item name)
    :param row_item_list: list of item names for each row in order to
        maintain the order
    :return: None
    """
    checkdatatypes.check_dict('Statistic dictionary', stat_dict)
    checkdatatypes.check_list('Row item names', row_item_list)

    # append empty rows until the table has one row per item
    missing_rows = len(row_item_list) - self.rowCount()
    if missing_rows > 0:
        for _ in range(missing_rows):
            self.append_row(['', 0., 0., 0.])

    # fill the table row by row
    for row_index, item_name in enumerate(row_item_list):
        self.update_cell_value(row_index, self._indexItemName, item_name)
        for direction in stat_dict.keys():
            self.update_cell_value(row_index, self._indexDirDict[direction],
                                   stat_dict[direction][item_name])
def add_input_data_set(self, det_id, peak_intensity_dict, peak_fit_info_dict, log_dict):
    """Register peak intensities and experiment logs required by pole figure calculation.

    :param det_id: detector ID (non-negative integer)
    :param peak_intensity_dict: dictionary (key = scan log index (int),
        value = peak intensity (float))
    :param peak_fit_info_dict: dictionary (key = scan log index (int),
        value = peak fitting information (float))
    :param log_dict: dictionary (key = scan log index (int),
        value = dictionary (log name, log value))
    :return: None
    :raises RuntimeError: if the detector was already added or the scan log
        indexes of intensities and logs do not match
    """
    # a detector may only be registered once unless the calculator is reset
    if det_id in self._peak_intensity_dict:
        raise RuntimeError('Detector ID {0} already been added. Must be reset calculator.'
                           ''.format(det_id))

    checkdatatypes.check_int_variable('Detector ID', det_id, (0, None))
    checkdatatypes.check_dict('Peak intensities', peak_intensity_dict)
    checkdatatypes.check_dict('Peak fitting information', peak_fit_info_dict)
    checkdatatypes.check_dict('Log values for pole figure', log_dict)

    # intensities and logs must cover exactly the same scan log indexes
    if set(peak_intensity_dict.keys()) != set(log_dict.keys()):
        raise RuntimeError('Sample log indexes from peak intensities and sample logs'
                           ' do not match.')

    # add peak intensity
    self._peak_intensity_dict[det_id] = peak_intensity_dict

    # verify that every entry carries the motor-position logs needed later
    for scan_log_index in log_dict:
        motor_log_names = log_dict[scan_log_index].keys()
        checkdatatypes.check_list('Pole figure motor names', motor_log_names,
                                  ['2theta', 'chi', 'phi', 'omega'])
    # END-FOR

    # set
    self._peak_info_dict[det_id] = log_dict
    self._peak_fit_info_dict[det_id] = peak_fit_info_dict
def write_reduced_diffraction_data_set(self, two_theta_array, diff_data_set, var_data_set):
    """Write the reduced diffraction data (set) to the project file.

    Parameters
    ----------
    two_theta_array : numpy.ndarray
        2D array for 2-theta vector, which could be various to each other among sub runs
    diff_data_set : dict
        dictionary of 2D arrays for reduced diffraction patterns' intensities,
        keyed by mask ID (None = default mask)
    var_data_set : dict or None
        dictionary of 2D arrays for reduced diffraction patterns' variances;
        if None, variances default to sqrt(intensities)
    """
    # Check input
    checkdatatypes.check_numpy_arrays('Two theta vector', [two_theta_array], 2, False)
    checkdatatypes.check_dict('Diffraction data set', diff_data_set)

    # Retrieve diffraction group
    diff_group = self._project_h5[HidraConstants.REDUCED_DATA]

    # Add 2theta vector
    if HidraConstants.TWO_THETA in diff_group.keys():
        # over write data in place if the shape still matches
        try:
            diff_group[HidraConstants.TWO_THETA][...] = two_theta_array
        except TypeError:
            # usually two theta vector size changed: re-create the dataset
            del diff_group[HidraConstants.TWO_THETA]
            diff_group.create_dataset(HidraConstants.TWO_THETA, data=two_theta_array)
    else:
        # new data
        diff_group.create_dataset(HidraConstants.TWO_THETA, data=two_theta_array)

    # Add Diffraction data
    for mask_id in diff_data_set:
        # Get data
        diff_data_matrix_i = diff_data_set[mask_id]
        self._log.information('Mask {} data set shape: {}'.format(
            mask_id, diff_data_matrix_i.shape))

        # Check
        checkdatatypes.check_numpy_arrays('Diffraction data (matrix)',
                                          [diff_data_matrix_i], None, False)
        if two_theta_array.shape != diff_data_matrix_i.shape:
            raise RuntimeError(
                'Length of 2theta vector ({}) is different from intensities ({})'
                ''.format(two_theta_array.shape, diff_data_matrix_i.shape))

        # Set name for default mask
        if mask_id is None:
            data_name = HidraConstants.REDUCED_MAIN
        else:
            data_name = mask_id

        # Write
        if data_name in diff_group.keys():
            # overwrite
            diff_h5_data = diff_group[data_name]
            try:
                diff_h5_data[...] = diff_data_matrix_i
            except TypeError:
                # usually two theta vector size changed
                del diff_group[data_name]
                diff_group.create_dataset(data_name, data=diff_data_matrix_i)
        else:
            # new
            diff_group.create_dataset(data_name, data=diff_data_matrix_i)
    # END-FOR

    # Default variances: sqrt of the intensities.  Build a NEW dictionary here:
    # the previous code aliased var_data_set to diff_data_set and then replaced
    # the shared entries in place, silently overwriting the caller's intensity
    # arrays with their square roots.
    if var_data_set is None:
        var_data_set = {mask_id: numpy.sqrt(diff_data_set[mask_id])
                        for mask_id in diff_data_set}

    # Add Variances data
    for mask_id in var_data_set:
        # Get data
        var_data_matrix_i = var_data_set[mask_id]
        self._log.information('Mask {} data set shape: {}'.format(
            mask_id, var_data_matrix_i.shape))

        # Check
        checkdatatypes.check_numpy_arrays('Diffraction data (matrix)',
                                          [var_data_matrix_i], None, False)
        if two_theta_array.shape != var_data_matrix_i.shape:
            raise RuntimeError(
                'Length of 2theta vector ({}) is different from intensities ({})'
                ''.format(two_theta_array.shape, var_data_matrix_i.shape))

        # Set name for default mask; variance datasets carry a '_var' suffix
        if mask_id is None:
            data_name = HidraConstants.REDUCED_MAIN + '_var'
        else:
            data_name = mask_id + '_var'

        # Write
        if data_name in diff_group.keys():
            # overwrite
            diff_h5_data = diff_group[data_name]
            try:
                diff_h5_data[...] = var_data_matrix_i
            except TypeError:
                # usually two theta vector size changed
                del diff_group[data_name]
                diff_group.create_dataset(data_name, data=var_data_matrix_i)
        else:
            # new
            diff_group.create_dataset(data_name, data=var_data_matrix_i)