def _defaultData(self):
    # Build the default (empty) data sets on an evenly spaced 2theta grid
    # taken from the default simulation parameters.
    sim_params = self._defaultSimulationParameters()
    x_min = sim_params['x_min']
    x_max = sim_params['x_max']
    x_step = sim_params['x_step']
    num_points = int((x_max - x_min) / x_step + 1)
    x_data = np.linspace(x_min, x_max, num_points)
    data = DataStore()
    data.append(
        DataSet1D(name='PND',
                  x=x_data, y=np.zeros_like(x_data),
                  x_label='2theta (deg)', y_label='Intensity',
                  data_type='experiment'))
    data.append(
        DataSet1D(name='{:s} engine'.format(self._interface_name),
                  x=x_data, y=np.zeros_like(x_data),
                  x_label='2theta (deg)', y_label='Intensity',
                  data_type='simulation'))
    data.append(
        DataSet1D(name='Difference',
                  x=x_data, y=np.zeros_like(x_data),
                  x_label='2theta (deg)', y_label='Difference',
                  data_type='simulation'))
    return data
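# --- Illustrative sketch (not from the source) --------------------------------
# Shows how the evenly spaced default grid above is derived from x_min, x_max
# and x_step. The numeric values below are assumptions chosen for demonstration.
import numpy as np

def _example_default_grid(x_min: float = 10.0, x_max: float = 120.0,
                          x_step: float = 0.1) -> np.ndarray:
    # num_points counts both end points, hence the "+ 1"
    num_points = int((x_max - x_min) / x_step + 1)
    return np.linspace(x_min, x_max, num_points)

# _example_default_grid() -> 1101 points from 10.0 to 120.0 in steps of 0.1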
def __init__(self,
             name: str = 'Series',
             x: Union[np.ndarray, list] = None,
             y: Union[np.ndarray, list] = None,
             e: Union[np.ndarray, list] = None,
             data_type: str = 'simulation',
             x_label: str = 'x',
             y_label: str = 'y'):
    if not isinstance(data_type, str):
        raise AttributeError('data_type must be a string')
    self._datatype = None
    self.data_type = data_type
    if x is None:
        x = np.array([])
    if y is None:
        y = np.array([])
    if e is None:
        e = np.zeros_like(x)
    self.name = name
    # Accept plain lists and normalise them to numpy arrays
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    if not isinstance(y, np.ndarray):
        y = np.array(y)
    if not isinstance(e, np.ndarray):
        e = np.array(e)
    self.x = x
    self.y = y
    self.e = e
    self.x_label = x_label
    self.y_label = y_label
    self._color = None
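# --- Illustrative usage (not from the source) ----------------------------------
# Minimal sketch of the constructor above: plain Python lists are accepted for
# x and y and normalised to numpy arrays, and e defaults to zeros matching x.
# Assumes DataSet1D (the class whose __init__ is shown above) is in scope.
import numpy as np

def _example_dataset1d():
    ds = DataSet1D(name='demo',
                   x=[1.0, 2.0, 3.0],
                   y=[0.1, 0.4, 0.9],
                   x_label='2theta (deg)',
                   y_label='Intensity',
                   data_type='experiment')
    assert isinstance(ds.x, np.ndarray)
    assert ds.e.shape == ds.x.shape  # zero uncertainties by default
    return ds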
def calculate(self, x_array: np.ndarray) -> np.ndarray:
    """
    For a given x calculate the corresponding y.

    :param x_array: array of data points to be calculated
    :type x_array: np.ndarray
    :return: points calculated at `x`
    :rtype: np.ndarray
    """
    res = np.zeros_like(x_array)
    self.additional_data["ivar"] = res
    if self.type == "powder1DCW":
        return self.powder_1d_calculate(x_array)
    if self.type == "powder1DTOF":
        return self.powder_1d_tof_calculate(x_array)
    return res
def calculate(self, x_array: np.ndarray) -> np.ndarray:
    """
    Generate a background from the stored background points.

    :param x_array: Points for which the background should be calculated.
    :type x_array: np.ndarray
    :return: Background points at the supplied x-positions.
    :rtype: np.ndarray
    """
    x_points = self.x_sorted_points
    if not len(x_points):
        return np.zeros_like(x_array)
    y_points = self.y_sorted_points
    # Linear interpolation between the sorted background points;
    # values outside the point range are clamped to the end values.
    return np.interp(x_array, x_points, y_points)
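# --- Illustrative sketch (not from the source) ----------------------------------
# Pure-numpy illustration of the point background above: the sorted background
# points act as knots and np.interp is flat beyond the first/last knot.
# The numbers below are made up for demonstration only.
import numpy as np

def _example_point_background():
    x_knots = np.array([10.0, 60.0, 120.0])    # sorted background positions
    y_knots = np.array([200.0, 150.0, 180.0])  # background values at the knots
    x = np.linspace(5.0, 125.0, 7)
    bg = np.interp(x, x_knots, y_knots)        # linear between knots, flat outside
    return x, bg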
def calculate(self, x_array: np.ndarray) -> np.ndarray:
    """
    Generate a background from the stored background factors.

    :param x_array: Points for which the background should be calculated.
    :type x_array: np.ndarray
    :return: Background points at the supplied x-positions.
    :rtype: np.ndarray
    """
    shape_x = x_array.shape
    reduced_x = x_array.ravel()
    y = np.zeros_like(reduced_x)
    powers = self.sorted_powers
    amps = self.sorted_amplitudes
    # Sum of amplitude * x**power terms evaluated on the flattened array
    for power, amp in zip(powers, amps):
        y += amp * reduced_x**power
    return y.reshape(shape_x)
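# --- Illustrative sketch (not from the source) ----------------------------------
# The factor background above is a sum of amplitude * x**power terms. A minimal
# stand-alone version with made-up powers and amplitudes:
import numpy as np

def _example_factor_background(x: np.ndarray) -> np.ndarray:
    powers = np.array([0, 1, 2])          # hypothetical sorted powers
    amps = np.array([100.0, -0.5, 0.01])  # hypothetical sorted amplitudes
    y = np.zeros_like(x, dtype=float)
    for power, amp in zip(powers, amps):
        y += amp * x**power
    return y

# _example_factor_background(np.array([10.0, 20.0])) -> array([96., 94.])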
def do_calc_setup(self, scale, this_x_array):
    if len(self.pattern.backgrounds) == 0:
        bg = np.zeros_like(this_x_array)
    else:
        bg = self.pattern.backgrounds[0].calculate(this_x_array)

    num_crys = len(self.current_crystal.keys())
    if num_crys == 0:
        return bg

    crystals = [self.storage[key] for key in self.current_crystal.keys()]
    phase_scales = [
        self.storage[str(key) + "_scale"] for key in self.current_crystal.keys()
    ]
    phase_lists = []
    profiles = []
    peak_dat = []
    for crystal in crystals:
        phasesL = cryspy.PhaseL()
        idx = [
            idx for idx, item in enumerate(self.phases.items)
            if item.label == crystal.data_name
        ][0]
        phasesL.items.append(self.phases.items[idx])
        phase_lists.append(phasesL)
        profile, peak = _do_run(self.model, self.polarized, this_x_array,
                                crystal, phasesL)
        profiles.append(profile)
        peak_dat.append(peak)

    # Parallel evaluation via a process pool is currently disabled;
    # the serial loop above is used for now:
    # pool = mp.ProcessPool(num_crys)
    # print("\n\nPOOL = " + str(pool))
    # result = pool.amap(functools.partial(_do_run, self.model, self.polarized, this_x_array),
    #                    crystals, phase_lists)
    # while not result.ready():
    #     time.sleep(0.01)
    # obtained = result.get()
    # profiles, peak_dat = zip(*obtained)

    x_str = "ttheta"
    if self.type == "powder1DTOF":
        x_str = "time"
    if self.polarized:
        # TODO *REPLACE PLACEHOLDER FN*
        dependents, additional_data = self.polarized_update(
            lambda up, down: up + down,
            crystals,
            profiles,
            peak_dat,
            phase_scales,
            x_str,
        )
    else:
        dependents, additional_data = self.nonPolarized_update(
            crystals, profiles, peak_dat, phase_scales, x_str)

    self.additional_data["phases"].update(additional_data)
    self.additional_data["global_scale"] = scale
    self.additional_data["background"] = bg
    self.additional_data["ivar_run"] = this_x_array
    self.additional_data["phase_names"] = list(additional_data.keys())
    self.additional_data["type"] = self.type

    # just the sum of all phases
    dependent_output = scale * np.sum(dependents, axis=0) + bg
    scaled_dependents = [scale * dep for dep in dependents]
    self.additional_data["components"] = scaled_dependents

    if borg.debug:
        print(f"y_calc: {dependent_output}")
    return (np.sum(
        [s["profile"] for s in self.additional_data["phases"].values()],
        axis=0) + self.additional_data["background"])
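# --- Illustrative sketch (not from the source) ----------------------------------
# The final pattern above is assembled as scale * sum(phase profiles) + background.
# A pure-numpy stand-in with fabricated per-phase profiles; no cryspy calls.
import numpy as np

def _example_combine_phases():
    x = np.linspace(10.0, 120.0, 1101)
    # hypothetical per-phase profiles, already multiplied by their phase scales
    phase_profiles = [np.exp(-((x - 45.0) / 0.3) ** 2),
                      0.5 * np.exp(-((x - 60.0) / 0.3) ** 2)]
    bg = np.full_like(x, 100.0)   # flat background, for illustration
    scale = 2.0                   # global scale
    total = scale * np.sum(phase_profiles, axis=0) + bg
    components = [scale * p for p in phase_profiles]  # per-phase contributions
    return total, components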
def calculate(self, x_array: np.ndarray) -> np.ndarray:
    """
    For a given x calculate the corresponding y.

    :param x_array: array of data points to be calculated
    :type x_array: np.ndarray
    :return: points calculated at `x`
    :rtype: np.ndarray
    """
    if self.filename is None:
        raise AttributeError
    if self.pattern is None:
        scale = 1.0
        offset = 0
    else:
        scale = self.pattern.scale.raw_value
        offset = self.pattern.zero_shift.raw_value
    this_x_array = x_array + offset

    # Experiment/Instrument/Simulation parameters
    x_min = this_x_array[0]
    x_max = this_x_array[-1]
    num_points = np.prod(x_array.shape)
    x_step = (x_max - x_min) / (num_points - 1)

    if len(self.pattern.backgrounds) == 0:
        bg = np.zeros_like(this_x_array)
    else:
        bg = self.pattern.backgrounds[0].calculate(this_x_array)

    dependents = []

    # Sample parameters
    # We assume that the phases items have the same indexing as the known_phases items
    cifs = self.grab_cifs()
    if len(cifs) == 0:
        raise ValueError("No phases found for calculation")

    for idx, file in enumerate(cifs):
        cif_file = CFML_api.CIFFile(file)
        cell = cif_file.cell
        space_group = cif_file.space_group
        atom_list = cif_file.atom_list
        job_info = cif_file.job_info

        job_info.range_2theta = (x_min, x_max)
        job_info.theta_step = x_step
        job_info.u_resolution = self.conditions["u_resolution"]
        job_info.v_resolution = self.conditions["v_resolution"]
        job_info.w_resolution = self.conditions["w_resolution"]
        job_info.x_resolution = self.conditions["x_resolution"]
        job_info.y_resolution = self.conditions["y_resolution"]
        job_info.lambdas = (self.conditions["lamb"], self.conditions["lamb"])
        job_info.bkg = 0.0

        # Calculations
        try:
            reflection_list = CFML_api.ReflectionList(
                cell, space_group, True, job_info)
            reflection_list.compute_structure_factors(
                space_group, atom_list, job_info)
            diffraction_pattern = CFML_api.DiffractionPattern(
                job_info, reflection_list, cell.reciprocal_cell_vol)
        except Exception as e:
            for cif in cifs:
                os.remove(cif)
            raise ArithmeticError from e

        item = list(self.known_phases.items())[idx]
        key = list(self.known_phases.keys())[idx]
        phase_scale = self.getPhaseScale(key)

        dependent, additional_data = self.nonPolarized_update(
            item,
            diffraction_pattern,
            reflection_list,
            job_info,
            scales=phase_scale)
        dependents.append(dependent)
        self.additional_data["phases"].update(additional_data)

    # Clean up temporary cif files
    for cif in cifs:
        os.remove(cif)

    self.additional_data["global_scale"] = scale
    self.additional_data["background"] = bg
    self.additional_data["ivar_run"] = this_x_array
    self.additional_data["ivar"] = x_array
    self.additional_data["components"] = [
        scale * dep + bg for dep in dependents
    ]
    self.additional_data["phase_names"] = list(self.known_phases.items())
    self.additional_data["type"] = "powder1DCW"

    dependent_output = scale * np.sum(dependents, axis=0) + bg
    if borg.debug:
        print(f"y_calc: {dependent_output}")
    return (np.sum(
        [s["profile"] for s in self.additional_data["phases"].values()],
        axis=0) + self.additional_data["background"])
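# --- Illustrative sketch (not from the source) ----------------------------------
# How the simulation range is derived above: the zero shift offsets the grid and
# the step is recovered from the (assumed evenly spaced) input array. Values are
# made up for demonstration; no CFML_api calls are involved.
import numpy as np

def _example_simulation_range(x_array: np.ndarray, zero_shift: float = 0.05):
    this_x = x_array + zero_shift
    x_min, x_max = this_x[0], this_x[-1]
    num_points = np.prod(x_array.shape)
    x_step = (x_max - x_min) / (num_points - 1)
    return x_min, x_max, x_step

# _example_simulation_range(np.linspace(10.0, 120.0, 1101)) -> (10.05, 120.05, 0.1)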
def calculate(self, x_array: np.ndarray) -> np.ndarray:
    self.create_temp_prm()
    if self.pattern is None:
        scale = 1.0
        offset = 0
    else:
        scale = self.pattern.scale.raw_value / 1000.0
        offset = self.pattern.zero_shift.raw_value
    this_x_array = x_array + offset

    gpx = G2sc.G2Project(newgpx=os.path.join(
        self.prm_dir_path, 'easydiffraction_temp.gpx'))  # create a project

    # step 1, setup: add a phase to the project
    cif_file = self.filename
    phase_name = 'Phase'
    phase_index = 0
    phase0 = gpx.add_phase(cif_file, phasename=phase_name, fmthint='CIF')

    # step 2, setup: add a simulated histogram and link it to the previous phase(s)
    x_min = this_x_array[0]
    x_max = this_x_array[-1]
    n_points = np.prod(x_array.shape)
    x_step = (x_max - x_min) / (n_points - 1)
    histogram0 = gpx.add_simulated_powder_histogram(
        f"{phase_name} simulation",
        self.prm_file_path,
        x_min, x_max,
        Tstep=x_step,
        phases=gpx.phases())

    # Set parameters
    val1 = 10000.0  # 1000000.0
    val2 = None
    LGmix = 0.0  # 1.0 -> 0.0: NO VISIBLE INFLUENCE...
    phase0.setSampleProfile(phase_index, 'size', 'isotropic', val1,
                            val2=val2, LGmix=LGmix)
    # print("- size", phase0.data['Histograms'][f'PWDR {phase_name} simulation']['Size'])

    u = self.conditions["u_resolution"] * 1850  # ~ CrysPy/CrysFML
    v = self.conditions["v_resolution"] * 1850  # ~ CrysPy/CrysFML
    w = self.conditions["w_resolution"] * 1850  # ~ CrysPy/CrysFML
    x = self.conditions["x_resolution"] * 16    # ~ CrysPy/CrysFML
    y = self.conditions["y_resolution"] - 6     # y - 6 ~ 0 in CrysPy/CrysFML
    gpx.data[f'PWDR {phase_name} simulation']['Instrument Parameters'][0]['U'] = [u, u, 0]
    gpx.data[f'PWDR {phase_name} simulation']['Instrument Parameters'][0]['V'] = [v, v, 0]
    gpx.data[f'PWDR {phase_name} simulation']['Instrument Parameters'][0]['W'] = [w, w, 0]
    gpx.data[f'PWDR {phase_name} simulation']['Instrument Parameters'][0]['X'] = [x, x, 0]
    gpx.data[f'PWDR {phase_name} simulation']['Instrument Parameters'][0]['Y'] = [y, y, 0]
    wl = self.conditions["wavelength"]
    gpx.data[f'PWDR {phase_name} simulation']['Instrument Parameters'][0]['Lam'] = [wl, wl, 0]

    # Step 3: Set the scale factor to adjust the y scale
    # histogram0.SampleParameters['Scale'][0] = 1000000.

    # step 4, compute: turn off parameter optimization and calculate pattern
    gpx.data['Controls']['data']['max cyc'] = 0  # refinement not needed
    try:
        gpx.do_refinements(refinements=[{}], makeBack=[])
        # step 5, retrieve results & plot
        ycalc = gpx.histogram(0).getdata('ycalc')
    except Exception as e:
        raise ArithmeticError from e
    finally:
        # Clean up temporary project files
        for p in pathlib.Path(os.path.dirname(
                self.filename)).glob("easydiffraction_temp*"):
            if os.path.basename(p) != "easydiffraction_temp.cif":
                p.unlink()

    ref_list = gpx.data[f'PWDR {phase_name} simulation']['Reflection Lists'][phase_name]['RefList']
    self.hkl_dict = {
        'ttheta': ref_list[:, 5],
        'h': ref_list[:, 0],
        'k': ref_list[:, 1],
        'l': ref_list[:, 2],
    }

    if len(self.pattern.backgrounds) == 0:
        bg = np.zeros_like(this_x_array)
    else:
        bg = self.pattern.backgrounds[0].calculate(this_x_array)

    res = scale * ycalc + bg

    np.set_printoptions(precision=3)
    if borg.debug:
        print(f"y_calc: {res}")
    return res
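# --- Illustrative sketch (not from the source) ----------------------------------
# The empirical rescaling of resolution parameters used above, isolated as a
# helper. The factors (1850, 16, -6) simply mirror the numbers in the routine;
# the input values in the comment below are made up.
def _example_gsas_resolution(conditions: dict) -> dict:
    return {
        'U': conditions["u_resolution"] * 1850,
        'V': conditions["v_resolution"] * 1850,
        'W': conditions["w_resolution"] * 1850,
        'X': conditions["x_resolution"] * 16,
        'Y': conditions["y_resolution"] - 6,
    }

# _example_gsas_resolution({'u_resolution': 0.2, 'v_resolution': -0.3,
#                           'w_resolution': 0.4, 'x_resolution': 0.0,
#                           'y_resolution': 6.0})
# -> {'U': 370.0, 'V': -555.0, 'W': 740.0, 'X': 0.0, 'Y': 0.0}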