def test_plots(mck):
    x = np.arange(100)
    y = np.arange(100)
    phase = Phase(x, y)
    phase.plot()
    plt.show()
    mck.assert_called()
def build_GD(self):
    """
    Build the GD and return it. It will rebuild every time
    this function is called.
    """
    delays, positions = self._collect()
    self.GD = Phase(positions, delays, GD_mode=True)
    return self.GD
def test_phase_gd_mode():
    x = np.arange(100)
    y = np.arange(100)
    phase = Phase(x, y, GD_mode=True)
    d, ds, st = phase.fit(reference_point=50, order=2)
    np.testing.assert_array_almost_equal(d, [50, 1])
    assert phase.order == 1
    assert phase.dispersion_order == 2
def test_errorplot2():
    x = np.arange(100)
    y = np.arange(100)
    phase = Phase(x, y)
    with pytest.raises(ValueError):
        phase.errors
    with pytest.raises(ValueError):
        phase.errorplot()
def build_phase(self):
    """
    Retrieve *only the phase* after the transforms. This will
    unwrap the angles and construct a `~pysprint.core.phase.Phase` object.

    Returns
    -------
    phase : pysprint.core.phase.Phase
        The phase object. See its docstring for more info.
    """
    if self.nufft_used:
        self.shift("y")
    y = np.unwrap(np.angle(self.y), axis=0)
    self.phase = Phase(self.x, y)
    return self.phase  # because of inplace ops. we need to return the phase
def test_errorplot():
    x = np.arange(100)
    y = np.arange(100)
    phase = Phase(x, y)
    phase.fit(2, 2)
    phase.errorplot()
    phase.errorplot(percent=True)
def calculate_from_raw(omegas, delays, reference_point, order, show_graph=False):
    """
    Calculate the dispersion from matching pairs of delays and SPP positions.

    Parameters
    ----------
    omegas : np.ndarray
        The SPP positions.
    delays : np.ndarray
        The delay values in fs.
    reference_point : float
        The reference point on the x axis.
    order : int
        Maximum dispersion order to look for. Must be in [2, 6].
    show_graph : bool, optional
        Whether to show the fitting.

    Returns
    -------
    dispersion : array-like
        The dispersion coefficients in the form of:
        [GD, GDD, TOD, FOD, QOD, SOD]
    dispersion_std : array-like
        Standard deviations due to uncertainty of the fit.
        It is only calculated if lmfit is installed. The form is:
        [GD_std, GDD_std, TOD_std, FOD_std, QOD_std, SOD_std]
    fit_report : str
        If lmfit is available returns the fit report, else returns an
        empty string.
    """
    if order == 1:
        raise ValueError(
            "Order should be greater than 1. Cannot fit constant function to data."
        )
    GD = Phase(omegas, delays, GD_mode=True)
    d, ds, s = GD._fit(reference_point=reference_point, order=order)
    if show_graph:
        GD.plot()
    return d, ds, s
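# A minimal usage sketch for calculate_from_raw above (not part of the library
# code). The SPP positions and delays are made-up illustrative values; real
# inputs come from measured interferograms.
def _example_calculate_from_raw():
    omegas = np.array([2.10, 2.15, 2.20, 2.25, 2.30])       # SPP positions (e.g. PHz)
    delays = np.array([-200.0, -100.0, 0.0, 100.0, 200.0])  # delays in fs
    # Fit up to second order (GD + GDD) around the chosen reference point.
    d, ds, report = calculate_from_raw(
        omegas, delays, reference_point=2.20, order=2, show_graph=False
    )
    return d, ds, report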
class SPPMethod(metaclass=_DatasetBase):
    """
    Interface for the Stationary Phase Point Method.
    """

    def __init__(self, ifg_names, sam_names=None, ref_names=None, errors="raise", **kwargs):
        """
        SPPMethod constructor.

        Parameters
        ----------
        ifg_names : list
            The list containing the filenames of the interferograms.
        sam_names : list, optional
            The list containing the filenames of the sample arm's spectra.
        ref_names : list, optional
            The list containing the filenames of the reference arm's spectra.
        errors : str, optional
            Whether to validate the given files. Must be "raise" or "ignore";
            with "raise" every file must exist. Default is "raise".
        kwargs :
            Additional keyword arguments to pass to the `parse_raw` function.
        """
        if errors not in ("raise", "ignore"):
            raise ValueError("errors must be `raise` or `ignore`.")

        self.ifg_names = ifg_names

        if sam_names:
            self.sam_names = sam_names
        else:
            self.sam_names = None
        if ref_names:
            self.ref_names = ref_names
        else:
            self.ref_names = None

        if errors == "raise":
            self._validate()

        if self.sam_names:
            if not len(self.ifg_names) == len(self.sam_names):
                raise DatasetError(
                    "Mismatching length of files. Use None if a file is missing."
                )
        if self.ref_names:
            if not len(self.ifg_names) == len(self.ref_names):
                raise DatasetError(
                    "Mismatching length of files. Use None if a file is missing."
                )

        self.skiprows = kwargs.pop("skiprows", 0)
        self.decimal = kwargs.pop("decimal", ",")
        self.sep = kwargs.pop("sep", ";")
        self.meta_len = kwargs.pop("meta_len", 1)
        self.cb = kwargs.pop("callback", defaultcallback)
        self.delimiter = kwargs.pop("delimiter", None)
        self.comment = kwargs.pop("comment", None)
        self.usecols = kwargs.pop("usecols", None)
        self.names = kwargs.pop("names", None)
        self.swapaxes = kwargs.pop("swapaxes", False)
        self.na_values = kwargs.pop("na_values", None)
        self.skip_blank_lines = kwargs.pop("skip_blank_lines", True)
        self.keep_default_na = kwargs.pop("keep_default_na", False)

        if kwargs:
            raise TypeError(f"invalid keyword argument: {kwargs}")

        self.load_dict = {
            "skiprows": self.skiprows,
            "decimal": self.decimal,
            "sep": self.sep,
            "meta_len": self.meta_len,
            "callback": self.cb,
            "delimiter": self.delimiter,
            "comment": self.comment,
            "usecols": self.usecols,
            "names": self.names,
            "swapaxes": self.swapaxes,
            "na_values": self.na_values,
            "skip_blank_lines": self.skip_blank_lines,
            "keep_default_na": self.keep_default_na,
        }

        self._container = {}
        self._info = f"Progress: {len(self._container)}/{len(self)}"
        self.GD = None

    def _collect(self):
        # Maybe the dictionary struct can be dropped at this point..
        local_delays = {}
        local_positions = {}

        for idx, (delay, position) in enumerate(self._container.values()):
            if idx != 0 and delay.size > 0 and delay.flat[0] in np.concatenate(
                [a.ravel() for a in local_delays.values()]
            ):
                raise ValueError(
                    f"Duplicated delay values found. Delay {delay.flat[0]} fs was previously seen."
                )
            local_delays[idx] = delay
            local_positions[idx] = position

        delays = np.concatenate([a.ravel() for a in local_delays.values()])
        positions = np.concatenate([a.ravel() for a in local_positions.values()])
        return delays, positions

    def append(self, newifg, newsam=None, newref=None):
        """
        Append a new interferogram to the object.
        """
        # ensure padding before trying to append, and also
        # we better prevent infinite loop
        # TODO
        self.ifg_names.append(newifg)
        if newsam is not None:
            if self.sam_names is not None:
                if len(self.ifg_names) > len(self.sam_names):
                    while len(self.ifg_names) != len(self.sam_names):
                        self.sam_names.append(None)
                self.sam_names.append(newsam)
        if newref is not None:
            if self.ref_names is not None:
                if len(self.ifg_names) > len(self.ref_names):
                    while len(self.ifg_names) != len(self.ref_names):
                        self.ref_names.append(None)
                self.ref_names.append(newref)

    @staticmethod
    def calculate_from_ifg(ifg_list, reference_point, order, show_graph=False):
        """
        Collect SPP data from a list of `pysprint.Dataset` or child objects
        and evaluate them.

        Parameters
        ----------
        ifg_list : list
            The list containing the interferograms. All members should be
            `pysprint.Dataset` or child class type, otherwise TypeError is raised.
        reference_point : float
            The reference point on the x axis.
        order : int
            Maximum dispersion order to look for. Must be in [2, 6].
        show_graph : bool, optional
            Shows the final graph of the spectral phase and fitted curve.
            Default is False.

        Returns
        -------
        dispersion : array-like
            The dispersion coefficients in the form of:
            [GD, GDD, TOD, FOD, QOD, SOD]
        dispersion_std : array-like
            Standard deviations due to uncertainty of the fit.
            It is only calculated if lmfit is installed. The form is:
            [GD_std, GDD_std, TOD_std, FOD_std, QOD_std, SOD_std]
        fit_report : str
            If lmfit is available returns the fit report, else returns an
            empty string.
        """
        for ifg in ifg_list:
            if not isinstance(ifg, Dataset):
                raise TypeError("pysprint.Dataset objects are expected.")

        if order == 1:
            raise ValueError(
                "Order should be greater than 1. Cannot fit constant function to data."
            )

        local_delays = {}
        local_positions = {}

        for idx, ifg in enumerate(ifg_list):
            delay, position = ifg.emit()
            if idx != 0 and delay.size > 0 and delay.flat[0] in np.concatenate(
                [a.ravel() for a in local_delays.values()]
            ):
                raise ValueError(
                    f"Duplicated delay values found. Delay {delay.flat[0]} fs was previously seen."
                )
            local_delays[idx] = delay
            local_positions[idx] = position

        delays = np.concatenate([a.ravel() for a in local_delays.values()])
        positions = np.concatenate([a.ravel() for a in local_positions.values()])

        GD = Phase(positions, delays, GD_mode=True)
        d, ds, s = GD._fit(reference_point=reference_point, order=order)
        if show_graph:
            GD.plot()
        return d, ds, s

    def __len__(self):
        return len(self.ifg_names)

    # TODO
    def __str__(self):
        return f"{type(self).__name__}\nInterferogram count : {len(self)}"

    def _repr_html_(self):
        alive = [i for i in Dataset._get_instances() if i.parent == self]
        s = f"""
        <table style="border:1px solid black;float:top;">
        <tbody>
        <tr>
        <td colspan=2 style="text-align:center">
        <font size="5">{type(self).__name__}</font>
        </td>
        </tr>
        <tr>
        <td style="text-align:center"><b>Interferograms accumulated</b></td>
        <td style="text-align:center"> {len(self)}</td>
        </tr>
        <tr>
        <td style="text-align:center"><b>Interferograms cached</b></td>
        <td style="text-align:center"> {len(alive)}</td>
        </tr>
        <tr>
        <td style="text-align:center"><b>Eagerly calculating</b></td>
        <td style="text-align:center"> {self.is_eager}</td>
        </tr>
        <tr>
        <td style="text-align:center"><b>Data recorded from</b></td>
        <td style="text-align:center"> {len(self._container)}</td>
        </tr>
        </table>
        """
        return s

    @lru_cache(500)
    def __getitem__(self, key):
        try:
            dataframe = Dataset.parse_raw(
                self.ifg_names[key],
                self.sam_names[key],
                self.ref_names[key],
                **self.load_dict,
                parent=self,
            )
        except (TypeError, ValueError):
            dataframe = Dataset.parse_raw(
                self.ifg_names[key], **self.load_dict, parent=self
            )
        return dataframe

    def _validate(self):
        for filename in self.ifg_names:
            if filename is not None and not os.path.exists(filename):
                raise FileNotFoundError(f"""File named '{filename}' is not found.""")
        if self.sam_names:
            for sam in self.sam_names:
                if sam is not None and not os.path.exists(sam):
                    raise FileNotFoundError(f"""File named '{sam}' is not found.""")
        if self.ref_names:
            for ref in self.ref_names:
                if ref is not None and not os.path.exists(ref):
                    raise FileNotFoundError(f"""File named '{ref}' is not found.""")

    def flush(self):
        """
        Reset the state of recorded delays and positions, even on
        active objects that have been constructed at runtime.
        """
        self._container = {}
        for ifg in self:
            ifg._delay = None
            ifg.delay = None
            ifg._positions = None
            ifg.positions = None

    def save_data(self, filename):
        """
        Save the currently stored SPP data.

        Parameters
        ----------
        filename : str
            The filename to save as. If it does not end with ".txt",
            the extension is appended by default.
        """
        if not filename.endswith(".txt"):
            filename += ".txt"
        delays, positions = self._collect()
        np.savetxt(
            f"{filename}",
            np.column_stack((positions, delays)),
            delimiter=",",
        )

    @staticmethod
    def calculate_from_raw(omegas, delays, reference_point, order, show_graph=False):
        """
        Calculate the dispersion from matching pairs of delays and SPP positions.

        Parameters
        ----------
        omegas : np.ndarray
            The SPP positions.
        delays : np.ndarray
            The delay values in fs.
        reference_point : float
            The reference point on the x axis.
        order : int
            Maximum dispersion order to look for. Must be in [2, 6].
        show_graph : bool, optional
            Whether to show the fitting.

        Returns
        -------
        dispersion : array-like
            The dispersion coefficients in the form of:
            [GD, GDD, TOD, FOD, QOD, SOD]
        dispersion_std : array-like
            Standard deviations due to uncertainty of the fit.
            It is only calculated if lmfit is installed. The form is:
            [GD_std, GDD_std, TOD_std, FOD_std, QOD_std, SOD_std]
        fit_report : str
            If lmfit is available returns the fit report, else returns an
            empty string.
        """
        if order == 1:
            raise ValueError(
                "Order should be greater than 1. Cannot fit constant function to data."
            )
        GD = Phase(omegas, delays, GD_mode=True)
        d, ds, s = GD._fit(reference_point=reference_point, order=order)
        if show_graph:
            GD.plot()
        return d, ds, s

    def build_GD(self):
        """
        Build the GD and return it. It will rebuild every time
        this function is called.
        """
        delays, positions = self._collect()
        self.GD = Phase(positions, delays, GD_mode=True)
        return self.GD

    def calculate(self, reference_point, order, show_graph=False):
        """
        This function should be used after setting the SPP data in the
        interactive matplotlib editor or by other means.

        Parameters
        ----------
        reference_point : float
            The reference point on the x axis.
        order : int, optional
            Maximum dispersion order to look for. Must be in [2, 6].
        show_graph : bool, optional
            Shows the final graph of the spectral phase and fitted curve.
            Default is False.

        Returns
        -------
        dispersion : array-like
            The dispersion coefficients in the form of:
            [GD, GDD, TOD, FOD, QOD, SOD]
        dispersion_std : array-like
            Standard deviations due to uncertainty of the fit.
            It is only calculated if lmfit is installed. The form is:
            [GD_std, GDD_std, TOD_std, FOD_std, QOD_std, SOD_std]
        fit_report : str
            If lmfit is available returns the fit report, else returns an
            empty string.
        """
        if order == 1:
            raise ValueError(
                "Order should be greater than 1. Cannot fit constant function to data."
            )
        self.build_GD()
        d, ds, s = self.GD._fit(reference_point=reference_point, order=order)
        if show_graph:
            self.GD.plot()
        return d, ds, s

    @property
    def info(self):
        """
        Return how many interferograms were processed.
        """
        self._info = f"Progress: {len(self._container)}/{len(self)}"
        return self._info

    @property
    def is_eager(self):
        """
        Return whether eager execution is enabled.
        """
        # TODO
        if self.cb.__name__ == "inner":
            return True
        return False
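# A minimal, hypothetical workflow sketch for SPPMethod. File names, parser
# options and the reference point are assumptions for illustration only; it
# relies solely on methods shown above (construction, indexing, calculate()).
def _example_spp_method():
    spp = SPPMethod(
        ["ifg_1.txt", "ifg_2.txt", "ifg_3.txt"],  # hypothetical interferogram files
        decimal=".",
        sep=",",
        skiprows=8,
        errors="ignore",  # skip the file-existence check for this sketch
    )
    print(spp.info)  # "Progress: 0/3" until SPP data is recorded
    first = spp[0]   # lazily parsed pysprint.Dataset (cached by __getitem__)
    # ... record the SPP positions and delays, e.g. in the interactive editor ...
    # Once the container is populated, fit GD, GDD and TOD around the reference:
    d, ds, report = spp.calculate(reference_point=2.355, order=3, show_graph=False)
    return d, ds, report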
def calculate_from_ifg(ifg_list, reference_point, order, show_graph=False):
    """
    Collect SPP data from a list of `pysprint.Dataset` or child objects
    and evaluate them.

    Parameters
    ----------
    ifg_list : list
        The list containing the interferograms. All members should be
        `pysprint.Dataset` or child class type, otherwise TypeError is raised.
    reference_point : float
        The reference point on the x axis.
    order : int
        Maximum dispersion order to look for. Must be in [2, 6].
    show_graph : bool, optional
        Shows the final graph of the spectral phase and fitted curve.
        Default is False.

    Returns
    -------
    dispersion : array-like
        The dispersion coefficients in the form of:
        [GD, GDD, TOD, FOD, QOD, SOD]
    dispersion_std : array-like
        Standard deviations due to uncertainty of the fit.
        It is only calculated if lmfit is installed. The form is:
        [GD_std, GDD_std, TOD_std, FOD_std, QOD_std, SOD_std]
    fit_report : str
        If lmfit is available returns the fit report, else returns an
        empty string.
    """
    for ifg in ifg_list:
        if not isinstance(ifg, Dataset):
            raise TypeError("pysprint.Dataset objects are expected.")

    if order == 1:
        raise ValueError(
            "Order should be greater than 1. Cannot fit constant function to data."
        )

    local_delays = {}
    local_positions = {}

    for idx, ifg in enumerate(ifg_list):
        delay, position = ifg.emit()
        if idx != 0 and delay.size > 0 and delay.flat[0] in np.concatenate(
            [a.ravel() for a in local_delays.values()]
        ):
            raise ValueError(
                f"Duplicated delay values found. Delay {delay.flat[0]} fs was previously seen."
            )
        local_delays[idx] = delay
        local_positions[idx] = position

    delays = np.concatenate([a.ravel() for a in local_delays.values()])
    positions = np.concatenate([a.ravel() for a in local_positions.values()])

    GD = Phase(positions, delays, GD_mode=True)
    d, ds, s = GD._fit(reference_point=reference_point, order=order)
    if show_graph:
        GD.plot()
    return d, ds, s
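# A short, hypothetical sketch of calling calculate_from_ifg with already-loaded
# interferograms. The file names and the assumption that each Dataset already
# carries recorded SPP data (returned by `emit()`) are illustrative only.
def _example_calculate_from_ifg():
    ifgs = [Dataset.parse_raw(name) for name in ("ifg_a.txt", "ifg_b.txt")]
    # ... record the SPP position(s) and delay on each Dataset beforehand ...
    d, ds, report = calculate_from_ifg(ifgs, reference_point=2.355, order=2)
    return d, ds, report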
class FFTMethod(Dataset):
    """
    Basic interface for the Fourier transform method.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # making sure it's not normalized
        if self._is_normalized:
            self.y_norm = self.y
            self._is_normalized = False
        if np.any(self.sam) or np.any(self.ref):
            warnings.warn("This method doesn't require arms' spectra.", PySprintWarning)
        self.original_x = self.x
        self.at = None
        self.std = None
        self.fwhm = None
        self.window_order = None
        self.phase = None
        self._ifft_called_first = False
        self.nufft_used = False

    @inplacify
    def shift(self, axis="x"):
        """
        Equivalent to `scipy.fftpack.fftshift`, but it's easier to use
        this function instead, because we don't need to explicitly call
        the class' x and y attributes.

        Parameters
        ----------
        axis : str
            Must be 'x', 'y', 'both', 'xy' or 'yx'.
        """
        if axis == "x":
            self.x = fftshift(self.x)
        elif axis == "y":
            self.y = fftshift(self.y)
        elif axis == "both" or axis == "xy" or axis == "yx":
            self.y = fftshift(self.y)
            self.x = fftshift(self.x)
        else:
            raise ValueError("axis should be either `x`, `y` or `both`.")
        return self

    @inplacify
    def ifft(
        self,
        interpolate=True,
        usenifft=False,
        eps=1e-12,
        exponent="positive",
    ):
        """
        Applies inverse Fast Fourier Transform to the dataset.

        Parameters
        ----------
        interpolate : bool, default is True -- WILL BE REMOVED
            Whether to apply linear interpolation on the dataset
            before transforming.
        usenifft : bool, optional
            Whether to use non-uniform FFT. It uses the algorithm
            described in the references. This means the interferogram
            will *not* be linearly interpolated. Default is False.
        eps : float, optional
            The desired approximate error for the non-uniform FFT result.
            Must be in range 1E-33 < eps < 1E-1, though be aware that
            the errors are only well calibrated near the range
            1E-12 ~ 1E-6. Default is 1E-12.
        exponent : str, optional
            If 'negative', compute the transform with a negative exponent.
            If 'positive', compute the transform with a positive exponent.
            Default is 'positive'.

        Notes
        -----
        The basic scheme is ifft -> windowing -> fft, so you should call
        these functions in this order. Otherwise the transforms may be
        inconsistent.

        If numba is not installed, the non-uniform FFT is approximately
        5x slower, but still remains comparable to `np.fft.ifft`.

        References
        ----------
        [1] Dutt A., Rokhlin V.: Fast Fourier Transforms for Nonequispaced
        Data II, Applied and Computational Harmonic Analysis, Volume 2,
        Issue 1, January 1995, Pages 85-100 (1995)

        [2] Greengard, Leslie & Lee, June-Yub: Accelerating the Nonuniform
        Fast Fourier Transform, Society for Industrial and Applied
        Mathematics, 46, 443-454, 10.1137/S003614450343200X (2004)
        """
        self.nufft_used = usenifft
        self._ifft_called_first = True
        if usenifft:
            x_spaced = np.linspace(self.x[0], self.x[-1], len(self.x))
            timestep = np.diff(x_spaced)[0]
            x_axis = np.fft.fftfreq(len(self.x), d=timestep / (2 * np.pi))
            y_transform = nuifft(
                self.x,
                self.y,
                gl=len(self.x),
                df=(x_axis[1] - x_axis[0]),
                epsilon=eps,
                exponent=exponent,
            )
            self.x, self.y = x_axis, np.fft.fftshift(y_transform)
        else:
            self.x, self.y = ifft_method(self.x, self.y, interpolate=interpolate)
        return self

    @inplacify
    def fft(self):
        """
        Applies fft to the dataset. If ifft was not called first,
        inaccurate results might happen.
        """
        if not self._ifft_called_first:
            warnings.warn("This module is designed to call ifft before fft", FourierWarning)
        self.x, self.y = fft_method(self.original_x, self.y)
        return self

    @inplacify
    def window(self, at, fwhm, window_order=6, plot=True):
        """
        Draws a gaussian window on the plot with the desired parameters.
        The maximum value is adjusted for the dataset's maximum value,
        mostly for visibility.

        Parameters
        ----------
        at : float
            The maximum of the gaussian curve.
        fwhm : float
            Full width at half maximum of the gaussian.
        window_order : int, optional
            Order of the gaussian curve. If not even, it's incremented by 1.
            Default is 6.
        plot : bool, optional
            Whether to immediately show the window with the data.
            Default is True.
        """
        self.at = at
        self.fwhm = fwhm
        self.window_order = window_order
        gaussian = gaussian_window(self.x, self.at, self.fwhm, self.window_order)
        self.plt.plot(self.x, gaussian * max(abs(self.y)), "k--")
        if plot:
            self.plot(overwrite="$t\,[fs]$")
            self.show()
        return self

    @inplacify
    def apply_window(self):
        """
        If the window function is set, applies the window to the dataset.
        """
        self.plt.clf()
        self.plt.cla()
        self.plt.close()
        self.y = cut_gaussian(
            self.x,
            self.y,
            spike=self.at,
            fwhm=self.fwhm,
            win_order=self.window_order,
        )
        return self

    def build_phase(self):
        """
        Retrieve *only the phase* after the transforms. This will
        unwrap the angles and construct a `~pysprint.core.phase.Phase` object.

        Returns
        -------
        phase : pysprint.core.phase.Phase
            The phase object. See its docstring for more info.
        """
        if self.nufft_used:
            self.shift("y")
        y = np.unwrap(np.angle(self.y), axis=0)
        self.phase = Phase(self.x, y)
        return self.phase  # because of inplace ops. we need to return the phase

    def calculate(self, reference_point, order, show_graph=False):
        """
        FFTMethod's calculate function. It will unwrap the phase by
        changing deltas between values to 2*pi complement. After that,
        fit a curve to determine dispersion coefficients.

        Parameters
        ----------
        reference_point : float
            The reference point on the x axis.
        order : int
            Polynomial (and maximum dispersion) order to fit. Must be in [1, 5].
        show_graph : bool, optional
            Shows the final graph of the spectral phase and fitted curve.
            Default is False.

        Returns
        -------
        dispersion : array-like
            The dispersion coefficients in the form of:
            [GD, GDD, TOD, FOD, QOD, SOD]
        dispersion_std : array-like
            Standard deviations due to uncertainty of the fit.
            It is only calculated if lmfit is installed. The form is:
            [GD_std, GDD_std, TOD_std, FOD_std, QOD_std, SOD_std]
        fit_report : str
            If lmfit is available returns the fit report, else returns an
            empty string.

        Notes
        -----
        Decorated with pprint_disp, so the results are immediately
        printed without explicitly saying so.

        Developer commentary
        --------------------
        Currently the x-axis transformation is sloppy, because we cache
        the original x axis and don't transform it backwards. In addition
        we need to keep track of interpolation and zero-padding too.
        Currently the transforms are correct only if ifft was used first.
        For now it's doing okay: giving good results. For consistency we
        should still implement that in a better way later.
        """
        self.build_phase()
        dispersion, dispersion_std, fit_report = self.phase._fit(
            reference_point=reference_point, order=order
        )
        if show_graph:
            self.phase.plot()
        self._dispersion_array = dispersion
        return -dispersion, dispersion_std, fit_report

    def autorun(
        self,
        reference_point=None,
        order=None,
        *,
        enable_printing=True,
        skip_domain_check=False,
        only_phase=False,
        show_graph=True,
        usenifft=False,
    ):
        """
        Automatically run the Fourier Transform based evaluation on the
        dataset. It's not as reliable as I want it to be, so use it
        carefully. I'm working on making it as competent and useful as
        possible.

        Parameters
        ----------
        reference_point : float, optional
            The reference point on the x axis. If not given, only_phase
            mode will be activated. Default is None.
        order : int, optional
            Polynomial (and maximum dispersion) order to fit. Must be in
            [1, 6]. If not given, only_phase mode will be activated.
            Default is None.
        only_phase : bool, optional
            If True, activate the only_phase mode, which will retrieve the
            phase without fitting a curve, and return a
            `pysprint.core.phase.Phase` object. Default is False (also, not
            giving enough information for curve fitting will automatically
            activate it).
        enable_printing : bool, optional
            If True, enable printing the detailed results. Default is True.
        skip_domain_check : bool, optional
            If True, skip the interferogram domain check and force the
            algorithm to perform actions without changing domain. If False,
            check for potential wrong domains and change to an appropriate
            one. Default is False.
        show_graph : bool, optional
            If True, show the graph with the phase and the fitted curve,
            if there is any. Default is True.
        usenifft : bool, optional
            If True, use the Non Uniform Fast Fourier Transform algorithm.
            For more details see `help(pysprint.FFTMethod.ifft)`.
            Default is False.

        References
        ----------
        [1] Dutt A., Rokhlin V.: Fast Fourier Transforms for Nonequispaced
        Data II, Applied and Computational Harmonic Analysis, Volume 2,
        Issue 1, January 1995, Pages 85-100 (1995)

        [2] Greengard, Leslie & Lee, June-Yub: Accelerating the Nonuniform
        Fast Fourier Transform, Society for Industrial and Applied
        Mathematics, 46, 443-454, 10.1137/S003614450343200X (2004)
        """
        if not reference_point or not order:
            only_phase = True

        if not enable_printing:
            with open(os.devnull, "w") as g, contextlib.redirect_stdout(g):
                _run(
                    self,
                    skip_domain_check=skip_domain_check,
                    show_graph=show_graph,
                    usenifft=usenifft,
                )
                if only_phase:
                    y = np.unwrap(np.angle(self.y), axis=0)
                    self.phase = Phase(self.x, y)
                    return self.phase
                self.calculate(reference_point=reference_point, order=order, show_graph=True)
        else:
            _run(
                self,
                skip_domain_check=skip_domain_check,
                show_graph=show_graph,
                usenifft=usenifft,
            )
            if only_phase:
                y = np.unwrap(np.angle(self.y), axis=0)
                self.phase = Phase(self.x, y)
                return self.phase
            self.calculate(reference_point=reference_point, order=order, show_graph=True)

    # TODO: add interpolation
    def get_pulse_shape_from_array(self, x_sample, y_sample, truncate=True, tol=None):
        """
        Find out the shape of the pulse in the time domain I(t).

        Parameters
        ----------
        x_sample : np.ndarray
            The x values of the sample arm.
        y_sample : np.ndarray
            The y values of the sample arm.
        truncate : bool, optional
            Whether to truncate the phase and sample spectra to the
            longest_common_subsequence (implemented at
            pysprint.core.bases.algorithms). Default is True.
        tol : float or None, optional
            The tolerance which determines how big a difference is allowed
            between x values to interpret them as the same datapoint.
        """
        if self.phase is None:
            raise NotCalculatedException("Must calculate phase first.")
        if not len(y_sample) == len(x_sample):
            raise ValueError("Mismatching shapes.")

        # quick check if we're able to broadcast
        y_sample = np.asarray(y_sample, dtype=float)
        x_phase, y_phase = self.phase.data[0], self.phase.data[1]

        if len(y_sample) != len(self.phase.data[0]):
            if truncate:
                x_sample, y_sample, x_phase, y_phase = longest_common_subsequence(
                    x_sample, y_sample, x_phase, y_phase, tol=tol
                )
                logger.info(
                    f"Shapes were truncated from {np.min(x_sample)} to {np.max(x_sample)} with length {len(x_sample)}."
                )
            else:
                raise ValueError(
                    f"Shapes differ with {len(x_sample)} and {len(self.phase.data[0])}."
                )

        E_field = np.sqrt(y_sample) * np.exp(-1j * y_phase)
        E_pulse = np.abs(np.fft.ifft(E_field)) ** 2

        x_spaced = np.linspace(x_phase[0], x_phase[-1], len(x_phase))
        timestep = np.diff(x_spaced)[0]
        x_axis = np.fft.fftfreq(len(x_phase), d=timestep / (2 * np.pi))

        return x_axis, E_pulse

    def get_pulse_shape_from_file(self, filename, truncate=True, tol=None, **kwargs):
        """
        Find out the shape of the pulse in the time domain I(t).
        The sample arm's spectrum is loaded from file.

        Parameters
        ----------
        filename : str
            The file containing the sample arm's spectrum.
        truncate : bool, optional
            Whether to truncate the phase and sample spectra to the
            longest_common_subsequence (implemented at
            pysprint.core.bases.algorithms). Default is True.
        tol : float or None, optional
            The tolerance which determines how big a difference is allowed
            between x values to interpret them as the same datapoint.
        kwargs : dict, optional
            The additional keyword arguments for parsing. Same as
            `pysprint.Dataset.parse_raw`. If `chdomain=True`, then change
            the domain after loading.
        """
        if isinstance(filename, str):
            ch = kwargs.pop("chdomain", False)
            df = pd.read_csv(filename, names=["x", "y"], **kwargs)
            x_sample = df["x"].values
            y_sample = df["y"].values
            if ch:
                x_sample = self.wave2freq(x_sample)
            return self.get_pulse_shape_from_array(x_sample, y_sample, truncate=truncate, tol=tol)

    def errorplot(self, *args, **kwargs):
        """
        Plot the errors of fitting.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            An axis to draw the plot on. If not given, it will plot
            on the last used axis.
        percent : bool, optional
            Whether to plot percentage difference. Default is False.
        title : str, optional
            The title of the plot. Default is "Errors".
        kwargs : dict, optional
            Additional keyword arguments to pass to the plot function.
        """
        try:
            getattr(self.phase, "errorplot", None)(*args, **kwargs)
        except TypeError:
            raise NotCalculatedException("Must calculate before plotting errors.")

    @property
    def get_phase(self):
        """
        Return the phase if it is already calculated.
        """
        if self.phase is not None:
            return self.phase
        raise NotCalculatedException("Must retrieve the phase first.")

    @property
    def errors(self):
        """
        Return the fitting errors as np.ndarray.
        """
        errors = getattr(self.phase, "errors", None)
        if errors is not None:
            return errors
        raise NotCalculatedException("Must calculate the fit first.")

    # redefinition to ensure proper attributes are changed
    @inplacify
    def resample(self, N, kind="linear", **kwds):
        """
        Resample the interferogram to have `N` datapoints.

        Parameters
        ----------
        N : int
            The number of datapoints required.
        kind : str, optional
            The type of interpolation to use. Default is `linear`.
        kwds : optional
            Additional keyword arguments to pass to `scipy.interpolate.interp1d`.

        Raises
        ------
        PySprintWarning, if trying to subsample to lower `N` datapoints
        than the original.
        """
        f = interp1d(self.x, self.y_norm, kind, **kwds)
        if N < len(self.x):
            N = len(self.x)
            warnings.warn(
                "Trying to resample to lower resolution, keeping shape..",
                PySprintWarning,
            )
        xnew = np.linspace(np.min(self.x), np.max(self.x), N)
        ynew = f(xnew)
        setattr(self, "x", xnew)
        setattr(self, "y_norm", ynew)
        setattr(self, "y", ynew)
        return self
def autorun(
    self,
    reference_point=None,
    order=None,
    *,
    enable_printing=True,
    skip_domain_check=False,
    only_phase=False,
    show_graph=True,
    usenifft=False,
):
    """
    Automatically run the Fourier Transform based evaluation on the dataset.
    It's not as reliable as I want it to be, so use it carefully. I'm
    working on making it as competent and useful as possible.

    Parameters
    ----------
    reference_point : float, optional
        The reference point on the x axis. If not given, only_phase
        mode will be activated. Default is None.
    order : int, optional
        Polynomial (and maximum dispersion) order to fit. Must be in
        [1, 6]. If not given, only_phase mode will be activated.
        Default is None.
    only_phase : bool, optional
        If True, activate the only_phase mode, which will retrieve the
        phase without fitting a curve, and return a
        `pysprint.core.phase.Phase` object. Default is False (also, not
        giving enough information for curve fitting will automatically
        activate it).
    enable_printing : bool, optional
        If True, enable printing the detailed results. Default is True.
    skip_domain_check : bool, optional
        If True, skip the interferogram domain check and force the
        algorithm to perform actions without changing domain. If False,
        check for potential wrong domains and change to an appropriate
        one. Default is False.
    show_graph : bool, optional
        If True, show the graph with the phase and the fitted curve,
        if there is any. Default is True.
    usenifft : bool, optional
        If True, use the Non Uniform Fast Fourier Transform algorithm.
        For more details see `help(pysprint.FFTMethod.ifft)`.
        Default is False.

    References
    ----------
    [1] Dutt A., Rokhlin V.: Fast Fourier Transforms for Nonequispaced
    Data II, Applied and Computational Harmonic Analysis, Volume 2,
    Issue 1, January 1995, Pages 85-100 (1995)

    [2] Greengard, Leslie & Lee, June-Yub: Accelerating the Nonuniform
    Fast Fourier Transform, Society for Industrial and Applied
    Mathematics, 46, 443-454, 10.1137/S003614450343200X (2004)
    """
    if not reference_point or not order:
        only_phase = True

    if not enable_printing:
        with open(os.devnull, "w") as g, contextlib.redirect_stdout(g):
            _run(
                self,
                skip_domain_check=skip_domain_check,
                show_graph=show_graph,
                usenifft=usenifft,
            )
            if only_phase:
                y = np.unwrap(np.angle(self.y), axis=0)
                self.phase = Phase(self.x, y)
                return self.phase
            self.calculate(reference_point=reference_point, order=order, show_graph=True)
    else:
        _run(
            self,
            skip_domain_check=skip_domain_check,
            show_graph=show_graph,
            usenifft=usenifft,
        )
        if only_phase:
            y = np.unwrap(np.angle(self.y), axis=0)
            self.phase = Phase(self.x, y)
            return self.phase
        self.calculate(reference_point=reference_point, order=order, show_graph=True)
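# A hedged usage sketch for autorun() with an assumed input file and parser
# options. With both reference_point and order given it runs the full
# evaluation (results are printed via pprint_disp); leaving them out switches
# to only_phase mode and returns a pysprint.core.phase.Phase object instead.
def _example_autorun():
    f = FFTMethod.parse_raw("ifg.txt", skiprows=8, decimal=".", sep=",")
    f.autorun(reference_point=2.355, order=3, show_graph=False)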
def constructor2():
    Phase.from_disperion_array([1, 2, 3, 4])
    Phase.from_disperion_array([1, 2, 3, 4], domain=np.arange(5611))
def constructor1():
    Phase.from_coeff([1, 2, 3, 4])
    Phase.from_coeff([1, 2, 3, 4], domain=np.arange(5611))
def build_GD(self, silent=False, fastmath=True, usenifft=False, parallel=False, errors="ignore"):
    """
    Build the GD.

    Parameters
    ----------
    silent : bool, optional
        Whether to print the progressbar. By default it will print.
    fastmath : bool, optional
        Whether to build additional arrays to display heatmap.
        Default is True.
    usenifft : bool, optional
        Whether to use Non-uniform FFT when calculating GD.
        Default is False. **Not stable.**
    parallel : bool, optional
        Whether to use parallel computation. Only available if `Dask`
        is installed. The speedup is about 50-70%. Default is False.
    errors : str, optional
        Whether to raise an error if the algorithm couldn't find the
        center of the peak.

    Returns
    -------
    GD : pysprint.core.phase.Phase
        The phase object with `GD_mode=True`. See its docstring for more info.
    """
    if parallel:
        if not CAN_PARALLELIZE:
            raise ModuleNotFoundError(
                "Module `dask` not found. Please install it in order to use parallelism."
            )
        else:
            self.fastmath = fastmath
            self._apply_window_seq_parallel(fastmath=fastmath, usenifft=usenifft, errors=errors)
            if not silent:
                with ProgressBar():
                    computed = compute(*self.found_centers.values())
            else:
                computed = compute(*self.found_centers.values())
            cleaned_delays = [
                k for i, k in enumerate(self.found_centers.keys()) if computed[i] is not None
            ]
            delay = np.fromiter(cleaned_delays, dtype=float)
            omega = np.fromiter([c for c in computed if c is not None], dtype=float)
            if not silent:
                print(
                    f"Errors found: {len(self.window_seq) - sum(1 for _ in filter(None.__ne__, computed))}"
                )
    else:
        self.fastmath = fastmath
        self._apply_window_sequence(silent=silent, fastmath=fastmath, usenifft=usenifft)
        self._clean_centers()
        delay = np.fromiter(self.found_centers.keys(), dtype=float)
        omega = np.fromiter(self.found_centers.values(), dtype=float)

    self.GD = Phase(delay, omega, GD_mode=True)
    return self.GD
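# A brief sketch of what one might do with the Phase object returned by
# build_GD above (GD_mode=True): fit it around a reference point, the same
# way the tests call Phase.fit. The reference point and order are assumptions.
def _example_use_gd(method_instance):
    gd = method_instance.build_GD(silent=True, parallel=False)
    d, ds, report = gd.fit(reference_point=2.355, order=3)
    return d, ds, report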