def _smeared_abeles_constant(q, w, resolution, parallel=True):
    """
    A kernel for fast and constant dQ/Q smearing

    Parameters
    ----------
    q: np.ndarray
        Q values to evaluate the reflectivity at
    w: np.ndarray
        Parameters for the reflectivity model
    resolution: float
        Percentage dq/q resolution. dq specified as FWHM of a resolution
        kernel.
    parallel: bool, optional
        Do you want to calculate in parallel? This option is only applicable
        if you are using the ``_creflect`` module. The option is ignored if
        using the pure python calculator, ``_reflect``.

    Returns
    -------
    reflectivity: np.ndarray
        The resolution smeared reflectivity
    """
    if resolution < 0.5:
        return refcalc.abeles(q, w, parallel=parallel)

    resolution /= 100
    gaussnum = 51
    gaussgpoint = (gaussnum - 1) / 2

    def gauss(x, s):
        return (1. / s / np.sqrt(2 * np.pi) *
                np.exp(-0.5 * x**2 / s / s))

    lowq = np.min(q)
    highq = np.max(q)
    if lowq <= 0:
        lowq = 1e-6

    start = np.log10(lowq) - 6 * resolution / _FWHM
    finish = np.log10(highq * (1 + 6 * resolution / _FWHM))
    interpnum = np.round(np.abs(1 * (np.abs(start - finish)) /
                                (1.7 * resolution / _FWHM / gaussgpoint)))
    xtemp = np.linspace(start, finish, int(interpnum))
    xlin = np.power(10., xtemp)

    # resolution smear over [-4 sigma, 4 sigma]
    gauss_x = np.linspace(-1.7 * resolution, 1.7 * resolution, gaussnum)
    gauss_y = gauss(gauss_x, resolution / _FWHM)

    rvals = refcalc.abeles(xlin, w, parallel=parallel)
    smeared_rvals = np.convolve(rvals, gauss_y, mode='same')
    interpolator = InterpolatedUnivariateSpline(xlin, smeared_rvals)

    smeared_output = interpolator(q)
    # smeared_output *= np.sum(gauss_y)
    smeared_output *= gauss_x[1] - gauss_x[0]
    return smeared_output
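
# --- Usage sketch (illustrative only, not part of the original module) ----
# A minimal demonstration of the constant dQ/Q path above. The Q range, the
# three-row slab model and the 5% resolution value are assumptions made for
# illustration; `np`, `refcalc`, `_FWHM` and `InterpolatedUnivariateSpline`
# are assumed to be imported at module level, as the function requires.
def _example_constant_smearing():
    # fronting / single 100 Angstrom layer / backing, in 'layer' form
    w = np.array([[0, 2.07, 0.0, 0],
                  [100, 3.47, 0.0, 3],
                  [0, 6.36, 0.0, 3]])
    q = np.linspace(0.008, 0.3, 500)
    # `resolution` is a percentage: 5 means dQ/Q = 5% (FWHM)
    return _smeared_abeles_constant(q, w, 5, parallel=True)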
def test_c_abeles(self):
    if TEST_C_REFLECT:
        # test reflectivity calculation with values generated from Motofit
        calc = _creflect.abeles(self.qvals, self.layer_format)
        assert_almost_equal(calc, self.rvals)

        # test for non-contiguous Q values
        tempq = self.qvals[0::5]
        assert_(tempq.flags['C_CONTIGUOUS'] is False)
        calc = _creflect.abeles(tempq, self.layer_format)
        assert_almost_equal(calc, self.rvals[0::5])
def _smearkernel(x, w, q, dq, parallel):
    """
    Kernel for adaptive Gaussian quadrature integration

    Parameters
    ----------
    x : float
        Independent variable for integration.
    w : array-like
        The uniform slab model parameters in 'layer' form.
    q : float
        Nominal mean Q of normal distribution
    dq : float
        FWHM of a normal distribution.
    parallel : bool
        Do you want to calculate in parallel? This option is only applicable
        if you are using the ``_creflect`` module. The option is ignored if
        using the pure python calculator, ``_reflect``.

    Returns
    -------
    reflectivity : float
        Model reflectivity multiplied by the probability density function
        evaluated at a given distance, x, away from the mean Q value.
    """
    prefactor = 1 / np.sqrt(2 * np.pi)
    gauss = prefactor * np.exp(-0.5 * x * x)
    localq = q + x * dq / _FWHM
    return refcalc.abeles(localq, w, parallel=parallel) * gauss
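
# --- Sketch: driving the adaptive kernel (illustrative only) ---------------
# `_smeared_abeles_adaptive`, referenced by `reflectivity` later in this
# section, is not shown here. The helper below is a sketch of how
# `_smearkernel` might be driven; its name and the use of
# `scipy.integrate.quadrature` are assumptions, consistent with the
# `scipy.integrate.simps` call in `reflectivity`, with `scipy.integrate`
# assumed to be imported at module level.
def _smeared_abeles_adaptive_sketch(qvals, w, dqvals, parallel=True):
    smeared_rvals = np.zeros(qvals.size)
    for idx in range(qvals.size):
        # integrate the Gaussian-weighted reflectivity over
        # [-_INTLIMIT, _INTLIMIT] standard deviations about each Q point
        smeared_rvals[idx], _ = scipy.integrate.quadrature(
            _smearkernel, -_INTLIMIT, _INTLIMIT,
            args=(w, qvals[idx], dqvals[idx], parallel))
    return smeared_rvals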
def _smeared_abeles_fixed(qvals, w, dqvals, quad_order=17, workers=0):
    """
    Resolution smearing that uses fixed order Gaussian quadrature integration
    for the convolution.

    Parameters
    ----------
    qvals : array-like
        The Q values for evaluation
    w : array-like
        The uniform slab model parameters in 'layer' form.
    dqvals : array-like
        dQ values corresponding to each value in `qvals`. Each dqval is the
        FWHM of a Gaussian approximation to the resolution kernel.
    quad_order : int, optional
        Specify the order of the Gaussian quadrature integration for the
        convolution.
    workers : int, optional
        Specifies the number of threads for parallel calculation. This
        option is only applicable if you are using the ``_creflect``
        module. The option is ignored if using the pure python calculator,
        ``_reflect``. If `workers == 0` then all available processors are
        used.

    Returns
    -------
    reflectivity : np.ndarray
        The smeared reflectivity
    """
    # get the gauss-legendre weights and abscissae
    abscissa, weights = gauss_legendre(quad_order)

    # get the normal distribution at that point
    prefactor = 1. / np.sqrt(2 * np.pi)

    def gauss(x):
        return np.exp(-0.5 * x * x)

    gaussvals = prefactor * gauss(abscissa * _INTLIMIT)

    # integration between -3.5 and 3.5 sigma
    va = qvals - _INTLIMIT * dqvals / _FWHM
    vb = qvals + _INTLIMIT * dqvals / _FWHM

    va = va[:, np.newaxis]
    vb = vb[:, np.newaxis]

    qvals_for_res = (np.atleast_2d(abscissa) * (vb - va) + vb + va) / 2.
    smeared_rvals = refcalc.abeles(qvals_for_res.flatten(), w,
                                   workers=workers)

    smeared_rvals = np.reshape(smeared_rvals, (qvals.size, abscissa.size))

    smeared_rvals *= np.atleast_2d(gaussvals * weights)
    return np.sum(smeared_rvals, 1) * _INTLIMIT
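
# --- Usage sketch (illustrative only, not part of the original module) ----
# In the function above the Gauss-Legendre abscissae t_i lie on [-1, 1]; the
# line building `qvals_for_res` rescales them onto [va, vb], i.e. onto
# q +/- _INTLIMIT * sigma with sigma = dq / _FWHM, and the trailing factor
# of _INTLIMIT is the Jacobian of that substitution. A minimal
# point-by-point call might look like the following; the slab model and the
# 5% dQ/Q are assumptions for illustration.
def _example_fixed_order_smearing():
    w = np.array([[0, 2.07, 0.0, 0],
                  [100, 3.47, 0.0, 3],
                  [0, 6.36, 0.0, 3]])
    q = np.linspace(0.008, 0.3, 500)
    dq = 0.05 * q  # FWHM of the Gaussian resolution at each Q value
    return _smeared_abeles_fixed(q, w, dq, quad_order=17, workers=0)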
def test_c_abeles_reshape(self):
    # c reflectivity should be able to deal with multidimensional input
    if not TEST_C_REFLECT:
        return
    reshaped_q = np.reshape(self.qvals, (2, 250))
    reshaped_r = self.rvals.reshape(2, 250)
    calc = _creflect.abeles(reshaped_q, self.layer_format)
    assert_equal(reshaped_r.shape, calc.shape)
    assert_almost_equal(reshaped_r, calc, 15)
def test_compare_c_py_abeles0(self):
    # test a zero layer system (fronting and backing only)
    if not TEST_C_REFLECT:
        return
    layer0 = np.array([[0, 2.07, 0.01, 3],
                       [0, 6.36, 0.1, 3]])
    calc1 = _reflect.abeles(self.qvals, layer0, scale=0.99, bkg=1e-8)
    calc2 = _creflect.abeles(self.qvals, layer0, scale=0.99, bkg=1e-8)
    assert_almost_equal(calc1, calc2)
def test_compare_c_py_abeles(self):
    # test python and c are equivalent
    # but not the same file
    if not HAVE_CREFLECT:
        return
    assert_(_reflect.__file__ != _creflect.__file__)

    calc1 = _reflect.abeles(self.qvals, self.layer_format)
    calc2 = _creflect.abeles(self.qvals, self.layer_format)
    assert_almost_equal(calc1, calc2)

    calc1 = _reflect.abeles(self.qvals, self.layer_format, scale=2.)
    calc2 = _creflect.abeles(self.qvals, self.layer_format, scale=2.)
    assert_almost_equal(calc1, calc2)

    calc1 = _reflect.abeles(self.qvals, self.layer_format, scale=0.5,
                            bkg=0.1)
    calc2 = _creflect.abeles(self.qvals, self.layer_format, scale=0.5,
                             bkg=0.1)
    assert_almost_equal(calc1, calc2)
def test_compare_c_py_abeles2(self):
    # test two layer system
    if not HAVE_CREFLECT:
        return
    layer2 = np.array([[0, 2.07, 0.01, 3],
                       [10, 3.47, 0.01, 3],
                       [100, 1.0, 0.01, 4],
                       [0, 6.36, 0.1, 3]])
    calc1 = _reflect.abeles(self.qvals, layer2, scale=0.99, bkg=1e-8)
    calc2 = _creflect.abeles(self.qvals, layer2, scale=0.99, bkg=1e-8)
    assert_almost_equal(calc1, calc2)
def test_cabeles_parallelised(self):
    # I suppose this could fail if someone doesn't have a multicore computer
    if not HAVE_CREFLECT:
        return

    coefs = np.array([[0, 0, 0, 0],
                      [300, 3, 1e-3, 3],
                      [10, 3.47, 1e-3, 3],
                      [0, 6.36, 0, 3]])

    x = np.linspace(0.01, 0.2, 1000000)

    pstart = time.time()
    _creflect.abeles(x, coefs, parallel=True)
    pfinish = time.time()

    sstart = time.time()
    _creflect.abeles(x, coefs, parallel=False)
    sfinish = time.time()

    assert_(0.7 * (sfinish - sstart) > (pfinish - pstart))
def test_compare_c_py_abeles(self):
    # test python and c are equivalent
    # but not the same file
    if not TEST_C_REFLECT:
        return
    assert_(_reflect.__file__ != _creflect.__file__)

    calc1 = _reflect.abeles(self.qvals, self.layer_format)
    calc2 = _creflect.abeles(self.qvals, self.layer_format)
    assert_almost_equal(calc1, calc2)

    calc1 = _reflect.abeles(self.qvals, self.layer_format, scale=2.)
    calc2 = _creflect.abeles(self.qvals, self.layer_format, scale=2.)
    assert_almost_equal(calc1, calc2)

    calc1 = _reflect.abeles(self.qvals, self.layer_format, scale=0.5,
                            bkg=0.1)
    # workers = 1 is a non-threaded implementation
    calc2 = _creflect.abeles(self.qvals, self.layer_format, scale=0.5,
                             bkg=0.1, workers=1)
    # workers = 2 forces the calculation through the multithreaded code
    # path, even on a single core processor
    calc3 = _creflect.abeles(self.qvals, self.layer_format, scale=0.5,
                             bkg=0.1, workers=2)
    assert_almost_equal(calc1, calc2)
    assert_almost_equal(calc1, calc3)
def abeles(q, layers, scale=1, bkg=0., workers=0):
    r"""
    Abeles matrix formalism for calculating reflectivity from a stratified
    medium.

    Parameters
    ----------
    q : array_like
        the q values required for the calculation.
        :math:`Q = \frac{4\pi}{\lambda}\sin(\Omega)`.
        Units = Angstrom**-1
    layers : np.ndarray
        coefficients required for the calculation, has shape (2 + N, 4),
        where N is the number of layers

        * layers[0, 1] - SLD of fronting (/ 1e-6 Angstrom**-2)
        * layers[0, 2] - iSLD of fronting (/ 1e-6 Angstrom**-2)
        * layers[N, 0] - thickness of layer N
        * layers[N, 1] - SLD of layer N (/ 1e-6 Angstrom**-2)
        * layers[N, 2] - iSLD of layer N (/ 1e-6 Angstrom**-2)
        * layers[N, 3] - roughness between layer N-1/N
        * layers[-1, 1] - SLD of backing (/ 1e-6 Angstrom**-2)
        * layers[-1, 2] - iSLD of backing (/ 1e-6 Angstrom**-2)
        * layers[-1, 3] - roughness between backing and last layer
    scale : float
        Multiply all reflectivities by this value.
    bkg : float
        Linear background to be added to all reflectivities
    workers : int
        Specifies the number of threads for parallel calculation. This
        option is only applicable if you are using the ``_creflect``
        module. The option is ignored if using the pure python calculator,
        ``_reflect``. If `workers == 0` then all available processors are
        used.

    Returns
    -------
    reflectivity : np.ndarray
        Calculated reflectivity values for each q value.
    """
    return refcalc.abeles(q, layers, scale=scale, bkg=bkg, workers=workers)
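
# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal call to `abeles` with a hypothetical air / 100 Angstrom layer /
# silicon system. The SLD values, roughnesses and Q range are assumptions
# made for illustration.
def _example_abeles():
    q = np.linspace(0.005, 0.3, 1000)
    layers = np.array([[0, 0.00, 0.0, 0],    # fronting: air
                       [100, 3.47, 0.0, 3],  # layer 1: 100 A thick, 3 A rough
                       [0, 2.07, 0.0, 3]])   # backing: Si, 3 A rough
    return abeles(q, layers, scale=1.0, bkg=1e-7, workers=0)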
def abeles(q, layers, scale=1, bkg=0., parallel=True):
    """
    Abeles matrix formalism for calculating reflectivity from a stratified
    medium.

    Parameters
    ----------
    q: array_like
        the q values required for the calculation.
        Q = 4 * Pi / lambda * sin(omega).
        Units = Angstrom**-1
    layers: np.ndarray
        coefficients required for the calculation, has shape (2 + N, 4),
        where N is the number of layers

        * layers[0, 1] - SLD of fronting (/ 1e-6 Angstrom**-2)
        * layers[0, 2] - iSLD of fronting (/ 1e-6 Angstrom**-2)
        * layers[N, 0] - thickness of layer N
        * layers[N, 1] - SLD of layer N (/ 1e-6 Angstrom**-2)
        * layers[N, 2] - iSLD of layer N (/ 1e-6 Angstrom**-2)
        * layers[N, 3] - roughness between layer N-1/N
        * layers[-1, 1] - SLD of backing (/ 1e-6 Angstrom**-2)
        * layers[-1, 2] - iSLD of backing (/ 1e-6 Angstrom**-2)
        * layers[-1, 3] - roughness between backing and last layer
    scale: float
        Multiply all reflectivities by this value.
    bkg: float
        Linear background to be added to all reflectivities
    parallel: bool
        Do you want to calculate in parallel? This option is only applicable
        if you are using the ``_creflect`` module. The option is ignored if
        using the pure python calculator, ``_reflect``.

    Returns
    -------
    reflectivity: np.ndarray
        Calculated reflectivity values for each q value.
    """
    return refcalc.abeles(q, layers, scale=scale, bkg=bkg, parallel=parallel)
def reflectivity(q, coefs, *args, **kwds):
    """
    Abeles matrix formalism for calculating reflectivity from a stratified
    medium.

    Parameters
    ----------
    q : np.ndarray
        The qvalues required for the calculation.
        Q = 4 * Pi / lambda * sin(omega).
        Units = Angstrom**-1
    coefs : np.ndarray

        * coefs[0] = number of layers, N
        * coefs[1] = scale factor
        * coefs[2] = SLD of fronting (/1e-6 Angstrom**-2)
        * coefs[3] = iSLD of fronting (/1e-6 Angstrom**-2)
        * coefs[4] = SLD of backing
        * coefs[5] = iSLD of backing
        * coefs[6] = background
        * coefs[7] = roughness between backing and layer N
        * coefs[4 * (N - 1) + 8] = thickness of layer N in Angstrom (layer 1
          is closest to fronting)
        * coefs[4 * (N - 1) + 9] = SLD of layer N (/ 1e-6 Angstrom**-2)
        * coefs[4 * (N - 1) + 10] = iSLD of layer N (/ 1e-6 Angstrom**-2)
        * coefs[4 * (N - 1) + 11] = roughness between layer N and N-1.
    kwds : dict, optional
        The following keys are used:

        'dqvals' - float or np.ndarray, optional
            If dqvals is a float, then a constant dQ/Q resolution smearing is
            employed. For 5% resolution smearing supply 5.
            If `dqvals` is the same shape as q, then the array contains the
            FWHM of a Gaussian approximated resolution kernel. Point by point
            resolution smearing is employed. Use this option if dQ/Q varies
            across your dataset.
            If `dqvals.ndim == q.ndim + 2` and
            `dqvals.shape[0:q.ndim] == q.shape` then an individual resolution
            kernel is applied to each measurement point. This resolution
            kernel is a probability distribution function (PDF). `dqvals`
            will have the shape (qvals.shape, M, 2). There are `M` points in
            the kernel. `dqvals[..., 0]` holds the q values for the kernel,
            `dqvals[..., 1]` gives the corresponding probability.
        'quad_order' - int, optional
            the order of the Gaussian quadrature polynomial for doing the
            resolution smearing. default = 17. Don't choose less than 13. If
            quad_order == 'ultimate' then adaptive quadrature is used.
            Adaptive quadrature will always work, but takes a _long_ time
            (2 or 3 orders of magnitude longer). Fixed quadrature will always
            take a lot less time. BUT it won't necessarily work across all
            samples. For example, 13 points may be fine for a thin layer, but
            will be atrocious at describing a multilayer with bragg peaks.
        'parallel' - bool, optional
            Do you want to calculate in parallel? This option is only
            applicable if you are using the ``_creflect`` module. The option
            is ignored if using the pure python calculator, ``_reflect``.
            The default is `True`.
    """
    parallel = True
    if 'parallel' in kwds:
        parallel = kwds['parallel']

    qvals = q
    quad_order = 17
    scale = coefs[1]
    bkg = coefs[6]

    if not is_proper_abeles_input(coefs):
        raise ValueError('The size of the parameter array passed to'
                         ' reflectivity should be 4 * coefs[0] + 8')

    # make into form suitable for reflection calculation
    w = coefs_to_layer(coefs)

    if 'quad_order' in kwds:
        quad_order = kwds['quad_order']

    if 'dqvals' in kwds and kwds['dqvals'] is not None:
        dqvals = kwds['dqvals']

        # constant dq/q smearing
        if isinstance(dqvals, numbers.Real):
            dqvals = float(dqvals)
            return (scale *
                    _smeared_abeles_constant(qvals, w, dqvals,
                                             parallel=parallel)) + bkg

        # point by point resolution smearing
        if dqvals.size == qvals.size:
            dqvals_flat = dqvals.flatten()
            qvals_flat = q.flatten()

            # adaptive quadrature
            if quad_order == 'ultimate':
                smeared_rvals = (scale *
                                 _smeared_abeles_adaptive(
                                     qvals_flat, w, dqvals_flat,
                                     parallel=parallel) +
                                 bkg)
                return smeared_rvals.reshape(q.shape)
            # fixed order quadrature
            else:
                smeared_rvals = (scale *
                                 _smeared_abeles_fixed(
                                     qvals_flat, w, dqvals_flat,
                                     quad_order=quad_order,
                                     parallel=parallel) +
                                 bkg)
                return np.reshape(smeared_rvals, q.shape)

        # resolution kernel smearing
        elif (dqvals.ndim == qvals.ndim + 2 and
              dqvals.shape[0: qvals.ndim] == qvals.shape):
            # TODO may not work yet.
            qvals_for_res = dqvals[..., 0]

            # work out the reflectivity at the kernel evaluation points
            smeared_rvals = refcalc.abeles(qvals_for_res, w,
                                           scale=coefs[1],
                                           bkg=coefs[6],
                                           parallel=parallel)

            # multiply by probability
            smeared_rvals *= dqvals[..., 1]

            # now do simpson integration
            return scipy.integrate.simps(smeared_rvals, x=dqvals[..., 0])

    # no smearing
    return refcalc.abeles(q, w, scale=coefs[1], bkg=coefs[6],
                          parallel=parallel)