def _nearest_intensity(dT, data):
    """Pick the intensity whose angular resolution is nearest each *dT*.

    Raises ValueError when the nearest resolution differs from *dT* by
    more than ACCEPTABLE_DIVERGENCE_DIFFERENCE for any point.
    """
    index = util.nearest(dT, data.angular_resolution)
    divergence_gap = abs(dT - data.angular_resolution[index])
    if (divergence_gap > ACCEPTABLE_DIVERGENCE_DIFFERENCE).any():
        raise ValueError(
            "polarization cross sections for direct beam are not aligned")
    return U(data.v, data.dv)[index]
def plot_sa(data):
    """
    Plot spin asymmetry data.

    *data* is an iterable of polarized datasets; the '++' and '--'
    cross sections are selected by their ``polarization`` attribute.
    Plots (R++ - R--)/(R++ + R--) with propagated uncertainties.
    """
    from matplotlib import pyplot as plt
    from uncertainties.unumpy import uarray as U, nominal_values, std_devs
    # NOTE(review): removed unused local import of ..refldata.Intent.
    # TODO: interp doesn't test for matching resolution
    data = {d.polarization: d for d in data}
    pp, mm = data['++'], data['--']
    v_pp = U(pp.v, pp.dv)
    # Align the -- cross section with the ++ abscissa before combining.
    v_mm = interp(pp.x, mm.x, U(mm.v, mm.dv))
    sa = (v_pp - v_mm) / (v_pp + v_mm)
    v, dv = nominal_values(sa), std_devs(sa)
    plt.errorbar(pp.x, v, yerr=dv, fmt='.', label=pp.name)
    plt.xlabel("%s (%s)" % (pp.xlabel, pp.xunits) if pp.xunits else pp.xlabel)
    plt.ylabel(r'$(R^{++} -\, R^{--}) / (R^{++} +\, R^{--})$')
def _apply_correction(data, dtheta, Hinv, use_pm, use_mp):
    """Apply the efficiency correction in eff to the data.

    *data* maps cross-section labels ('++', '--', ...) to datasets.
    *dtheta* is the angular resolution grid of the correction matrices.
    *Hinv* maps a resolution index to an inverse efficiency matrix.
    *use_pm*, *use_mp* select which spin-flip cross sections to include.
    Modifies the datasets in *data* in place.
    """
    # Identify active cross sections
    if use_pm and use_mp:
        parts = ALL_XS
    elif use_mp:
        parts = MP_XS
    elif use_pm:
        parts = PM_XS
    else:
        parts = NSF_XS

    # TODO: interpolation with mixed resolution
    # Interpolate data so that it aligns with ++.  If smoothing is
    # desired, apply the interpolated smoothing before calling polcor,
    # in which case the interpolation does nothing.
    assert parts[0] == '++'
    x = data['++'].Qz
    y = [U(data['++'].v, data['++'].dv)]
    for p in parts[1:]:
        px = data[p].Qz
        py = U(data[p].v, data[p].dv)
        # Use np.nan (the np.NaN alias was removed in NumPy 2.0) to mark
        # points that fall outside the ++ Qz range instead of extrapolating.
        y.append(interp(x, px, py, left=np.nan, right=np.nan))
    Y = np.vstack(y)

    # Look up correction matrix for each point using the ++ cross section
    correction_index = util.nearest(data['++'].angular_resolution, dtheta)

    # Apply the correction at each point
    X, dX = np.zeros(Y.shape), np.zeros(Y.shape)
    for point, polarization in enumerate(correction_index):
        x = Hinv[polarization] * UM(Y[:, point]).T
        X[:, point], dX[:, point] = nominal_values(x).flat, std_devs(x).flat

    # Put the corrected intensities back into the datasets,
    # interpolating back to the original Qz in that dataset.
    for k, xs in enumerate(parts):
        x = data[xs].Qz
        px = data['++'].Qz
        py = U(X[k, :], dX[k, :])
        y = interp(x, px, py, left=np.nan, right=np.nan)
        data[xs].v, data[xs].dv = nominal_values(y), std_devs(y)
        data[xs].vlabel = 'counts per incident count'
        data[xs].vunits = None
def clip_reflred_err1d(field, low, high, nanval=0.):
    """
    Clip the values to the range, returning the indices of the values
    which were clipped.  Note that this modifies field in place.  NaN
    values are clipped to the nanval default.

    *field* is dataflow.uncertainty array, whose values retain their
    variance even if they are forced within the bounds.  *low*, *high*
    and *nanval* are floats.
    """
    # Replace NaNs first; the subsequent bound tests then see nanval.
    bad = np.isnan(field.x)
    field[bad] = U(nanval, field.variance[bad])
    rejected = bad

    bad = field.x < low
    field[bad] = U(low, field.variance[bad])
    rejected = rejected | bad

    bad = field.x > high
    field[bad] = U(high, field.variance[bad])
    rejected = rejected | bad

    return rejected
def _apply_correction(dtheta, Hinv, data, spinflip=True):
    """Apply the efficiency correction in eff to the data."""
    # Stack the measured intensities (with uncertainties) for the active
    # cross sections into one matrix, one row per cross section.
    # TODO: need to interpolate data so that it aligns with ++
    # If smoothing is desired, apply the smoothing before calling polcor
    cross_sections = ALL_XS if spinflip else NSF_XS
    raw = np.vstack([U(data[xs].v, data[xs].dv) for xs in cross_sections])

    # Pick the correction matrix nearest in resolution to each ++ point.
    matrix_index = util.nearest(data['++'].angular_resolution, dtheta)

    # Correct each point by multiplying its column by the chosen matrix.
    corrected = np.zeros(raw.shape)
    uncertainty = np.zeros(raw.shape)
    for point, choice in enumerate(matrix_index):
        column = Hinv[choice] * UM(raw[:, point]).T
        corrected[:, point] = nominal_values(column).flat
        uncertainty[:, point] = std_devs(column).flat

    # Write the corrected intensities back into the datasets.
    for row, xs in enumerate(cross_sections):
        data[xs].v, data[xs].dv = corrected[row, :], uncertainty[row, :]
        data[xs].vlabel = 'Reflectivity'
        data[xs].vunits = None
def _interp_intensity(dT, data):
    """Interpolate the measured intensity (with uncertainty) onto the
    divergence grid *dT*."""
    measured = U(data.v, data.dv)
    return interp(dT, data.angular_resolution, measured)
def clip_reflred_err1d(field, low, high, nanval=0.):
    """Force field.x into [low, high], mapping NaN to *nanval*, while the
    variances are kept unchanged.  Modifies *field* in place and returns
    a boolean index of the entries that were altered."""
    touched = np.isnan(field.x)
    field[touched] = U(nanval, field.variance[touched])
    below = field.x < low
    field[below] = U(low, field.variance[below])
    touched |= below
    above = field.x > high
    field[above] = U(high, field.variance[above])
    touched |= above
    return touched
def _interp_intensity(dT, data):
    """Return the beam intensity (with uncertainty) interpolated from the
    measured angular-resolution grid onto *dT*."""
    intensity = U(data.v, data.dv)
    return util.interp(dT, data.angular_resolution, intensity)