def burst(self, wavelength0, wavelength):
    """
    Calculates pdf(wavelength) for the burst time component.

    Parameters
    ----------
    wavelength0: float
        Nominal wavelength, Angstrom
    wavelength: float
        Wavelength, Angstrom

    Returns
    -------
    pdf: float
        Probability density function at `wavelength`
    """
    # burst time
    tc = general.tauC(
        wavelength0, xsi=self.xsi, z0=self.z0 / 1000, freq=self.freq
    )
    # time of flight
    TOF = self.L / 1000.0 / general.wavelength_velocity(wavelength0)
    width = tc / TOF * wavelength0
    return stats.uniform.pdf(wavelength - wavelength0, -width / 2, width)
def width(self, wavelength0):
    """
    Total fractional wavelength width at `wavelength0`, summing the
    burst, rebinning (da) and crossing time contributions.
    """
    # burst width
    tc = general.tauC(
        wavelength0, xsi=self.xsi, z0=self.z0 / 1000, freq=self.freq
    )
    # time of flight
    TOF = self.L / 1000.0 / general.wavelength_velocity(wavelength0)
    width = tc / TOF * wavelength0

    # da width
    width += self._da * wavelength0

    # crossing width
    tau_h = self.H / self.R / (2 * np.pi * self.freq)
    width += tau_h / TOF * wavelength0

    return width
def crossing(self, wavelength0, wavelength):
    """
    Calculates pdf(wavelength) due to the crossing time component.

    Parameters
    ----------
    wavelength0: float
        Nominal wavelength, Angstrom
    wavelength: float
        Wavelength, Angstrom

    Returns
    -------
    pdf: float
        Probability density function at `wavelength`
    """
    tau_h = self.H / self.R / (2 * np.pi * self.freq)
    TOF = self.L / 1000.0 / general.wavelength_velocity(wavelength0)
    width = tau_h / TOF * wavelength0
    return stats.uniform.pdf(wavelength - wavelength0, -width / 2, width)
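# A minimal, self-contained sketch of the uniform pdf construction shared by
# `burst` and `crossing` above. The numbers (dt, TOF) are hypothetical
# stand-ins, not instrument values; the point is the mapping from a time
# width to a wavelength width, width = (dt / TOF) * wavelength0, and the
# scipy.stats.uniform parameterisation (loc=-width/2, scale=width) centred
# on wavelength0. `_uniform_wavelength_pdf` is an illustrative helper, not
# part of the package.
import numpy as np
from scipy import stats

def _uniform_wavelength_pdf(wavelength0, wavelength, dt, TOF):
    # a fractional time width maps to a fractional wavelength width
    width = dt / TOF * wavelength0
    # uniform pdf centred on wavelength0, total width `width`
    return stats.uniform.pdf(wavelength - wavelength0, -width / 2, width)

# e.g. a 40 us burst time on a 20 ms time of flight gives a 0.02 A width,
# so the pdf is 1 / 0.02 = 50 anywhere inside the window
pdf = _uniform_wavelength_pdf(10.0, 10.005, dt=40e-6, TOF=20e-3)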
def _reduce_single_angle(self, scale=1):
    """
    Reduce a single angle.
    """
    n_spectra = self.reflected_beam.n_spectra
    n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
    n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)

    # calculate omega and two_theta depending on the mode.
    mode = self.reflected_beam.mode

    # we'll need the wavelengths to calculate Q.
    wavelengths = self.reflected_beam.m_lambda
    m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))

    detector_z_difference = (self.reflected_beam.detector_z -
                             self.direct_beam.detector_z)

    beampos_z_difference = (self.reflected_beam.m_beampos -
                            self.direct_beam.m_beampos)

    Y_PIXEL_SPACING = self.reflected_beam.cat.y_pixels_per_mm[0]

    total_z_deflection = (detector_z_difference +
                          beampos_z_difference * Y_PIXEL_SPACING)

    if mode in ['FOC', 'POL', 'POLANAL', 'MT']:
        # omega_nom.shape = (N, )
        omega_nom = np.degrees(
            np.arctan(total_z_deflection /
                      self.reflected_beam.detector_y) / 2.)

        '''
        Wavelength specific angle of incidence correction.
        This involves:
        1) working out the trajectory of the neutrons through the
           collimation system.
        2) where those neutrons intersect the sample.
        3) working out the elevation of the neutrons when they hit the
           sample.
        4) correcting the angle of incidence.
        '''
        speeds = general.wavelength_velocity(wavelengths)
        collimation_distance = self.reflected_beam.cat.collimation_distance
        s2_sample_distance = (self.reflected_beam.cat.sample_distance -
                              self.reflected_beam.cat.slit2_distance)

        # work out the trajectories of the neutrons for them to pass
        # through the collimation system.
        trajectories = find_trajectory(collimation_distance / 1000.,
                                       0, speeds)

        # work out where the beam hits the sample
        res = parabola_line_intersection_point(s2_sample_distance / 1000,
                                               0,
                                               trajectories,
                                               speeds,
                                               omega_nom[:, np.newaxis])
        intersect_x, intersect_y, x_prime, elevation = res

        # correct the angle of incidence with a wavelength dependent
        # elevation.
        omega_corrected = omega_nom[:, np.newaxis] - elevation

        m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
        m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
        m_twotheta *= Y_PIXEL_SPACING
        m_twotheta += detector_z_difference
        m_twotheta /= (
            self.reflected_beam.detector_y[:, np.newaxis, np.newaxis])
        m_twotheta = np.arctan(m_twotheta)
        m_twotheta = np.degrees(m_twotheta)

        # you may be reflecting upside down, reverse the sign.
        upside_down = np.sign(omega_corrected[:, 0])
        m_twotheta *= upside_down[:, np.newaxis, np.newaxis]
        omega_corrected *= upside_down[:, np.newaxis]

    elif mode in ['SB', 'DB']:
        # the angle of incidence is half the two theta of the reflected
        # beam
        omega = np.arctan(
            total_z_deflection / self.reflected_beam.detector_y) / 2.

        # work out two theta for each of the detector pixels
        m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
        m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
        m_twotheta += detector_z_difference
        m_twotheta -= (
            self.reflected_beam.detector_y[:, np.newaxis, np.newaxis] *
            np.tan(omega[:, np.newaxis, np.newaxis]))

        m_twotheta /= (
            self.reflected_beam.detector_y[:, np.newaxis, np.newaxis])
        m_twotheta = np.arctan(m_twotheta)
        m_twotheta += omega[:, np.newaxis, np.newaxis]

        # still in radians at this point
        # add an extra dimension, because omega_corrected needs to be the
        # angle of incidence for each wavelength, i.e. it should be
        # broadcastable to (N, T)
        omega_corrected = np.degrees(omega)[:, np.newaxis]
        m_twotheta = np.degrees(m_twotheta)

    '''
    --Specular Reflectivity--
    Use the (constant wavelength) spectra that have already been
    integrated over 2theta (in processnexus) to calculate the specular
    reflectivity.
    Beware: this is because m_topandtail has already been divided through
    by monitor counts and error propagated (at the end of processnexus).
    Thus, the 2theta pixels are correlated to some degree. If we use the
    2D plot to calculate reflectivity
    (sum {Iref_{2theta, lambda}} / I_direct_{lambda}) then the error bars
    in the reflectivity turn out much larger than they should be.
    '''
    ydata, ydata_sd = EP.EPdiv(self.reflected_beam.m_spec,
                               self.reflected_beam.m_spec_sd,
                               self.direct_beam.m_spec,
                               self.direct_beam.m_spec_sd)

    # calculate the 1D Qz values.
    xdata = general.q(omega_corrected, wavelengths)
    xdata_sd = (self.reflected_beam.m_lambda_fwhm /
                self.reflected_beam.m_lambda)**2
    xdata_sd += (self.reflected_beam.domega[:, np.newaxis] /
                 omega_corrected)**2
    xdata_sd = np.sqrt(xdata_sd) * xdata

    '''
    ---Offspecular reflectivity---
    normalise the counts in the reflected beam by the direct beam
    spectrum. This gives a reflectivity, and the errors are propagated as
    well, leaving the fractional variance (dr/r)^2.
    --Note-- that adjacent y-pixels (same wavelength) are correlated in
    this treatment, so you can't just sum over them,
    i.e. (c_0 / d) + ... + (c_n / d) != (c_0 + ... + c_n) / d
    once the uncertainties are propagated.
    '''
    m_ref, m_ref_sd = EP.EPdiv(
        self.reflected_beam.m_topandtail,
        self.reflected_beam.m_topandtail_sd,
        self.direct_beam.m_spec[:, :, np.newaxis],
        self.direct_beam.m_spec_sd[:, :, np.newaxis])

    # you may have had divide by zeroes.
    m_ref = np.where(np.isinf(m_ref), 0, m_ref)
    m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

    # calculate the Q values for the detector pixels. Each pixel has a
    # different 2theta and a different wavelength; ASSUME that they have
    # the same angle of incidence.
    qx, qy, qz = general.q2(omega_corrected[:, :, np.newaxis],
                            m_twotheta,
                            0,
                            wavelengths[:, :, np.newaxis])

    reduction = {}
    reduction['x'] = self.x = xdata
    reduction['x_err'] = self.x_err = xdata_sd
    reduction['y'] = self.y = ydata / scale
    reduction['y_err'] = self.y_err = ydata_sd / scale
    reduction['omega'] = omega_corrected
    reduction['m_twotheta'] = m_twotheta
    reduction['m_ref'] = self.m_ref = m_ref
    reduction['m_ref_err'] = self.m_ref_err = m_ref_sd
    reduction['qz'] = self.m_qz = qz
    reduction['qx'] = self.m_qx = qx
    reduction['nspectra'] = self.n_spectra = n_spectra
    reduction['start_time'] = self.reflected_beam.start_time
    reduction['datafile_number'] = self.datafile_number = (
        self.reflected_beam.datafile_number)

    fnames = []
    datasets = []
    datafilename = self.reflected_beam.datafilename
    datafilename = os.path.basename(datafilename.split('.nx.hdf')[0])

    for i in range(n_spectra):
        data_tup = self.data(scanpoint=i)
        datasets.append(ReflectDataset(data_tup))

    if self.save:
        for i, dataset in enumerate(datasets):
            fname = '{0}_{1}.dat'.format(datafilename, i)
            fnames.append(fname)
            with open(fname, 'wb') as f:
                dataset.save(f)

            fname = '{0}_{1}.xml'.format(datafilename, i)
            with open(fname, 'wb') as f:
                dataset.save_xml(
                    f, start_time=reduction['start_time'][i])

    reduction['fname'] = fnames

    return datasets, deepcopy(reduction)
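# A hedged, self-contained restatement of the Qz and dQz arithmetic used for
# `xdata` / `xdata_sd` above: the specular relation q = 4*pi*sin(omega)/lambda
# (which is what general.q evaluates for a specular geometry), with the
# fractional wavelength and angular resolutions added in quadrature.
# `_q_and_dq` is an illustrative helper and the numbers are made up.
import numpy as np

def _q_and_dq(omega_deg, wavelength, dlambda_fwhm, domega_deg):
    # specular relation: q = 4 pi sin(omega) / lambda
    q = 4 * np.pi * np.sin(np.radians(omega_deg)) / wavelength
    # fractional resolutions combine in quadrature, as for xdata_sd above
    frac_var = (dlambda_fwhm / wavelength) ** 2
    frac_var += (domega_deg / omega_deg) ** 2
    return q, np.sqrt(frac_var) * q

q, dq = _q_and_dq(0.65, 6.0, 0.2, 0.02)   # dq/q ~ 4.5% for these inputs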
def _reduce_single_angle(self, scale=1):
    """
    Reduce a single angle.
    """
    n_spectra = self.reflected_beam.n_spectra
    n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
    n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)

    # calculate omega and two_theta depending on the mode.
    mode = self.reflected_beam.mode

    # we'll need the wavelengths to calculate Q.
    wavelengths = self.reflected_beam.m_lambda
    m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))

    if mode in ['FOC', 'POL', 'POLANAL', 'MT']:
        detector_z_difference = (self.reflected_beam.detector_z -
                                 self.direct_beam.detector_z)
        beampos_z_difference = (self.reflected_beam.m_beampos -
                                self.direct_beam.m_beampos)

        total_z_deflection = (detector_z_difference +
                              beampos_z_difference * Y_PIXEL_SPACING)

        # omega_nom.shape = (N, )
        omega_nom = np.degrees(
            np.arctan(total_z_deflection /
                      self.reflected_beam.detector_y) / 2.)

        '''
        Wavelength specific angle of incidence correction.
        This involves:
        1) working out the trajectory of the neutrons through the
           collimation system.
        2) where those neutrons intersect the sample.
        3) working out the elevation of the neutrons when they hit the
           sample.
        4) correcting the angle of incidence.
        '''
        speeds = general.wavelength_velocity(wavelengths)
        collimation_distance = self.reflected_beam.cat.collimation_distance
        s2_sample_distance = (self.reflected_beam.cat.sample_distance -
                              self.reflected_beam.cat.slit2_distance)

        # work out the trajectories of the neutrons for them to pass
        # through the collimation system.
        trajectories = pm.find_trajectory(collimation_distance / 1000.,
                                          0, speeds)

        # work out where the beam hits the sample
        res = pm.parabola_line_intersection_point(
            s2_sample_distance / 1000,
            0,
            trajectories,
            speeds,
            omega_nom[:, np.newaxis])
        intersect_x, intersect_y, x_prime, elevation = res

        # correct the angle of incidence with a wavelength dependent
        # elevation.
        omega_corrected = omega_nom[:, np.newaxis] - elevation

    elif mode in ['SB', 'DB']:
        # m_beampos has shape (N, T); detector_z and detector_y have
        # shape (N,)
        omega = (self.reflected_beam.m_beampos +
                 self.reflected_beam.detector_z[:, np.newaxis])
        omega -= (self.direct_beam.m_beampos +
                  self.direct_beam.detector_z[:, np.newaxis])
        omega /= 2 * self.reflected_beam.detector_y[:, np.newaxis]
        omega = np.arctan(omega)

        m_twotheta += (
            np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :] *
            Y_PIXEL_SPACING)
        m_twotheta += (
            self.reflected_beam.detector_z[:, np.newaxis, np.newaxis])
        m_twotheta -= (
            self.direct_beam.m_beampos[:, :, np.newaxis] +
            self.direct_beam.detector_z[:, np.newaxis, np.newaxis])
        m_twotheta -= (
            self.reflected_beam.detector_y[:, np.newaxis, np.newaxis] *
            np.tan(omega[:, :, np.newaxis]))

        m_twotheta /= (
            self.reflected_beam.detector_y[:, np.newaxis, np.newaxis])
        m_twotheta = np.arctan(m_twotheta)
        m_twotheta += omega[:, :, np.newaxis]

        # still in radians at this point; convert to degrees, with
        # omega_corrected broadcastable to (N, T)
        omega_corrected = np.degrees(omega)
        m_twotheta = np.degrees(m_twotheta)

    '''
    --Specular Reflectivity--
    Use the (constant wavelength) spectra that have already been
    integrated over 2theta (in processnexus) to calculate the specular
    reflectivity.
    Beware: this is because m_topandtail has already been divided through
    by monitor counts and error propagated (at the end of processnexus).
    Thus, the 2theta pixels are correlated to some degree. If we use the
    2D plot to calculate reflectivity
    (sum {Iref_{2theta, lambda}} / I_direct_{lambda}) then the error bars
    in the reflectivity turn out much larger than they should be.
    '''
    ydata, ydata_sd = EP.EPdiv(self.reflected_beam.m_spec,
                               self.reflected_beam.m_spec_sd,
                               self.direct_beam.m_spec,
                               self.direct_beam.m_spec_sd)

    # calculate the 1D Qz values.
    xdata = general.q(omega_corrected, wavelengths)
    xdata_sd = (self.reflected_beam.m_lambda_fwhm /
                self.reflected_beam.m_lambda) ** 2
    xdata_sd += (self.reflected_beam.domega[:, np.newaxis] /
                 omega_corrected) ** 2
    xdata_sd = np.sqrt(xdata_sd) * xdata

    '''
    ---Offspecular reflectivity---
    normalise the counts in the reflected beam by the direct beam
    spectrum. This gives a reflectivity, and the errors are propagated as
    well, leaving the fractional variance (dr/r)^2.
    --Note-- that adjacent y-pixels (same wavelength) are correlated in
    this treatment, so you can't just sum over them,
    i.e. (c_0 / d) + ... + (c_n / d) != (c_0 + ... + c_n) / d
    once the uncertainties are propagated.
    '''
    m_ref, m_ref_sd = EP.EPdiv(
        self.reflected_beam.m_topandtail,
        self.reflected_beam.m_topandtail_sd,
        self.direct_beam.m_spec[:, :, np.newaxis],
        self.direct_beam.m_spec_sd[:, :, np.newaxis])

    # you may have had divide by zeroes.
    m_ref = np.where(np.isinf(m_ref), 0, m_ref)
    m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

    # calculate the Q values for the detector pixels. Each pixel has a
    # different 2theta and a different wavelength; ASSUME that they have
    # the same angle of incidence.
    qx, qy, qz = general.q2(omega_corrected[:, :, np.newaxis],
                            m_twotheta,
                            0,
                            wavelengths[:, :, np.newaxis])

    reduction = {}
    reduction['xdata'] = self.xdata = xdata
    reduction['xdata_sd'] = self.xdata_sd = xdata_sd
    reduction['ydata'] = self.ydata = ydata
    reduction['ydata_sd'] = self.ydata_sd = ydata_sd
    reduction['m_ref'] = self.m_ref = m_ref
    reduction['m_ref_sd'] = self.m_ref_sd = m_ref_sd
    reduction['qz'] = self.m_qz = qz
    reduction['qy'] = self.m_qy = qy
    reduction['nspectra'] = self.n_spectra = n_spectra
    reduction['datafile_number'] = self.datafile_number = (
        self.reflected_beam.datafile_number)

    fnames = []
    if self.save:
        for i in range(n_spectra):
            data_tup = self.data(scanpoint=i)
            dataset = ReflectDataset(data_tup)

            fname = 'PLP{0:07d}_{1}.dat'.format(self.datafile_number, i)
            fnames.append(fname)
            with open(fname, 'wb') as f:
                dataset.save(f)

            fname = 'PLP{0:07d}_{1}.xml'.format(self.datafile_number, i)
            with open(fname, 'wb') as f:
                dataset.save_xml(f)

    reduction['fname'] = fnames

    return deepcopy(reduction)
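# The EP.EPdiv calls above divide two counted quantities and propagate the
# uncertainty. A minimal numpy equivalent for uncorrelated inputs, for
# illustration only (`_epdiv` is a simplified stand-in, not the real EP
# module, which may handle more cases):
import numpy as np

def _epdiv(a, a_sd, b, b_sd):
    ratio = a / b
    # fractional variances add for an uncorrelated quotient
    ratio_sd = np.abs(ratio) * np.sqrt((a_sd / a) ** 2 + (b_sd / b) ** 2)
    return ratio, ratio_sd

r, r_sd = _epdiv(100.0, 10.0, 400.0, 20.0)   # r = 0.25, ~11% uncertainty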
def sample(self, samples, random_state=None):
    """
    Sample the beam for reflected signal.

    2400000 samples roughly corresponds to 1200 sec of *PLATYPUS* using
    dlambda=3.3 and dtheta=3.3 at angle=0.65.
    150000000 samples roughly corresponds to 3600 sec of *PLATYPUS* using
    dlambda=3.3 and dtheta=3.3 at angle=3.0.

    (The sample number <--> actual acquisition time correspondence has
    not been checked fully.)

    Parameters
    ----------
    samples: int
        How many samples to run.
    random_state: {int, `~np.random.RandomState`, `~np.random.Generator`}, optional
        If `random_state` is not specified the `~np.random.RandomState`
        singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is
        used, seeded with `random_state`.
        If `random_state` is already a ``RandomState`` or a ``Generator``
        instance, then that object is used.
        Specify `random_state` for repeatable minimizations.
    """
    # grab a random number generator
    rng = check_random_state(random_state)

    # generate neutrons of various wavelengths
    wavelengths = self.spectrum_dist.rvs(size=samples, random_state=rng)

    # generate neutrons of different angular divergence
    angles = self.angular_dist.rvs(samples, random_state=rng) + self.angle

    # angular deviation due to gravity
    # --> no correction for gravity affecting width of angular resolution
    if self.gravity:
        speeds = general.wavelength_velocity(wavelengths)
        # trajectories through slits for different wavelengths
        trajectories = pm.find_trajectory(self.L12 / 1000.0, 0, speeds)
        # elevation at sample
        elevations = pm.elevation(
            trajectories, speeds, (self.L12 + self.L2S) / 1000.0
        )
        angles -= elevations

    # calculate Q
    q = general.q(angles, wavelengths)

    # calculate reflectivities for a neutron of a given Q.
    # the angular resolution smearing has already been done. The
    # wavelength resolution smearing follows.
    r = self.model(q, x_err=0.0)

    # accept or reject neutrons based on the reflectivity of the
    # sample at a given Q.
    criterion = rng.uniform(size=samples)
    accepted = criterion < r

    # implement wavelength smearing from choppers. Jitter the wavelengths
    # by a uniform distribution whose full width is dlambda / 0.68.
    if self.force_gaussian:
        noise = rng.standard_normal(size=samples)
        jittered_wavelengths = wavelengths * (
            1 + self.dlambda / 2.3548 * noise
        )
    else:
        noise = rng.uniform(-0.5, 0.5, size=samples)
        jittered_wavelengths = wavelengths * (
            1 + self.dlambda / 0.68 * noise
        )

    # update reflected beam counts. Rebin smearing is taken into account
    # due to the finite size of the wavelength bins.
    hist = np.histogram(
        jittered_wavelengths[accepted], self.wavelength_bins
    )
    self.reflected_beam += hist[0]
    self.bmon_reflect += float(samples)

    # update the resolution kernel. If all bins already hold more than
    # 500000 samples, skip.
    if (
        len(self._res_kernel)
        and np.min([len(v) for v in self._res_kernel.values()]) > 500000
    ):
        return

    bin_loc = np.digitize(jittered_wavelengths, self.wavelength_bins)
    for i in range(1, len(self.wavelength_bins)):
        # extract q values that fall in each wavelength bin
        q_for_bin = np.copy(q[bin_loc == i])
        q_samples_so_far = self._res_kernel.get(i - 1, np.array([]))
        updated_samples = np.concatenate((q_samples_so_far, q_for_bin))

        # no need to keep double precision for these sample arrays
        self._res_kernel[i - 1] = updated_samples.astype(np.float32)
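# A condensed, self-contained sketch of the accept/reject step in `sample`:
# draw one uniform variate per neutron and keep the neutron if the variate
# falls below the reflectivity at that neutron's Q. The reflectivity
# function here is a made-up placeholder, not the refnx model.
import numpy as np

rng = np.random.default_rng(1)
q = rng.uniform(0.01, 0.3, size=10000)     # stand-in Q values, 1/Angstrom
r = np.exp(-q / 0.05)                      # placeholder reflectivity curve
accepted = rng.uniform(size=q.size) < r    # criterion < r, as above
reflected_counts, _ = np.histogram(q[accepted], bins=50)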
def __init__(
    self,
    model,
    angle,
    L12=2859,
    footprint=60,
    L2S=120,
    dtheta=3.3,
    lo_wavelength=2.8,
    hi_wavelength=18,
    dlambda=3.3,
    rebin=2,
    gravity=False,
    force_gaussian=False,
    force_uniform_wavelength=False,
):
    self.model = model

    self.bkg = model.bkg.value
    self.angle = angle

    # dlambda refers to the FWHM of the gaussian approximation to a
    # uniform distribution. The full width of the uniform distribution
    # is dlambda / 0.68.
    self.dlambda = dlambda / 100.0
    # the rebin percentage refers to the full width of the bins. You
    # have to multiply this value by 0.68 to get the equivalent
    # contribution to the resolution function.
    self.rebin = rebin / 100.0
    self.wavelength_bins = calculate_wavelength_bins(
        lo_wavelength, hi_wavelength, rebin
    )
    bin_centre = 0.5 * (
        self.wavelength_bins[1:] + self.wavelength_bins[:-1]
    )

    # angular deviation due to gravity
    # --> no correction for gravity affecting width of angular resolution
    elevations = 0
    if gravity:
        speeds = general.wavelength_velocity(bin_centre)
        # trajectories through slits for different wavelengths
        trajectories = pm.find_trajectory(L12 / 1000.0, 0, speeds)
        # elevation at sample
        elevations = pm.elevation(
            trajectories, speeds, (L12 + L2S) / 1000.0
        )

    # nominal Q values
    self.q = general.q(angle - elevations, bin_centre)

    # keep a tally of the direct and reflected beam
    self.direct_beam = np.zeros((self.wavelength_bins.size - 1))
    self.reflected_beam = np.zeros((self.wavelength_bins.size - 1))

    # beam monitor counts for normalisation
    self.bmon_direct = 0
    self.bmon_reflect = 0

    self.gravity = gravity

    # wavelength generator
    self.force_uniform_wavelength = force_uniform_wavelength
    if force_uniform_wavelength:
        self.spectrum_dist = uniform(
            loc=lo_wavelength - 1,
            scale=hi_wavelength - lo_wavelength + 1,
        )
    else:
        a = PN("PLP0000711.nx.hdf")
        q, i, di = a.process(
            normalise=False,
            normalise_bins=False,
            rebin_percent=0.5,
            lo_wavelength=max(0, lo_wavelength - 1),
            hi_wavelength=hi_wavelength + 1,
        )
        q = q.squeeze()
        i = i.squeeze()
        self.spectrum_dist = SpectrumDist(q, i)

    self.force_gaussian = force_gaussian

    # angular resolution generator, based on a trapezoidal distribution.
    # The slit settings are the optimised set typically used in an
    # experiment. dtheta/theta refers to the FWHM of a Gaussian
    # approximation to a trapezoid.

    # stores the q vectors contributing towards each datapoint
    self._res_kernel = {}
    self._min_samples = 0

    self.dtheta = dtheta / 100.0
    self.footprint = footprint
    self.angle = angle
    self.L2S = L2S
    self.L12 = L12
    s1, s2 = general.slit_optimiser(
        footprint,
        self.dtheta,
        angle=angle,
        L2S=L2S,
        L12=L12,
        verbose=False,
    )
    div, alpha, beta = general.div(s1, s2, L12=L12)
    self.div, self.s1, self.s2 = s1, s2, div

    if force_gaussian:
        self.angular_dist = norm(scale=div / 2.3548)
    else:
        self.angular_dist = trapz(
            c=(alpha - beta) / 2.0 / alpha,
            d=(alpha + beta) / 2.0 / alpha,
            loc=-alpha,
            scale=2 * alpha,
        )
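# Where the 0.68 and 2.3548 factors above come from: a uniform distribution
# of full width W has standard deviation W / sqrt(12), and a Gaussian with
# that same standard deviation has FWHM 2*sqrt(2*ln 2) * W / sqrt(12),
# which is approximately 0.68 * W. So a FWHM-style dlambda converts to a
# uniform full width via dlambda / 0.68. A quick numeric check:
import numpy as np

W = 1.0                                        # uniform full width
sd = W / np.sqrt(12)                           # standard deviation of uniform
fwhm_gauss = 2 * np.sqrt(2 * np.log(2)) * sd   # = 2.3548 * sd
assert np.isclose(fwhm_gauss / W, 0.68, atol=0.005)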
def test_wavelength_velocity(self):
    speed = general.wavelength_velocity(20.)
    assert_almost_equal(speed, 197.8017006541796, 5)
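# The expected value in this test follows from the de Broglie relation
# v = h / (m_n * lambda); for 20 Angstrom this is ~197.80 m/s. A quick
# back-of-envelope check with CODATA constants:
h = 6.62607015e-34       # Planck constant, J s
m_n = 1.67492749804e-27  # neutron mass, kg
v = h / (m_n * 20e-10)   # ~197.80 m/s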
def process(
    self,
    h5norm=None,
    lo_wavelength=2.5,
    hi_wavelength=19.0,
    background=True,
    direct=False,
    omega=None,
    twotheta=None,
    rebin_percent=1.0,
    wavelength_bins=None,
    normalise=True,
    integrate=-1,
    eventmode=None,
    peak_pos=None,
    background_mask=None,
    normalise_bins=True,
    **kwds
):
    """
    Processes the ProcessNexus object to produce a time of flight
    spectrum. The processed spectrum is stored in the
    `processed_spectrum` attribute. The specular spectrum is also
    returned from this function.

    Parameters
    ----------
    h5norm : HDF5 NeXus file
        The hdf5 file containing the floodfield data.
    lo_wavelength : float
        The low wavelength cutoff for the rebinned data (A).
    hi_wavelength : float
        The high wavelength cutoff for the rebinned data (A).
    background : bool
        Should a background subtraction be carried out?
    direct : bool
        Is it a direct beam you measured? This is so a gravity correction
        can be applied.
    omega : float
        Expected angle of incidence of the beam. If this is None, then
        the rough angle of incidence is obtained from the NeXus file.
    twotheta : float
        Expected two theta value of the specular beam. If this is None,
        then the rough two theta value is obtained from the NeXus file.
    rebin_percent : float
        Specifies the rebinning percentage for the spectrum. If
        `rebin_percent is None`, then no rebinning is done.
    wavelength_bins : array_like
        The wavelength bins for rebinning. If `wavelength_bins is not
        None` then the `rebin_percent` parameter is ignored.
    normalise : bool
        Normalise by the monitor counts.
    integrate : int
        - integrate == -1
          the spectrum is integrated over all the scanpoints.
        - integrate >= 0
          the individual spectra are calculated individually.
          If `eventmode is not None` then integrate specifies which
          scanpoint to examine.
    eventmode : None or array_like
        If eventmode is `None` then the integrated detector image is
        used. If eventmode is an array then the array specifies the
        integration times (in seconds) for the detector image, e.g.
        [0, 20, 30] would result in two spectra. The first would contain
        data for 0 s to 20 s, the second would contain data for 20 s to
        30 s. This option can only be used when `integrate >= -1`.
        If eventmode has zero length (e.g. []), then a single time
        interval for the entire acquisition is used,
        [0, acquisition_time]. This would source the image from the
        eventmode file, rather than the NeXus file.
    peak_pos : None or (float, float)
        Specifies the peak position and peak standard deviation to use.
    background_mask : array_like
        An array of bool that specifies which y-pixels to use for
        background subtraction. Should be the same length as the number
        of y pixels in the detector image. Otherwise an automatic mask
        is applied (if background is True).
    normalise_bins : bool
        Divides the intensity in each wavelength bin by the width of the
        bin. This allows one to compare spectra even if they were
        processed with different rebin percentages.

    Notes
    -----
    After processing this object contains the following attributes:

    - path - path to the data file
    - datafilename - name of the datafile
    - datafile_number - datafile number.
    - m_topandtail - the corrected 2D detector image,
      (n_spectra, TOF, Y)
    - m_topandtail_sd - corresponding standard deviations
    - n_spectra - number of spectra in processed data
    - bm1_counts - beam monitor counts, (n_spectra,)
    - m_spec - specular intensity, (n_spectra, TOF)
    - m_spec_sd - corresponding standard deviations
    - m_beampos - beam_centre for each spectrum, (n_spectra, )
    - m_lambda - wavelengths for each spectrum, (n_spectra, TOF)
    - m_lambda_fwhm - corresponding FWHM of wavelength distribution
    - m_lambda_hist - wavelength bins for each spectrum, (n_spectra, TOF)
    - m_spec_tof - TOF for each wavelength bin, (n_spectra, TOF)
    - mode - the Platypus mode, e.g. FOC/MT/POL/POLANAL/SB/DB
    - detector_z - detector height, (n_spectra, )
    - detector_y - sample-detector distance, (n_spectra, )
    - domega - collimation uncertainty
    - lopx - lowest extent of specular beam (in y pixels), (n_spectra, )
    - hipx - highest extent of specular beam (in y pixels), (n_spectra, )

    Returns
    -------
    m_lambda, m_spec, m_spec_sd : np.ndarray
        Arrays containing the wavelength, the specular intensity as a
        function of wavelength, and the standard deviation of the
        specular intensity
    """
    cat = self.cat

    scanpoint = 0

    # beam monitor counts for normalising data
    bm1_counts = cat.bm1_counts.astype("float64")

    # TOF bins
    TOF = cat.t_bins.astype("float64")

    # This section controls how multiple detector images are handled.
    # We want event streaming.
    if eventmode is not None:
        scanpoint = integrate
        if integrate == -1:
            scanpoint = 0

        output = self.process_event_stream(
            scanpoint=scanpoint, frame_bins=eventmode
        )
        frame_bins, detector, bm1_counts = output
    else:
        # we don't want detector streaming
        detector = cat.detector
        scanpoint = 0

        # integrate over all spectra
        if integrate == -1:
            detector = np.sum(detector, 0)[np.newaxis,]
            bm1_counts[:] = np.sum(bm1_counts)

    n_spectra = np.size(detector, 0)

    # Up until this point detector.shape = (N, T, Y, X);
    # pre-average over x, leaving (n, t, y), and convert to double
    # precision
    detector = np.sum(detector, axis=3, dtype="float64")
    # detector shape should now be (n, t, y)

    # calculate the counting uncertainties
    detector_sd = np.sqrt(detector)
    bm1_counts_sd = np.sqrt(bm1_counts)

    # detector normalisation with a water file
    if h5norm:
        x_bins = cat.x_bins[scanpoint]
        # shape (y,)
        detector_norm, detector_norm_sd = create_detector_norm(
            h5norm, x_bins[0], x_bins[1]
        )
        # detector has shape (N, T, Y), shape of detector_norm should
        # broadcast to (1, 1, y)
        # TODO: Correlated Uncertainties?
        detector, detector_sd = EP.EPdiv(
            detector, detector_sd, detector_norm, detector_norm_sd
        )

    # shape of these is (n_spectra, TOFbins)
    m_spec_tof_hist = np.zeros(
        (n_spectra, np.size(TOF, 0)), dtype="float64"
    )
    m_lambda_hist = np.zeros(
        (n_spectra, np.size(TOF, 0)), dtype="float64"
    )
    m_spec_tof_hist[:] = TOF

    """
    chopper to detector distances
    note that if eventmode is specified the n_spectra is NOT equal to
    the number of entries in e.g.
    /longitudinal_translation
    this means you have to copy values in from the correct scanpoint
    """
    flight_distance = np.zeros(n_spectra, dtype="float64")
    d_cx = np.zeros(n_spectra, dtype="float64")
    detpositions = np.zeros(n_spectra, dtype="float64")

    # the angular divergence of the instrument
    domega = np.zeros(n_spectra, dtype="float64")

    phase_angle = np.zeros(n_spectra, dtype="float64")

    # process each of the spectra taken in the detector image
    originalscanpoint = scanpoint
    for idx in range(n_spectra):
        freq = cat.frequency[scanpoint]

        # calculate the angular divergence
        domega[idx] = general.div(
            cat.ss2vg[scanpoint],
            cat.ss3vg[scanpoint],
            (cat.slit3_distance[0] - cat.slit2_distance[0]),
        )[0]

        """
        work out the total flight length
        IMPORTANT: this varies as a function of twotheta. This is
        because the Platypus detector does not move on an arc.
        At high angles chod can be ~0.75% different. This will visibly
        shift fringes.
        """
        if omega is None:
            omega = cat.omega[scanpoint]
        if twotheta is None:
            twotheta = cat.twotheta[scanpoint]
        output = self.chod(omega, twotheta, scanpoint=scanpoint)
        flight_distance[idx], d_cx[idx] = output

        # calculate phase openings
        output = self.phase_angle(scanpoint)
        phase_angle[scanpoint], master_opening = output

        """
        toffset - the time difference between the magnet pickup on the
        choppers (TTL pulse), which is situated in the middle of the
        chopper window, and the trailing edge of chopper 1, which is
        supposed to be time0. However, if there is a phase opening,
        this time offset has to be relocated slightly, as time0 is not
        at the trailing edge.
        """
        poff = cat.chopper1_phase_offset[0]
        poffset = 1.0e6 * poff / (2.0 * 360.0 * freq)
        toffset = (
            poffset
            + 1.0e6 * master_opening / 2 / (2 * np.pi) / freq
            - 1.0e6 * phase_angle[scanpoint] / (360 * 2 * freq)
        )
        m_spec_tof_hist[idx] -= toffset

        detpositions[idx] = cat.dy[scanpoint]

        if eventmode is not None:
            m_spec_tof_hist[:] = TOF - toffset
            flight_distance[:] = flight_distance[0]
            detpositions[:] = detpositions[0]
            break
        else:
            scanpoint += 1

    scanpoint = originalscanpoint

    # convert TOF to lambda
    # m_spec_tof_hist (n, t) and chod is (n,)
    m_lambda_hist = general.velocity_wavelength(
        1.0e3 * flight_distance[:, np.newaxis] / m_spec_tof_hist
    )

    m_lambda = 0.5 * (m_lambda_hist[:, 1:] + m_lambda_hist[:, :-1])
    TOF -= toffset

    # gravity correction if direct beam
    if direct:
        # TODO: Correlated Uncertainties?
        output = correct_for_gravity(
            detector,
            detector_sd,
            m_lambda,
            self.cat.collimation_distance,
            self.cat.dy,
            lo_wavelength,
            hi_wavelength,
        )
        detector, detector_sd, m_gravcorrcoefs = output
        beam_centre, beam_sd = find_specular_ridge(detector, detector_sd)
        # beam_centre = m_gravcorrcoefs
    else:
        beam_centre, beam_sd = find_specular_ridge(detector, detector_sd)

    # you may want to specify the specular ridge on the averaged
    # detector image
    if peak_pos is not None:
        beam_centre = np.ones(n_spectra) * peak_pos[0]
        beam_sd = np.ones(n_spectra) * peak_pos[1]

    """
    Rebinning in lambda for all detector images.
    Rebinning is the default option, but sometimes you don't want to.
    detector shape input is (n, t, y)
    """
    if wavelength_bins is not None:
        rebinning = wavelength_bins
    elif 0.0 < rebin_percent < 15.0:
        rebinning = calculate_wavelength_bins(
            lo_wavelength, hi_wavelength, rebin_percent
        )
    # the rebin_percent percentage is zero. No rebinning, just cutoff
    # wavelength
    else:
        rebinning = m_lambda_hist[0, :]
        rebinning = rebinning[
            np.searchsorted(rebinning, lo_wavelength) : np.searchsorted(
                rebinning, hi_wavelength
            )
        ]

    """
    now do the rebinning for all the N detector images.
    rebin.rebinND could do all of these at once. However, m_lambda_hist
    could vary across the range of spectra. If it was the same I could
    eliminate the loop.
    """
    output = []
    output_sd = []
    for idx in range(n_spectra):
        # TODO: Correlated Uncertainties?
        plane, plane_sd = rebin.rebin_along_axis(
            detector[idx],
            m_lambda_hist[idx],
            rebinning,
            y1_sd=detector_sd[idx],
        )
        output.append(plane)
        output_sd.append(plane_sd)

    detector = np.array(output)
    detector_sd = np.array(output_sd)

    if len(detector.shape) == 2:
        detector = detector[np.newaxis,]
        detector_sd = detector_sd[np.newaxis,]

    # (1, T)
    m_lambda_hist = np.atleast_2d(rebinning)

    """
    Divide the detector intensities by the width of the wavelength bin.
    This is so the intensities between different rebinning strategies
    can be compared.
    """
    if normalise_bins:
        div = 1 / np.ediff1d(m_lambda_hist[0])[:, np.newaxis]
        detector, detector_sd = EP.EPmulk(detector, detector_sd, div)

    # convert the wavelength base to a timebase
    m_spec_tof_hist = (
        0.001
        * flight_distance[:, np.newaxis]
        / general.wavelength_velocity(m_lambda_hist)
    )

    m_lambda = 0.5 * (m_lambda_hist[:, 1:] + m_lambda_hist[:, :-1])

    m_spec_tof = (
        0.001
        * flight_distance[:, np.newaxis]
        / general.wavelength_velocity(m_lambda)
    )

    # we want to integrate over the following pixel region
    lopx = np.floor(beam_centre - beam_sd * EXTENT_MULT).astype("int")
    hipx = np.ceil(beam_centre + beam_sd * EXTENT_MULT).astype("int")

    m_spec = np.zeros((n_spectra, np.size(detector, 1)))
    m_spec_sd = np.zeros_like(m_spec)

    # background subtraction
    if background:
        if background_mask is not None:
            # background_mask is (Y), needs to be made 3 dimensional
            # (N, T, Y)
            # first make into (T, Y)
            backgnd_mask = np.repeat(
                background_mask[np.newaxis, :],
                detector.shape[1],
                axis=0,
            )
            # make into (N, T, Y)
            full_backgnd_mask = np.repeat(
                backgnd_mask[np.newaxis, :], n_spectra, axis=0
            )
        else:
            # there may be different background regions for each
            # spectrum in the file
            y1 = np.round(lopx - PIXEL_OFFSET).astype("int")
            y0 = np.round(y1 - (EXTENT_MULT * beam_sd)).astype("int")

            y2 = np.round(hipx + PIXEL_OFFSET).astype("int")
            y3 = np.round(y2 + (EXTENT_MULT * beam_sd)).astype("int")

            full_backgnd_mask = np.zeros_like(detector, dtype="bool")
            for i in range(n_spectra):
                full_backgnd_mask[i, :, y0[i] : y1[i]] = True
                full_backgnd_mask[i, :, y2[i] + 1 : y3[i] + 1] = True

        # TODO: Correlated Uncertainties?
        detector, detector_sd = background_subtract(
            detector, detector_sd, full_backgnd_mask
        )

    """
    top and tail the specular beam with the known beam centres.
    All this does is produce a specular intensity with shape (N, T),
    i.e. integrate over the specular beam
    """
    for i in range(n_spectra):
        m_spec[i] = np.sum(detector[i, :, lopx[i] : hipx[i] + 1], axis=1)
        sd = np.sum(
            detector_sd[i, :, lopx[i] : hipx[i] + 1] ** 2, axis=1
        )
        m_spec_sd[i] = np.sqrt(sd)

    # assert np.isfinite(m_spec).all()
    # assert np.isfinite(m_spec_sd).all()
    # assert np.isfinite(detector).all()
    # assert np.isfinite(detector_sd).all()

    # normalise by beam monitor 1.
    if normalise:
        m_spec, m_spec_sd = EP.EPdiv(
            m_spec,
            m_spec_sd,
            bm1_counts[:, np.newaxis],
            bm1_counts_sd[:, np.newaxis],
        )

        output = EP.EPdiv(
            detector,
            detector_sd,
            bm1_counts[:, np.newaxis, np.newaxis],
            bm1_counts_sd[:, np.newaxis, np.newaxis],
        )
        detector, detector_sd = output

    """
    now work out dlambda/lambda, the resolution contribution from
    wavelength.
    van Well, Physica B, 357 (2005) pp. 204-207, eqn 4.
    this is only an approximation for our instrument, as the 2nd and
    3rd discs have smaller openings compared to the master chopper.
    Therefore the burst time needs to be looked at.
    """
    tau_da = m_spec_tof_hist[:, 1:] - m_spec_tof_hist[:, :-1]

    m_lambda_fwhm = general.resolution_double_chopper(
        m_lambda,
        z0=d_cx[:, np.newaxis] / 1000.0,
        freq=cat.frequency[:, np.newaxis],
        L=flight_distance[:, np.newaxis] / 1000.0,
        H=cat.ss2vg[originalscanpoint] / 1000.0,
        xsi=phase_angle[:, np.newaxis],
        tau_da=tau_da,
    )
    m_lambda_fwhm *= m_lambda

    # put the detector positions and mode into the dictionary as well.
    detector_z = cat.dz
    detector_y = cat.dy
    mode = cat.mode

    d = dict()
    d["path"] = cat.path
    d["datafilename"] = cat.filename
    d["datafile_number"] = cat.datafile_number

    if h5norm is not None:
        d["normfilename"] = h5norm.filename
    d["m_topandtail"] = detector
    d["m_topandtail_sd"] = detector_sd
    d["n_spectra"] = n_spectra
    d["bm1_counts"] = bm1_counts
    d["m_spec"] = m_spec
    d["m_spec_sd"] = m_spec_sd
    d["m_beampos"] = beam_centre
    d["m_lambda"] = m_lambda
    d["m_lambda_fwhm"] = m_lambda_fwhm
    d["m_lambda_hist"] = m_lambda_hist
    d["m_spec_tof"] = m_spec_tof
    d["mode"] = mode
    d["detector_z"] = detector_z
    d["detector_y"] = detector_y
    d["domega"] = domega
    d["lopx"] = lopx
    d["hipx"] = hipx

    self.processed_spectrum = d
    return m_lambda, m_spec, m_spec_sd
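# A numeric aside on the degrees -> microseconds conversions in the
# `toffset` expression above: one chopper revolution takes 1/freq seconds,
# so an angle of `a` degrees corresponds to 1.0e6 * a / (360 * freq)
# microseconds. The extra factors of 2 in the code follow the pickup and
# phase-opening conventions described in its docstring. Numbers here are
# illustrative only, not instrument settings.
freq = 24.0                          # chopper frequency, Hz
a = 5.0                              # an angle, degrees
t_us = 1.0e6 * a / (360.0 * freq)    # ~578.7 microseconds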
def correct_for_gravity(detector, detector_sd, lamda, coll_distance,
                        sample_det, lo_wavelength, hi_wavelength,
                        theta=0):
    """
    Returns a gravity corrected yt plot, given the data, its associated
    errors, the wavelength corresponding to each of the time bins, and
    the trajectory of the neutrons. `lo_wavelength` and `hi_wavelength`
    are wavelength cutoffs to ignore.

    Parameters
    ----------
    detector : np.ndarray
        Detector image. Has shape (N, T, Y)
    detector_sd : np.ndarray
        Standard deviations of detector image
    lamda : np.ndarray
        Wavelengths corresponding to the detector image, has shape (N, T)
    coll_distance : float
        Collimation distance between slits, mm
    sample_det : float
        Sample - detector distance, mm
    lo_wavelength : float
        Low wavelength cutoff, Angstrom
    hi_wavelength : float
        High wavelength cutoff, Angstrom
    theta : float
        Angle between second collimation slit, first collimation slit,
        and horizontal

    Returns
    -------
    corrected_data, corrected_data_sd, m_gravcorrcoefs :
            np.ndarray, np.ndarray, np.ndarray
        The corrected image and its standard deviations.
        m_gravcorrcoefs is a theoretical prediction of where the
        specular ridge is for each wavelength; it is used to calculate
        the actual angle of incidence in the reduction process.
    """
    num_lambda = np.size(lamda, axis=1)

    x_init = np.arange((np.size(detector, axis=2) + 1) * 1.0) - 0.5

    m_gravcorrcoefs = np.zeros((np.size(detector, 0)), dtype="float64")

    corrected_data = np.zeros_like(detector)
    corrected_data_sd = np.zeros_like(detector)

    for spec in range(np.size(detector, 0)):
        neutron_speeds = general.wavelength_velocity(lamda[spec])
        trajectories = pm.find_trajectory(
            coll_distance / 1000.0, theta, neutron_speeds
        )
        travel_distance = (coll_distance + sample_det[spec]) / 1000.0

        # centres(t,)
        # TODO: don't use centroids, use Gaussian peak
        centroids = np.apply_along_axis(ut.centroid, 1, detector[spec])
        lopx = np.searchsorted(lamda[spec], lo_wavelength)
        hipx = np.searchsorted(lamda[spec], hi_wavelength)

        def f(tru_centre):
            deflections = pm.y_deflection(
                trajectories[lopx:hipx],
                neutron_speeds[lopx:hipx],
                travel_distance,
            )

            model = 1000.0 * deflections / Y_PIXEL_SPACING + tru_centre
            diff = model - centroids[lopx:hipx, 0]
            diff = diff[~np.isnan(diff)]
            return diff

        # find the beam centre for an infinitely fast neutron
        x0 = np.array([np.nanmean(centroids[lopx:hipx, 0])])
        res = leastsq(f, x0)
        m_gravcorrcoefs[spec] = res[0][0]

        total_deflection = 1000.0 * pm.y_deflection(
            trajectories, neutron_speeds, travel_distance
        )
        total_deflection /= Y_PIXEL_SPACING

        x_rebin = x_init.T + total_deflection[:, np.newaxis]
        for wavelength in range(np.size(detector, axis=1)):
            output = rebin.rebin(
                x_init,
                detector[spec, wavelength],
                x_rebin[wavelength],
                y1_sd=detector_sd[spec, wavelength],
            )

            corrected_data[spec, wavelength] = output[0]
            corrected_data_sd[spec, wavelength] = output[1]

    return corrected_data, corrected_data_sd, m_gravcorrcoefs
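# Scale of the effect corrected here: a neutron falls y = 0.5 * g * t^2
# during its flight. A hedged back-of-envelope for a 20 Angstrom neutron
# (v ~ 197.8 m/s) over a 3 m collimation + sample-detector path; the path
# length is illustrative:
g = 9.81                              # gravitational acceleration, m s^-2
v = 197.8                             # speed of a 20 Angstrom neutron, m/s
t = 3.0 / v                           # time to travel 3 m
drop_mm = 1000 * 0.5 * g * t ** 2     # ~1.1 mm, of order a detector pixel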