Example #1
    def test_div(self):
        c, dc = EP.EPdiv(1.1, 0.5, 1.1, 0.5)
        assert_array_almost_equal(1, c)
        assert_array_almost_equal(0.642824346533225, dc)

        c, dc = EP.EPdiv(0, 0.5, 1.1, 0.5)
        assert_equal(0, c)
        assert_array_almost_equal(0.45454545454545453, dc)
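
The two assertions are consistent with the standard quotient rule for independent uncertainties. Below is a minimal sketch of what an EPdiv-style routine computes; it is a hypothetical re-implementation for illustration, not refnx's actual code.

    import numpy as np

    def epdiv_sketch(a, da, b, db):
        # hypothetical quotient rule: c = a / b,
        # dc = |c| * sqrt((da / a)**2 + (db / b)**2)
        a, da, b, db = map(np.asarray, (a, da, b, db))
        c = a / b
        with np.errstate(divide="ignore", invalid="ignore"):
            dc = np.abs(c) * np.sqrt((da / a) ** 2 + (db / b) ** 2)
        # when a == 0 the relative form is undefined; the surviving
        # contribution is da / |b|, matching the second assertion above
        dc = np.where(a == 0, da / np.abs(b), dc)
        return c, dc

    # epdiv_sketch(1.1, 0.5, 1.1, 0.5) -> (1.0, 0.642824346533225)
    # epdiv_sketch(0, 0.5, 1.1, 0.5)   -> (0.0, 0.45454545454545453)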
Example #2
    def test_pow(self):
        c, dc = EP.EPpow(1.1, 0.5, 1.1, 0.5)
        assert_array_almost_equal(1.1105342410545758, c)
        assert_array_almost_equal(0.5577834505363953, dc)

        c, dc = EP.EPpow(1.1, 0.5, 1.1, 0)
        assert_array_almost_equal(1.1105342410545758, c)
        assert_array_almost_equal(0.5552671205272879, dc)
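
For c = a**b the propagated uncertainty combines both inputs through the partial derivatives dc/da = b*a**(b - 1) and dc/db = c*ln(a), giving dc = |c| * sqrt((b*da/a)**2 + (ln(a)*db)**2). A quick standalone check of both assertions (plain arithmetic, not refnx code):

    import numpy as np

    a, da, b, db = 1.1, 0.5, 1.1, 0.5
    c = a ** b                         # 1.1105342410545758
    dc = abs(c) * np.sqrt((b * da / a) ** 2 + (np.log(a) * db) ** 2)
    # dc -> 0.5577834505363953; with db = 0 only the first term
    # survives, so dc = c * b * da / a -> 0.5552671205272879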
Example #3
    def test_mul(self):
        c, dc = EP.EPmul(1.1, 0.5, 1.1, 0.5)
        assert_array_almost_equal(1.21, c)
        assert_equal(np.sqrt(0.605), dc)

        c, dc = EP.EPmul(0, 0.5, 1.1, 0.5)
        assert_equal(0, c)
        assert_equal(0.55, dc)
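
The product rule gives dc = sqrt((a*db)**2 + (b*da)**2); with a = b = 1.1 and da = db = 0.5 both terms are 0.55**2 = 0.3025, hence dc = sqrt(0.605). When one factor is zero its term vanishes and only b*da = 0.55 remains, as the second assertion shows. In standalone form (not refnx code):

    import numpy as np

    a, da, b, db = 1.1, 0.5, 1.1, 0.5
    dc = np.sqrt((a * db) ** 2 + (b * da) ** 2)   # ~0.7778, i.e. sqrt(0.605)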
Example #4
    def reflectivity(self):
        """
        The reflectivity of the sampled system
        """
        rerr = np.sqrt(self.reflected_beam)
        bmon_reflect_err = np.sqrt(self.bmon_reflect)

        ierr = np.sqrt(self.direct_beam)
        bmon_direct_err = np.sqrt(self.bmon_direct)

        dx = np.sqrt(
            (self.dlambda) ** 2 + self.dtheta ** 2 + (0.68 * self.rebin) ** 2
        )
        dx *= self.q

        # divide reflectivity signal by bmon
        ref, rerr = ErrorProp.EPdiv(
            self.reflected_beam, rerr, self.bmon_reflect, bmon_reflect_err
        )
        # divide direct signal by bmon
        direct, ierr = ErrorProp.EPdiv(
            self.direct_beam, ierr, self.bmon_direct, bmon_direct_err
        )

        # now calculate reflectivity
        ref, rerr = ErrorProp.EPdiv(ref, rerr, direct, ierr)

        # filter out points with zero counts, because their propagated
        # error is incorrect
        mask = rerr != 0

        dataset = ReflectDataset(
            data=(self.q[mask], ref[mask], rerr[mask], dx[mask])
        )

        # apply some counting statistics on top of dataset otherwise there will
        # be no variation at e.g. critical edge.
        # return dataset.synthesise()
        return dataset
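
The dx line builds a fractional Q resolution by adding the wavelength spread, the angular spread and a rebinning term in quadrature, then multiplies by q to make it absolute. The 0.68 factor on the rebin term looks like a width-conversion constant for the rebinning contribution; that reading is an assumption, not something stated in this code. A standalone sketch with invented values:

    import numpy as np

    # invented fractional resolutions, for illustration only
    dlambda, dtheta, rebin = 0.033, 0.02, 0.02
    q = np.linspace(0.01, 0.3, 5)

    # quadrature sum of fractional terms, scaled to an absolute dQ
    dx = np.sqrt(dlambda ** 2 + dtheta ** 2 + (0.68 * rebin) ** 2) * q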
Example #5
    def reflectivity(self):
        """
        The reflectivity of the sampled system
        """
        rerr = np.sqrt(self.reflected_beam)
        ierr = np.sqrt(self.direct_beam)
        dx = np.sqrt((self.dlambda)**2 + self.dtheta**2 + self.rebin**2)

        ref, rerr = ErrorProp.EPdiv(self.reflected_beam, rerr,
                                    self.direct_beam, ierr)
        dataset = ReflectDataset(data=(self.q, ref, rerr, dx * self.q))

        # apply some counting statistics on top of dataset otherwise there will
        # be no variation at e.g. critical edge.
        return dataset.synthesise()
Example #6
    def test_transform(self):
        pth = os.path.dirname(os.path.abspath(__file__))

        fname = os.path.join(pth, 'c_PLP0011859_q.txt')
        data = ReflectDataset(fname)
        t = Transform('logY')

        yt, et = t(data.x, data.y, y_err=data.y_err)
        assert_equal(yt, np.log10(data.y))

        yt, _ = t(data.x, data.y, y_err=None)
        assert_equal(yt, np.log10(data.y))

        EPy, EPe = EP.EPlog10(data.y, data.y_err)
        assert_equal(yt, EPy)
        assert_equal(et, EPe)
Example #7
    def __transform(self, x, y, y_err=None):
        r"""
        Transform the data passed in

        Parameters
        ----------
        x : array-like
            abscissa values
        y : array-like
            ordinate values
        y_err : array-like
            uncertainty (one standard deviation) in the ordinate values

        Returns
        -------
        yt, et : tuple
            The transformed data
        """

        if y_err is None:
            etemp = np.ones_like(y)
        else:
            etemp = y_err

        if self.form in ["lin", None]:
            yt = np.copy(y)
            et = np.copy(etemp)
        elif self.form == "logY":
            yt, et = EP.EPlog10(y, etemp)
            if not np.isfinite(yt).all():
                warnings.warn(
                    "Some of the transformed data was non-finite."
                    " Please check your datasets for points with zero or"
                    " negative values.",
                    RuntimeWarning,
                )
        elif self.form == "YX4":
            yt = y * np.power(x, 4)
            et = etemp * np.power(x, 4)
        elif self.form == "YX2":
            yt = y * np.power(x, 2)
            et = etemp * np.power(x, 2)
        if y_err is None:
            return yt, None
        else:
            return yt, et
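
In the YX4 and YX2 branches x is treated as exact, so multiplying y by x**n scales the uncertainty by the same factor (et = y_err * x**n) and the relative error is unchanged; no quadrature is needed. A short check with invented values:

    import numpy as np

    x = np.array([0.01, 0.02, 0.04])
    y = np.array([1.0, 0.1, 0.01])
    y_err = 0.05 * y

    # YX4: signal and uncertainty scale by the same exact factor
    yt = y * np.power(x, 4)
    et = y_err * np.power(x, 4)
    assert np.allclose(et / yt, y_err / y)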
Example #8
    def __transform(self, x, y, y_err=None):
        r"""
        Transform the data passed in

        Parameters
        ----------
        x : array-like
            abscissa values
        y : array-like
            ordinate values
        y_err : array-like
            uncertainty (one standard deviation) in the ordinate values

        Returns
        -------
        yt, et : tuple
            The transformed data
        """

        if y_err is None:
            etemp = np.ones_like(y)
        else:
            etemp = y_err

        if self.form in ['lin', None]:
            yt = np.copy(y)
            et = np.copy(etemp)
        elif self.form == 'logY':
            yt, et = EP.EPlog10(y, etemp)
        elif self.form == 'YX4':
            yt = y * np.power(x, 4)
            et = etemp * np.power(x, 4)
        elif self.form == 'YX2':
            yt = y * np.power(x, 2)
            et = etemp * np.power(x, 2)
        if y_err is None:
            return yt, None
        else:
            return yt, et
Example #9
    def test_tan(self):
        c, dc = EP.EPtan(1.1, 0.5)
        assert_array_almost_equal(1.9647596572486525, c)
        assert_array_almost_equal(2.430140255375921, dc)
Example #10
    def test_cos(self):
        c, dc = EP.EPcos(1.1, 0.5)
        assert_array_almost_equal(0.4535961214255773, c)
        assert_array_almost_equal(0.4456036800307177, dc)
Example #11
    def test_sin(self):
        c, dc = EP.EPsin(1.1, 0.5)
        assert_array_almost_equal(0.8912073600614354, c)
        assert_array_almost_equal(0.22679806071278866, dc)
Example #12
    def test_exp(self):
        c, dc = EP.EPexp(1.1, 0.5)
        assert_array_almost_equal(3.0041660239464334, c)
        assert_array_almost_equal(1.5020830119732167, dc)
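
Examples #9 through #12 all follow the single-variable rule dc = |f'(x)| * dx: sec(x)**2 * dx for tan, |sin(x)| * dx for cos, |cos(x)| * dx for sin, and exp(x) * dx for exp. Verifying the tan case with plain arithmetic (not refnx code):

    import numpy as np

    x, dx = 1.1, 0.5
    c = np.tan(x)                # 1.9647596572486525
    dc = dx / np.cos(x) ** 2     # sec(x)**2 * dx -> 2.430140255375921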
Example #13
def reduce_xrdml(f, bkg=None, scale=1, sample_length=None):
    """
    Reduces a Panalytical XRDML file

    Parameters
    ----------
    f: file-like object or string
        The specular reflectivity (XRDML) file of interest
    bkg: list
        A list of file-like objects or strings that contain background
        measurements. The background is assumed to have the same number of
        points as the specular reflectivity curve.  The backgrounds are
        averaged and subtracted from the specular reflectivity
    scale: float
        The direct beam intensity (cps)
    sample_length: None or float
        If None then no footprint correction is done. Otherwise the transverse
        footprint of the sample (mm).

    Returns
    -------
    specular_q, specular_r, specular_dr: np.ndarray
        The specular reflectivity as a function of momentum transfer, Q.
    """

    spec = parse_xrdml_file(f)

    reflectivity = spec['intensities'] / spec['count_time']
    reflectivity_s = np.sqrt(reflectivity) / spec['count_time']

    # do the background subtraction
    if bkg is not None:
        bkgds = [parse_xrdml_file(fi) for fi in bkg]

        bkgd_refs = np.r_[[bkgd['intensities'] for bkgd in bkgds]]
        bkgd_refs_s = np.r_[[
            np.sqrt(bkgd['intensities']) / bkgd['count_time'] for bkgd in bkgds
        ]]
        bkgd_refs_var = bkgd_refs_s**2
        weights = 1. / bkgd_refs_var
        numerator = np.sum(bkgd_refs * weights, axis=0)
        denominator = np.sum(weights, axis=0)

        total_bkgd = numerator / denominator
        total_bkgd_s = np.sqrt(1 / denominator)

        reflectivity, reflectivity_s = EP.EPsub(reflectivity, reflectivity_s,
                                                total_bkgd, total_bkgd_s)

    # work out the Q values
    qx, qy, qz = general.q2(spec['omega'], spec['twotheta'],
                            np.zeros_like(spec['omega']), spec['wavelength'])

    # do a footprint correction
    if sample_length is not None:
        footprint_correction = general.beamfrac(
            np.array([XRR_BEAMWIDTH_SD]) * 2.35, np.array([sample_length]),
            spec['omega'])
        reflectivity /= footprint_correction
        reflectivity_s /= footprint_correction

    # divide by the direct beam intensity
    # assumes that the direct beam intensity is enormous, so the counting
    # uncertainties in the scale factor are negligible.
    reflectivity /= scale
    reflectivity_s /= scale

    return qz, reflectivity, reflectivity_s
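
The background branch is an inverse-variance weighted mean: each background spectrum contributes with weight 1/sigma**2, the combined value is sum(w*x)/sum(w), and the combined standard deviation is sqrt(1/sum(w)), the standard result for averaging independent measurements. A self-contained sketch (the array shapes are an assumption):

    import numpy as np

    # two hypothetical background spectra with Poisson uncertainties
    bkgd = np.array([[10.0, 12.0, 9.0],
                     [11.0, 10.0, 8.0]])
    bkgd_s = np.sqrt(bkgd)

    weights = 1.0 / bkgd_s ** 2
    total = np.sum(bkgd * weights, axis=0) / np.sum(weights, axis=0)
    total_s = np.sqrt(1.0 / np.sum(weights, axis=0))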
Example #14
    def test_sub(self):
        c, dc = EP.EPsub(1.1, 0.5, 1.1, 0.5)
        assert_equal(0, c)
        assert_equal(np.sqrt(0.5), dc)
Example #15
    def test_add(self):
        c, dc = EP.EPadd(1.1, 0.5, 1.1, 0.5)
        assert_equal(2.2, c)
        assert_equal(np.sqrt(0.5), dc)
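
Addition and subtraction (Examples #14 and #15) propagate identically: only the absolute uncertainties matter, dc = sqrt(da**2 + db**2), which is sqrt(0.5) for da = db = 0.5 regardless of whether c comes out as 0 or 2.2:

    import numpy as np

    da = db = 0.5
    dc = np.sqrt(da ** 2 + db ** 2)   # sqrt(0.5), for both EPadd and EPsub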
Example #16
def reduce_xrdml(f, bkg=None, scale=None, sample_length=None):
    """
    Reduces a Panalytical XRDML file

    Parameters
    ----------
    f: file-like object or string
        The specular reflectivity (XRDML) file of interest
    bkg: list
        A list of file-like objects or strings that contain background
        measurements. The background is assumed to have the same number of
        points as the specular reflectivity curve.  The backgrounds are
        averaged and subtracted from the specular reflectivity
    scale: float, None
        The direct beam intensity (cps). If `scale is None` then the dataset
        is scaled by the point with maximum intensity below Q = 0.0318 (Q_crit
        for Si at 8.048 keV).
    sample_length: None or float
        If None then no footprint correction is done. Otherwise the transverse
        footprint of the sample (mm).

    Returns
    -------
    dataset: refnx.dataset.ReflectDataset
        The specular reflectivity as a function of momentum transfer, Q.
    """

    spec = parse_xrdml_file(f)

    reflectivity = spec["intensities"] / spec["count_time"]
    reflectivity_s = np.sqrt(reflectivity) / spec["count_time"]

    # do the background subtraction
    if bkg is not None:
        bkgds = [parse_xrdml_file(fi) for fi in bkg]

        bkgd_refs = np.r_[[bkgd["intensities"] for bkgd in bkgds]]
        bkgd_refs_s = np.r_[[
            np.sqrt(bkgd["intensities"]) / bkgd["count_time"] for bkgd in bkgds
        ]]
        bkgd_refs_var = bkgd_refs_s**2
        weights = 1.0 / bkgd_refs_var
        numerator = np.sum(bkgd_refs * weights, axis=0)
        denominator = np.sum(weights, axis=0)

        total_bkgd = numerator / denominator
        total_bkgd_s = np.sqrt(1 / denominator)

        reflectivity, reflectivity_s = EP.EPsub(reflectivity, reflectivity_s,
                                                total_bkgd, total_bkgd_s)

    # work out the Q values
    qx, qy, qz = general.q2(
        spec["omega"],
        spec["twotheta"],
        np.zeros_like(spec["omega"]),
        spec["wavelength"],
    )

    # do a footprint correction
    if sample_length is not None:
        footprint_correction = general.beamfrac(
            np.array([XRR_BEAMWIDTH_SD]) * 2.35,
            np.array([sample_length]),
            spec["omega"],
        )
        reflectivity /= footprint_correction
        reflectivity_s /= footprint_correction

    # divide by the direct beam intensity
    # assumes that the direct beam intensity is enormous, so the counting
    # uncertainties in the scale factor are negligible.
    if scale is None:
        # no scale factor was specified, so normalise by highest intensity point
        # below Qc for Silicon at 8.048 keV
        below_qc = qz[qz < 0.0318]
        if len(below_qc):
            scale = np.max(reflectivity[qz < 0.0318])

    reflectivity /= scale
    reflectivity_s /= scale

    d = ReflectDataset(data=(qz, reflectivity, reflectivity_s))

    return d
Example #17
    def _reduce_single_angle(self, scale=1):
        """
        Reduce a single angle.
        """
        n_spectra = self.reflected_beam.n_spectra
        n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
        n_xpixels = np.size(self.reflected_beam.m_topandtail, 2)

        # we'll need the wavelengths to calculate Q.
        wavelengths = self.reflected_beam.m_lambda
        m_twotheta = np.zeros((n_spectra, n_tpixels, n_xpixels))

        detrot_difference = (self.reflected_beam.detector_z -
                             self.direct_beam.detector_z)

        # difference in pixels between reflected position and direct beam
        # at the two different detrots.
        QZ_PIXEL_SPACING = self.reflected_beam.cat.qz_pixel_size[0]
        dy = self.reflected_beam.detector_y

        # convert that pixel difference to angle (in small angle approximation)
        # higher `som` leads to lower m_beampos. i.e. higher two theta
        # is at lower pixel values
        beampos_2theta_diff = -(self.reflected_beam.m_beampos -
                                self.direct_beam.m_beampos)
        beampos_2theta_diff *= QZ_PIXEL_SPACING / dy[0]
        beampos_2theta_diff = np.degrees(beampos_2theta_diff)

        total_2theta_deflection = detrot_difference + beampos_2theta_diff

        # omega_nom.shape = (N, )
        omega_nom = total_2theta_deflection / 2.0
        omega_corrected = omega_nom[:, np.newaxis]

        m_twotheta += np.arange(n_xpixels * 1.0)[np.newaxis, np.newaxis, :]
        m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
        # minus sign in following line because higher two theta is at lower
        # pixel values
        m_twotheta *= -QZ_PIXEL_SPACING / dy[:, np.newaxis, np.newaxis]
        m_twotheta = np.degrees(m_twotheta)
        m_twotheta += detrot_difference

        # you may be reflecting upside down, reverse the sign.
        upside_down = np.sign(omega_corrected[:, 0])
        m_twotheta *= upside_down[:, np.newaxis, np.newaxis]
        omega_corrected *= upside_down[:, np.newaxis]
        """
        --Specular Reflectivity--
        Use the (constant wavelength) spectra that have already been integrated
        over 2theta (in processnexus) to calculate the specular reflectivity.
        Beware: this is because m_topandtail has already been divided through
        by monitor counts and error propagated (at the end of processnexus).
        Thus, the 2theta pixels are correlated to some degree. If we use the 2D
        plot to calculate reflectivity
        (sum {Iref_{2theta, lambda}}/I_direct_{lambda}) then the error bars in
        the reflectivity turn out much larger than they should be.
        """
        ydata, ydata_sd = EP.EPdiv(
            self.reflected_beam.m_spec,
            self.reflected_beam.m_spec_sd,
            self.direct_beam.m_spec,
            self.direct_beam.m_spec_sd,
        )

        # calculate the 1D Qz values.
        xdata = general.q(omega_corrected, wavelengths)
        xdata_sd = (self.reflected_beam.m_lambda_fwhm /
                    self.reflected_beam.m_lambda)**2
        xdata_sd += (self.reflected_beam.domega[:, np.newaxis] /
                     omega_corrected)**2
        xdata_sd = np.sqrt(xdata_sd) * xdata
        """
        ---Offspecular reflectivity---
        normalise the counts in the reflected beam by the direct beam
        spectrum this gives a reflectivity. Also propagate the errors,
        leaving the fractional variance (dr/r)^2.
        --Note-- that adjacent y-pixels (same wavelength) are correlated in
        this treatment, so you can't just sum over them.
        i.e. (c_0 / d) + ... + (c_n / d) != (c_0 + ... + c_n) / d
        """
        m_ref, m_ref_sd = EP.EPdiv(
            self.reflected_beam.m_topandtail,
            self.reflected_beam.m_topandtail_sd,
            self.direct_beam.m_spec[:, :, np.newaxis],
            self.direct_beam.m_spec_sd[:, :, np.newaxis],
        )

        # you may have had divide-by-zeros.
        m_ref = np.where(np.isinf(m_ref), 0, m_ref)
        m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

        # calculate the Q values for the detector pixels.  Each pixel has
        # different 2theta and different wavelength, ASSUME that they have the
        # same angle of incidence
        qx, qy, qz = general.q2(
            omega_corrected[:, :, np.newaxis],
            m_twotheta,
            0,
            wavelengths[:, :, np.newaxis],
        )

        reduction = {}
        reduction["x"] = self.x = xdata
        reduction["x_err"] = self.x_err = xdata_sd
        reduction["y"] = self.y = ydata / scale
        reduction["y_err"] = self.y_err = ydata_sd / scale
        reduction["omega"] = omega_corrected
        reduction["m_twotheta"] = m_twotheta
        reduction["m_ref"] = self.m_ref = m_ref
        reduction["m_ref_err"] = self.m_ref_err = m_ref_sd
        reduction["qz"] = self.m_qz = qz
        reduction["qx"] = self.m_qx = qx
        reduction["nspectra"] = self.n_spectra = n_spectra
        reduction["start_time"] = self.reflected_beam.start_time
        reduction["datafile_number"] = self.datafile_number = (
            self.reflected_beam.datafile_number)

        fnames = []
        datasets = []
        datafilename = self.reflected_beam.datafilename
        datafilename = os.path.basename(datafilename.split(".nx.hdf")[0])

        for i in range(n_spectra):
            data_tup = self.data(scanpoint=i)
            datasets.append(ReflectDataset(data_tup))

        if self.save:
            for i, dataset in enumerate(datasets):
                fname = "{0}_{1}.dat".format(datafilename, i)
                fnames.append(fname)
                with open(fname, "wb") as f:
                    dataset.save(f)

                fname = "{0}_{1}.xml".format(datafilename, i)
                with open(fname, "wb") as f:
                    dataset.save_xml(f, start_time=reduction["start_time"][i])

        reduction["fname"] = fnames
        return datasets, deepcopy(reduction)
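
The 1D resolution lines implement the usual fractional quadrature for Q = 4*pi*sin(omega)/lambda: (dQ/Q)**2 ~ (dlambda/lambda)**2 + (domega/omega)**2, using the small-angle form domega/tan(omega) ~ domega/omega, after which multiplying by Q gives an absolute dQ. A numeric sketch with invented values:

    import numpy as np

    lam, dlam = 10.0, 0.33       # wavelength and its FWHM, invented
    omega, domega = 0.6, 0.01    # angle of incidence (degrees), invented

    q = 4 * np.pi * np.sin(np.radians(omega)) / lam
    dq = q * np.sqrt((dlam / lam) ** 2 + (domega / omega) ** 2)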
Example #18
    def test_log(self):
        c, dc = EP.EPlog(1.1, 0.5)
        assert_array_almost_equal(0.09531017980432493, c)
        assert_array_almost_equal(0.45454545454545453, dc)
Example #19
    def _reduce_single_angle(self, scale=1):
        """
        Reduce a single angle.
        """
        n_spectra = self.reflected_beam.n_spectra
        n_tpixels = np.size(self.reflected_beam.m_topandtail, 1)
        n_ypixels = np.size(self.reflected_beam.m_topandtail, 2)

        # calculate omega and two_theta depending on the mode.
        mode = self.reflected_beam.mode

        # we'll need the wavelengths to calculate Q.
        wavelengths = self.reflected_beam.m_lambda
        m_twotheta = np.zeros((n_spectra, n_tpixels, n_ypixels))

        detector_z_difference = (self.reflected_beam.detector_z -
                                 self.direct_beam.detector_z)

        beampos_z_difference = (self.reflected_beam.m_beampos -
                                self.direct_beam.m_beampos)

        Y_PIXEL_SPACING = self.reflected_beam.cat.y_pixels_per_mm[0]

        total_z_deflection = (detector_z_difference +
                              beampos_z_difference * Y_PIXEL_SPACING)

        if mode in ['FOC', 'POL', 'POLANAL', 'MT']:
            # omega_nom.shape = (N, )
            omega_nom = np.degrees(
                np.arctan(total_z_deflection / self.reflected_beam.detector_y)
                / 2.)
            '''
            Wavelength specific angle of incidence correction
            This involves:
            1) working out the trajectory of the neutrons through the
            collimation system.
            2) where those neutrons intersect the sample.
            3) working out the elevation of the neutrons when they hit the
            sample.
            4) correcting the angle of incidence.
            '''
            speeds = general.wavelength_velocity(wavelengths)
            collimation_distance = self.reflected_beam.cat.collimation_distance
            s2_sample_distance = (self.reflected_beam.cat.sample_distance -
                                  self.reflected_beam.cat.slit2_distance)

            # work out the trajectories of the neutrons for them to pass
            # through the collimation system.
            trajectories = find_trajectory(collimation_distance / 1000., 0,
                                           speeds)

            # work out where the beam hits the sample
            res = parabola_line_intersection_point(s2_sample_distance / 1000,
                                                   0, trajectories, speeds,
                                                   omega_nom[:, np.newaxis])
            intersect_x, intersect_y, x_prime, elevation = res

            # correct the angle of incidence with a wavelength dependent
            # elevation.
            omega_corrected = omega_nom[:, np.newaxis] - elevation

            m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
            m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
            m_twotheta *= Y_PIXEL_SPACING
            m_twotheta += detector_z_difference
            m_twotheta /= (self.reflected_beam.detector_y[:, np.newaxis,
                                                          np.newaxis])
            m_twotheta = np.arctan(m_twotheta)
            m_twotheta = np.degrees(m_twotheta)

            # you may be reflecting upside down, reverse the sign.
            upside_down = np.sign(omega_corrected[:, 0])
            m_twotheta *= upside_down[:, np.newaxis, np.newaxis]
            omega_corrected *= upside_down[:, np.newaxis]

        elif mode in ['SB', 'DB']:
            # the angle of incidence is half the two theta of the reflected
            # beam
            omega = np.arctan(
                total_z_deflection / self.reflected_beam.detector_y) / 2.

            # work out two theta for each of the detector pixels
            m_twotheta += np.arange(n_ypixels * 1.)[np.newaxis, np.newaxis, :]
            m_twotheta -= self.direct_beam.m_beampos[:, np.newaxis, np.newaxis]
            m_twotheta += detector_z_difference
            m_twotheta -= (
                self.reflected_beam.detector_y[:, np.newaxis, np.newaxis] *
                np.tan(omega[:, np.newaxis, np.newaxis]))

            m_twotheta /= (self.reflected_beam.detector_y[:, np.newaxis,
                                                          np.newaxis])
            m_twotheta = np.arctan(m_twotheta)
            m_twotheta += omega[:, np.newaxis, np.newaxis]

            # still in radians at this point
            # add an extra dimension, because omega_corrected needs to be the
            # angle of incidence for each wavelength. I.e. should be
            # broadcastable to (N, T)
            omega_corrected = np.degrees(omega)[:, np.newaxis]
            m_twotheta = np.degrees(m_twotheta)
        '''
        --Specular Reflectivity--
        Use the (constant wavelength) spectra that have already been integrated
        over 2theta (in processnexus) to calculate the specular reflectivity.
        Beware: this is because m_topandtail has already been divided through
        by monitor counts and error propagated (at the end of processnexus).
        Thus, the 2theta pixels are correlated to some degree. If we use the 2D
        plot to calculate reflectivity
        (sum {Iref_{2theta, lambda}}/I_direct_{lambda}) then the error bars in
        the reflectivity turn out much larger than they should be.
        '''
        ydata, ydata_sd = EP.EPdiv(self.reflected_beam.m_spec,
                                   self.reflected_beam.m_spec_sd,
                                   self.direct_beam.m_spec,
                                   self.direct_beam.m_spec_sd)

        # calculate the 1D Qz values.
        xdata = general.q(omega_corrected, wavelengths)
        xdata_sd = (self.reflected_beam.m_lambda_fwhm /
                    self.reflected_beam.m_lambda)**2
        xdata_sd += (self.reflected_beam.domega[:, np.newaxis] /
                     omega_corrected)**2
        xdata_sd = np.sqrt(xdata_sd) * xdata
        '''
        ---Offspecular reflectivity---
        normalise the counts in the reflected beam by the direct beam
        spectrum this gives a reflectivity. Also propagate the errors,
        leaving the fractional variance (dr/r)^2.
        --Note-- that adjacent y-pixels (same wavelength) are correlated in
        this treatment, so you can't just sum over them.
        i.e. (c_0 / d) + ... + (c_n / d) != (c_0 + ... + c_n) / d
        '''
        m_ref, m_ref_sd = EP.EPdiv(
            self.reflected_beam.m_topandtail,
            self.reflected_beam.m_topandtail_sd,
            self.direct_beam.m_spec[:, :, np.newaxis],
            self.direct_beam.m_spec_sd[:, :, np.newaxis])

        # you may have had divide-by-zeros.
        m_ref = np.where(np.isinf(m_ref), 0, m_ref)
        m_ref_sd = np.where(np.isinf(m_ref_sd), 0, m_ref_sd)

        # calculate the Q values for the detector pixels.  Each pixel has
        # different 2theta and different wavelength, ASSUME that they have the
        # same angle of incidence
        qx, qy, qz = general.q2(omega_corrected[:, :, np.newaxis], m_twotheta,
                                0, wavelengths[:, :, np.newaxis])

        reduction = {}
        reduction['x'] = self.x = xdata
        reduction['x_err'] = self.x_err = xdata_sd
        reduction['y'] = self.y = ydata / scale
        reduction['y_err'] = self.y_err = ydata_sd / scale
        reduction['omega'] = omega_corrected
        reduction['m_twotheta'] = m_twotheta
        reduction['m_ref'] = self.m_ref = m_ref
        reduction['m_ref_err'] = self.m_ref_err = m_ref_sd
        reduction['qz'] = self.m_qz = qz
        reduction['qx'] = self.m_qx = qx
        reduction['nspectra'] = self.n_spectra = n_spectra
        reduction['start_time'] = self.reflected_beam.start_time
        reduction['datafile_number'] = self.datafile_number = (
            self.reflected_beam.datafile_number)

        fnames = []
        datasets = []
        datafilename = self.reflected_beam.datafilename
        datafilename = os.path.basename(datafilename.split('.nx.hdf')[0])

        for i in range(n_spectra):
            data_tup = self.data(scanpoint=i)
            datasets.append(ReflectDataset(data_tup))

        if self.save:
            for i, dataset in enumerate(datasets):
                fname = '{0}_{1}.dat'.format(datafilename, i)
                fnames.append(fname)
                with open(fname, 'wb') as f:
                    dataset.save(f)

                fname = '{0}_{1}.xml'.format(datafilename, i)
                with open(fname, 'wb') as f:
                    dataset.save_xml(f, start_time=reduction['start_time'][i])

        reduction['fname'] = fnames
        return datasets, deepcopy(reduction)
Example #20
    def test_log10(self):
        c, dc = EP.EPlog10(1.1, 0.5)
        assert_array_almost_equal(0.04139268515822508, c)
        assert_array_almost_equal(0.19740658268329625, dc)
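
The two logarithm rules close out the set: the natural log gives dc = dx/x (0.5/1.1, Example #18), and log10 carries an extra 1/ln(10) factor, dc = dx/(x*ln(10)):

    import numpy as np

    x, dx = 1.1, 0.5
    dc_log = dx / x                    # 0.45454545... (EPlog)
    dc_log10 = dx / (x * np.log(10))   # 0.19740658... (EPlog10)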