Example #1
0
 def test_nsplice(self):
     # splicing the two datasets should recover the known scale factor
     # and its uncertainty
     result = nsplice.get_scaling_in_overlap(
         self.qv0, self.rv0, self.drv0, self.qv1, self.rv1, self.drv1)
     factor, dfactor, _ = result
     assert_almost_equal(factor, self.scale)
     assert_almost_equal(dfactor, self.dscale)
Example #2
0
    def test_nsplice_overlap_points(self):
        # the splice should produce a scale factor of 0.5, with exactly
        # four points of the first dataset lying in the overlap region
        factor, dfactor, in_overlap = nsplice.get_scaling_in_overlap(
            self.x0, self.y0, self.dy0,
            self.x1, self.y1, self.dy1)

        assert_almost_equal(factor, 0.5)
        assert_equal(np.count_nonzero(in_overlap), 4)
Example #3
0
 def test_nsplice_nooverlap(self):
     # if the datasets share no q-range the scale factor and its
     # uncertainty should be non-finite (np.nan) and the overlap empty
     truncated = self.qv0[0:10]
     factor, dfactor, in_overlap = nsplice.get_scaling_in_overlap(
         truncated, self.rv0, self.drv0,
         self.qv1, self.rv1, self.drv1)
     assert_(not np.isfinite(factor))
     assert_(not np.isfinite(dfactor))
     assert_equal(np.size(in_overlap, 0), 0)
Example #4
0
    def test_nsplice_overlap_points(self):
        # splicing these datasets gives a scale factor of 0.5 and four
        # overlapping points in the first dataset
        args = (self.x0, self.y0, self.dy0, self.x1, self.y1, self.dy1)
        scale, dscale, overlap = nsplice.get_scaling_in_overlap(*args)

        assert_almost_equal(scale, 0.5)
        assert_equal(np.count_nonzero(overlap), 4)
Example #5
0
 def test_nsplice(self):
     # splicing two overlapping datasets recovers the expected scale
     # factor and uncertainty
     args = (self.qv0, self.rv0, self.drv0,
             self.qv1, self.rv1, self.drv1)
     factor, dfactor, _ = nsplice.get_scaling_in_overlap(*args)
     assert_almost_equal(factor, self.scale)
     assert_almost_equal(dfactor, self.dscale)
Example #6
0
 def test_nsplice_nooverlap(self):
     # when there is no overlap between the datasets the returned scale
     # factor and its uncertainty should be non-finite (np.nan)
     short_q = self.qv0[0:10]
     factor, dfactor, _ = nsplice.get_scaling_in_overlap(
         short_q, self.rv0, self.drv0,
         self.qv1, self.rv1, self.drv1)
     assert_(not np.isfinite(factor))
     assert_(not np.isfinite(dfactor))
Example #7
0
    def test_nsplice_notsorted(self):
        # get_scaling_in_overlap must not rely on sorted input, so a
        # random permutation of both datasets should give the same scale
        order0 = np.arange(np.size(self.qv0))
        np.random.shuffle(order0)
        order1 = np.arange(np.size(self.qv1))
        np.random.shuffle(order1)

        factor, dfactor, _ = nsplice.get_scaling_in_overlap(
            self.qv0[order0], self.rv0[order0], self.drv0[order0],
            self.qv1[order1], self.rv1[order1], self.drv1[order1])

        assert_almost_equal(factor, self.scale)
        assert_almost_equal(dfactor, self.dscale)
Example #8
0
    def test_nsplice_notsorted(self):
        # the overlap scaling should be independent of the ordering of
        # the input points, so randomly permute both datasets first
        perm0 = np.arange(np.size(self.qv0))
        np.random.shuffle(perm0)
        perm1 = np.arange(np.size(self.qv1))
        np.random.shuffle(perm1)

        shuffled0 = (self.qv0[perm0], self.rv0[perm0], self.drv0[perm0])
        shuffled1 = (self.qv1[perm1], self.rv1[perm1], self.drv1[perm1])
        scale, dscale, overlap = nsplice.get_scaling_in_overlap(
            *shuffled0, *shuffled1)

        assert_almost_equal(scale, self.scale)
        assert_almost_equal(dscale, self.dscale)
Example #9
0
    def add_data(self, data_tuple, requires_splice=False, trim_trailing=True):
        """
        Adds more data to the dataset

        Parameters
        ----------
        data_tuple : tuple
            2 to 4 member tuple containing the (x, y, y_sd, x_sd) data to add
            to the dataset. `y_sd` and `x_sd` are optional.
        requires_splice : bool, optional
            When the new data is added to the dataset do you want to scale it
            vertically so that it overlaps with the existing data? `y` and
            `y_sd` in `data_tuple` are both multiplied by the scaling factor.
        trim_trailing : bool, optional
            When the new data is concatenated do you want to remove points from
            the existing data that are in the overlap region? This might be
            done because the datapoints in the `data_tuple` you are adding have
            have lower `y_sd` than the preceding data.
        """
        xdata, ydata, ydata_sd, xdata_sd = self.data

        axdata, aydata = data_tuple[0:2]

        if len(data_tuple) > 2:
            aydata_sd = np.asfarray(data_tuple[2]).flatten()
        else:
            aydata_sd = np.ones_like(axdata)

        if len(data_tuple) > 3:
            axdata_sd = np.asfarray(data_tuple[3]).flatten()
        else:
            axdata_sd = np.zeros(np.size(axdata))

        qq = np.r_[xdata]
        rr = np.r_[ydata]
        dr = np.r_[ydata_sd]
        dq = np.r_[xdata_sd]

        # which values in the first dataset overlap with the second
        overlap_points = np.zeros_like(qq, 'bool')

        # go through and stitch them together.
        scale = 1.
        dscale = 0.
        if requires_splice and self.npoints > 1:
            scale, dscale, overlap_points = (
                get_scaling_in_overlap(qq,
                                       rr,
                                       dr,
                                       axdata,
                                       aydata,
                                       aydata_sd))
        if not trim_trailing:
            overlap_points[:] = False

        qq = np.r_[qq[~overlap_points], axdata]
        dq = np.r_[dq[~overlap_points], axdata_sd]

        rr = np.r_[rr[~overlap_points], aydata * scale]
        dr = np.r_[dr[~overlap_points], aydata_sd * scale]

        self.data = (qq, rr, dr, dq)
        self.sort()
Example #10
0
    def add_data(self, data_tuple, requires_splice=False, trim_trailing=True):
        """
        Adds more data to the dataset.

        Parameters
        ----------
        data_tuple : tuple
            2 to 4 member tuple containing the (x, y, y_err, x_err) data to add
            to the dataset. `y_err` and `x_err` are optional.
        requires_splice : bool, optional
            When the new data is added to the dataset do you want to scale it
            vertically so that it overlaps with the existing data? `y` and
            `y_err` in `data_tuple` are both multiplied by the scaling factor.
        trim_trailing : bool, optional
            When the new data is concatenated do you want to remove points from
            the existing data that are in the overlap region? This might be
            done because the datapoints in the `data_tuple` you are adding
            have lower `y_err` than the preceding data.

        Notes
        -----
        Raises `ValueError` if there are no points in the overlap region and
        `requires_splice` was True. The added data is not masked.

        """
        x, y, y_err, x_err = self.data

        # dataset has no points, can just initialise with the tuple
        if not len(self):
            self.data = data_tuple
            return

        ax, ay = data_tuple[0:2]

        # NOTE(review): superseded up-front validation; the try/except
        # blocks at the end of this method now enforce that both datasets
        # carry matching y_err/x_err.
        # if ((len(data_tuple) > 2 and self.y_err is None) or
        #         (len(data_tuple) == 2 and self.y_err is not None)):
        #     raise ValueError("Both the existing Data1D and the data you're"
        #                      " trying to add need to have y_err")
        #
        # if ((len(data_tuple) > 3 and self.x_err is None) or
        #         (len(data_tuple) == 3 and self.x_err is not None)):
        #     raise ValueError("Both the existing Data1D and the data you're"
        #                      " trying to add need to have x_err")

        # optional uncertainties supplied with the incoming data; stay
        # None when the tuple doesn't provide them
        ay_err = None
        ax_err = None

        if len(data_tuple) > 2:
            ay_err = np.array(data_tuple[2], dtype=float)

        if len(data_tuple) > 3:
            ax_err = np.array(data_tuple[3], dtype=float)

        # the newly added points are all unmasked (see Notes)
        mask2 = np.full_like(data_tuple[0], True, bool)

        # which values in the first dataset overlap with the second
        overlap_points = np.zeros_like(x, 'bool')

        # go through and stitch them together.
        scale = 1.
        dscale = 0.
        if requires_splice and len(self) > 1:
            scale, dscale, overlap_points = (
                get_scaling_in_overlap(x,
                                       y,
                                       y_err,
                                       ax,
                                       ay,
                                       ay_err))

            # a non-finite scale, or an empty overlap array, means the two
            # datasets share no usable overlap region
            if ((not np.isfinite(scale)) or (not np.isfinite(dscale)) or
                    (not np.size(overlap_points, 0))):
                raise ValueError("No points in overlap region")

        if not trim_trailing:
            overlap_points[:] = False

        # keep non-overlapping existing points, then append the (vertically
        # scaled) incoming data; combine the masks the same way
        qq = np.r_[x[~overlap_points], ax]
        rr = np.r_[y[~overlap_points], ay * scale]
        overall_mask = np.r_[self.mask[~overlap_points], mask2]

        # concatenation raises TypeError/ValueError when either side's
        # uncertainties are None; that is only an error if exactly one of
        # the two datasets supplied y_err
        try:
            dr = np.r_[y_err[~overlap_points], ay_err * scale]
        except (TypeError, ValueError):
            if (ay_err is not None) or (y_err is not None):
                raise ValueError("Both the existing Data1D and the data you're"
                                 " trying to add need to have y_err")
            dr = None

        # same logic for x_err
        try:
            dq = np.r_[x_err[~overlap_points], ax_err]
        except (TypeError, ValueError):
            if (ax_err is not None) or (x_err is not None):
                raise ValueError("Both the existing Data1D and the data you're"
                                 " trying to add need to have x_err")
            dq = None

        self.data = (qq, rr, dr, dq)
        self.mask = overall_mask

        self.sort()