Example #1
def test_time_interval_constructor_set():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)

    ts = TimeIntervalSet([t1, t2])

    assert ts[0] == t1
    assert ts[1] == t2

    # Use strings
    ts2 = TimeIntervalSet.from_strings("-10 - -5", "10 - 20", "20-30", "-10--5")

    assert ts2[0].start_time == -10
    assert ts2[0].stop_time == -5
    assert ts2[-1].start_time == -10
    assert ts2[-1].stop_time == -5
    assert ts2[1].start_time == 10
    assert ts2[1].stop_time == 20
    assert ts2[2].start_time == 20
    assert ts2[2].stop_time == 30

    # Use edges
    ts3 = TimeIntervalSet.from_list_of_edges([-2, -1, 0, 1, 2])

    assert ts3[0].start_time == -2
    assert ts3[0].stop_time == -1
    assert ts3[-1].start_time == 1
    assert ts3[-1].stop_time == 2
    assert ts3[1].start_time == -1
    assert ts3[1].stop_time == 0
    assert ts3[2].start_time == 0
    assert ts3[2].stop_time == 1

    # Use start and stops
    ts5 = TimeIntervalSet.from_starts_and_stops([-2, -1, 0, 1],  [-1, 0, 1, 2])

    assert ts5[0].start_time == -2
    assert ts5[0].stop_time == -1
    assert ts5[-1].start_time == 1
    assert ts5[-1].stop_time == 2
    assert ts5[1].start_time == -1
    assert ts5[1].stop_time == 0
    assert ts5[2].start_time == 0
    assert ts5[2].stop_time == 1

    with pytest.raises(AssertionError):

        ts6 = TimeIntervalSet.from_starts_and_stops([-2, -1, 0, 1], [-1, 0, 1])


    # test display

    ts5.display()
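Note that the string parser above accepts both the compact form ("20-30") and the spaced form ("-10 - -5"), including negative bounds. A minimal sketch, using only calls shown in these examples, of round-tripping a set through its string form:

# to_string() emits a comma-separated form that from_strings() parses back
ts_rt = TimeIntervalSet.from_strings(*ts2.to_string().split(","))
assert ts_rt[0] == ts2[0]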
Example #2
def test_time_interval_argsort_set():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)
    t3 = TimeInterval(-30.0, 50.0)

    ts = TimeIntervalSet([t1, t2, t3])

    idx = ts.argsort()

    assert idx == [2, 0, 1]
Example #3
def test_time_interval_set_pop():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)
    t3 = TimeInterval(-30.0, 50.0)

    ts = TimeIntervalSet([t1, t2, t3])

    popped = ts.pop(1)

    assert popped == t2
Example #4
def test_time_interval_sort_set():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)
    t3 = TimeInterval(-30.0, 50.0)

    ts = TimeIntervalSet([t1, t2, t3])

    ts2 = ts.sort()

    assert ts2[0] == t3
    assert ts2[1] == t1
    assert ts2[2] == t2
Example #5
File: response.py Project: giacomov/3ML
    def _get_weighted_matrix(self, switch, *intervals):

        assert len(intervals) > 0, "You have to provide at least one interval"

        intervals_set = TimeIntervalSet.from_strings(*intervals)

        # Compute a set of weights for each interval
        weights = np.zeros(len(self._matrix_list))

        for interval in intervals_set:

            weights += self._weight_response(interval, switch)

        # Normalize to 1
        weights /= np.sum(weights)

        # Weight matrices
        matrix = np.dot(np.array(list(map(attrgetter("matrix"), self._matrix_list))).T, weights.T).T

        # Now generate the instance of the response

        # get EBOUNDS from the first matrix
        ebounds = self._matrix_list[0].ebounds

        # Get mc channels from the first matrix
        mc_channels = self._matrix_list[0].monte_carlo_energies

        matrix_instance = InstrumentResponse(matrix, ebounds, mc_channels)

        return matrix_instance
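The np.dot(...).T expression above is simply a weighted sum of the individual response matrices. A self-contained numpy check of that identity, with hypothetical shapes and independent of 3ML:

import numpy as np

rng = np.random.default_rng(0)
matrices = [rng.random((4, 3)) for _ in range(5)]  # five hypothetical 4x3 matrices
weights = rng.random(5)
weights /= weights.sum()  # normalize to 1, as above

combined = np.dot(np.array(matrices).T, weights.T).T
expected = sum(w * m for w, m in zip(weights, matrices))
assert np.allclose(combined, expected)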
Example #6
def test_binned_fit():
    with within_directory(datasets_dir):
        start, stop = 0, 50

        poly = [1]

        arrival_times = np.loadtxt('test_event_data.txt')

        evt_list = EventListWithDeadTime(arrival_times=arrival_times,
                                         measurement=np.zeros_like(arrival_times),
                                         n_channels=1,
                                         start_time=arrival_times[0],
                                         stop_time=arrival_times[-1],
                                         dead_time=np.zeros_like(arrival_times)
                                         )

        evt_list.set_polynomial_fit_interval("%f-%f" % (start + 1, stop - 1), unbinned=False)

        evt_list.set_active_time_intervals("0-1")

        results = evt_list.get_poly_info()['coefficients']

        assert evt_list.time_intervals == TimeIntervalSet.from_list_of_edges([0, 1])

        assert evt_list._poly_counts.sum() > 0

        evt_list.__repr__()
Example #7
def test_binned_fit():
    with within_directory(datasets_directory):
        start, stop = 0, 50

        poly = [1]

        arrival_times = np.loadtxt('test_event_data.txt')

        evt_list = EventListWithDeadTime(arrival_times=arrival_times,
                                         energies=np.zeros_like(arrival_times),
                                         n_channels=1,
                                         start_time=arrival_times[0],
                                         stop_time=arrival_times[-1],
                                         dead_time=np.zeros_like(arrival_times)
                                         )

        evt_list.set_polynomial_fit_interval("%f-%f" % (start + 1, stop - 1), unbinned=False)

        evt_list.set_active_time_intervals("0-1")

        results = evt_list.get_poly_info()['coefficients']

        assert evt_list.time_intervals == TimeIntervalSet.from_list_of_edges([0, 1])

        assert evt_list._poly_counts.sum() > 0

        evt_list.__repr__()
Example #8
def test_time_interval_extend_set():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)

    ts = TimeIntervalSet([t1, t2])

    t3 = TimeInterval(30.0, 40.0)
    t4 = TimeInterval(40.0, 50.0)

    ts.extend([t3, t4])

    assert len(ts) == 4

    ts.extend(TimeIntervalSet([t3, t4]))

    assert len(ts) == 6
Example #9
    def restore_fit(self, filename):

        filename_sanitized = sanitize_filename(filename)

        with HDFStore(filename_sanitized) as store:

            coefficients = store['coefficients']

            covariance = store['covariance']

            self._polynomials = []

            # create new polynomials

            for i in range(len(coefficients)):

                coeff = np.array(coefficients.loc[i])

                # make sure we get the right order
                # pandas stores the non-needed coeff
                # as nans.

                coeff = coeff[np.isfinite(coeff)]

                cov = covariance.loc[i]

                self._polynomials.append(
                    Polynomial.from_previous_fit(coeff, cov))

            metadata = store.get_storer('coefficients').attrs.metadata

            self._optimal_polynomial_grade = metadata['poly_order']
            poly_selections = np.array(metadata['poly_selections'])

            self._poly_intervals = TimeIntervalSet.from_starts_and_stops(
                poly_selections[:, 0], poly_selections[:, 1])
            self._unbinned = metadata['unbinned']

            if self._unbinned:
                self._fit_method_info['bin type'] = 'unbinned'

            else:

                self._fit_method_info['bin type'] = 'binned'

            self._fit_method_info['fit method'] = metadata['fit_method']

        # go thru and count the counts!

        self._poly_fit_exists = True

        if self._time_selection_exists:

            self.set_active_time_intervals(
                *self._time_intervals.to_string().split(','))
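The coeff = coeff[np.isfinite(coeff)] step above drops the NaN padding that pandas inserts when rows hold polynomials of different orders. In isolation:

import numpy as np

row = np.array([2.0, 0.5, np.nan, np.nan])  # hypothetical stored row, NaN-padded
coeff = row[np.isfinite(row)]
assert coeff.tolist() == [2.0, 0.5]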
Example #10
def test_time_edges():

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(0.0, 10.0)
    t3 = TimeInterval(10.0, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    assert ts1.time_edges[0] == -10.0
    assert ts1.time_edges[1] == 0.0
    assert ts1.time_edges[2] == 10.0
    assert ts1.time_edges[3] == 20.0

    with pytest.raises(IntervalsNotContiguous):
        t1 = TimeInterval(-10.0, -5.0)
        t2 = TimeInterval(0.0, 10.0)
        t3 = TimeInterval(10.0, 20.0)

        ts1 = TimeIntervalSet([t1, t2, t3])

        _ = ts1.time_edges
Example #11
def test_interval_set_to_string():

    # also tests the time interval to string

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(5., 10.0)
    t3 = TimeInterval(15.0, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    strings = ts1.to_string()

    strings_split = strings.split(',')

    assert t1.to_string() == strings_split[0]
    assert t2.to_string() == strings_split[1]
    assert t3.to_string() == strings_split[2]

    ts2 = TimeIntervalSet.from_strings(t1.to_string())

    assert ts2[0] == t1
Example #13
def test_time_interval_sets_starts_stops():

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(5.0, 10.0)
    t3 = TimeInterval(15.0, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    for start, stop, interval in zip(ts1.start_times, ts1.stop_times,
                                     [t1, t2, t3]):

        assert interval.start_time == start
        assert interval.stop_time == stop
Example #14
def test_time_interval_iterator_set():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)

    ts = TimeIntervalSet([t1, t2])

    for i, tt in enumerate(ts):

        if i == 0:

            assert tt == t1

        else:

            assert tt == t2
Example #15
def test_time_interval_add_sub_set():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)

    ts = TimeIntervalSet([t1, t2])

    ts2 = ts + 10.0  # type: TimeIntervalSet

    assert ts2[0].start_time == 0.0
    assert ts2[1].stop_time == 40.0

    ts3 = ts - 10.0  # type: TimeIntervalSet

    assert ts3[0].start_time == -20.0
    assert ts3[1].stop_time == 20.0
Example #16
    def _adjust_to_true_intervals(self, time_intervals):
        """

        adjusts time selections to those of the Binned spectrum set


        :param time_intervals: a time interval set
        :return: an adjusted time interval set
        """

        # get all the starts and stops from these time intervals

        true_starts = np.array(self._binned_spectrum_set.time_intervals.start_times)
        true_stops = np.array(self._binned_spectrum_set.time_intervals.stop_times)

        new_starts = []
        new_stops = []

        # now go thru all the intervals
        for interval in time_intervals:

            # find where the suggested interval hits the true interval

            # searchsorted is fast, but does not return what we want:
            # we want the actual values of the bins closest to the input

            # idx = np.searchsorted(true_starts, interval.start_time, side)

            idx = (np.abs(true_starts - interval.start_time)).argmin()


            new_start = true_starts[idx]

            #idx = np.searchsorted(true_stops, interval.stop_time)

            idx = (np.abs(true_stops - interval.stop_time)).argmin()


            new_stop = true_stops[idx]

            new_starts.append(new_start)

            new_stops.append(new_stop)

        # alright, now we can make appropriate time intervals

        return TimeIntervalSet.from_starts_and_stops(new_starts, new_stops)
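The adjustment above snaps each requested edge to the nearest true bin edge by taking the argmin of the absolute differences. A self-contained numpy sketch of that rule, with hypothetical edges:

import numpy as np

true_starts = np.array([0.0, 1.0, 2.0, 3.0])  # hypothetical true bin starts
requested = 1.4
idx = (np.abs(true_starts - requested)).argmin()
assert true_starts[idx] == 1.0  # 1.4 snaps to the closest true edge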
Example #18
def test_time_interval_set_is_contiguous():

    t1 = TimeInterval(-10.0, 20.0)
    t2 = TimeInterval(10.0, 30.0)
    t3 = TimeInterval(-30.0, 50.0)

    ts = TimeIntervalSet([t1, t2, t3])

    assert ts.is_contiguous() == False

    t1 = TimeInterval(0.0, 1.0)
    t2 = TimeInterval(1.0, 2.0)
    t3 = TimeInterval(2.0, 3.0)

    ts = TimeIntervalSet([t1, t2, t3])

    assert ts.is_contiguous() == True

    t1 = TimeInterval(0.0, 1.0)
    t2 = TimeInterval(1.1, 2.0)
    t3 = TimeInterval(2.0, 3.0)

    ts = TimeIntervalSet([t1, t2, t3])

    assert ts.is_contiguous() == False

    t1 = TimeInterval(0.0, 1.0)
    t2 = TimeInterval(2.0, 3.0)
    t3 = TimeInterval(1.0, 2.0)

    ts = TimeIntervalSet([t1, t2, t3])

    assert ts.is_contiguous() == False

    new_ts = ts.sort()

    assert new_ts.is_contiguous() == True
Example #20
    def _get_weighted_matrix(self, switch: str,
                             *intervals) -> InstrumentResponse:

        if not len(intervals) > 0:

            log.error("You have to provide at least one interval")

            raise RuntimeError()

        intervals_set = TimeIntervalSet.from_strings(*intervals)

        # Compute a set of weights for each interval
        weights = np.zeros(len(self._matrix_list))

        for interval in intervals_set:

            weights += self._weight_response(interval, switch)

        # Normalize to 1
        weights /= np.sum(weights)

        # Weight matrices
        matrix = np.dot(
            np.array(list(map(attrgetter("matrix"), self._matrix_list))).T,
            weights.T).T

        # Now generate the instance of the response

        # get EBOUNDS from the first matrix
        ebounds = self._matrix_list[0].ebounds

        # Get mc channels from the first matrix
        mc_channels = self._matrix_list[0].monte_carlo_energies

        matrix_instance = InstrumentResponse(matrix, ebounds, mc_channels)

        return matrix_instance
Example #21
def test_unbinned_fit(event_time_series):

    start, stop = 0, 50

    poly = [1]

    arrival_times = event_time_series

    print(len(event_time_series))

    evt_list = EventListWithDeadTime(
        arrival_times=arrival_times,
        measurement=np.zeros_like(arrival_times),
        n_channels=1,
        start_time=arrival_times[0],
        stop_time=arrival_times[-1],
        dead_time=np.zeros_like(arrival_times),
    )

    evt_list.set_polynomial_fit_interval("%f-%f" % (start + 1, stop - 1),
                                         unbinned=True,
                                         bayes=False)

    results = evt_list.get_poly_info()["coefficients"]

    print(results)

    evt_list.set_active_time_intervals("0-10")

    assert evt_list.time_intervals == TimeIntervalSet.from_list_of_edges(
        [0, 10])

    print(evt_list._poly_counts)

    assert evt_list._poly_counts.sum() > 0

    evt_list.__repr__()
Example #22
def test_merging_set_intervals():

    # test that non overlapping intervals
    # do not result in a merge

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(5., 10.0)
    t3 = TimeInterval(15.0, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 3
    assert t1 == ts2[0]
    assert t2 == ts2[1]
    assert t3 == ts2[2]

    # end merge works

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(5., 10.0)
    t3 = TimeInterval(7.0, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 2
    assert t1 == ts2[0]
    assert TimeInterval(5.0, 20.0) == ts2[1]

    # begin merge works

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(-5., 10.0)
    t3 = TimeInterval(15, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 2
    assert TimeInterval(-10.0, 10.0) == ts2[0]
    assert TimeInterval(15.0, 20.0) == ts2[1]

    # middle merge works

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(5.0, 10.0)
    t3 = TimeInterval(7.0, 20.0)
    t4 = TimeInterval(35.0, 40.0)

    ts1 = TimeIntervalSet([t1, t2, t3, t4])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 3
    assert t1 == ts2[0]
    assert TimeInterval(5.0, 20.0) == ts2[1]
    assert t4 == ts2[2]

    # both end merge works

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(-5.0, 10.0)
    t3 = TimeInterval(15.0, 20.0)
    t4 = TimeInterval(35.0, 45.0)
    t5 = TimeInterval(40.0, 50.0)

    ts1 = TimeIntervalSet([t1, t2, t3, t4, t5])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 3
    assert TimeInterval(-10.0, 10.0) == ts2[0]
    assert t3 == ts2[1]
    assert TimeInterval(35.0, 50.0) == ts2[2]

    # multi merge works

    t1 = TimeInterval(-10.0, 0.0)
    t2 = TimeInterval(-5., 10.0)
    t3 = TimeInterval(7, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 1
    assert TimeInterval(-10.0, 20.0) == ts2[0]

    # complete overlap merge works

    t1 = TimeInterval(-10.0, 25.0)
    t2 = TimeInterval(-5., 10.0)
    t3 = TimeInterval(7, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    ts2 = ts1.merge_intersecting_intervals(in_place=False)

    assert len(ts2) == 1
    assert TimeInterval(-10.0, 25.0) == ts2[0]

    # tests the inplace operation

    t1 = TimeInterval(-10.0, 25.0)
    t2 = TimeInterval(-5., 10.0)
    t3 = TimeInterval(7, 20.0)

    ts1 = TimeIntervalSet([t1, t2, t3])

    ts1.merge_intersecting_intervals(in_place=True)

    assert len(ts1) == 1
    assert TimeInterval(-10.0, 25.0) == ts1[0]
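The behaviour exercised above matches the classic sweep for merging: sort by start time, then fold each interval into the previous one whenever it overlaps. A plain-Python reference sketch (not the library's implementation; here touching endpoints also merge, which the library may treat differently):

def merge_intersecting(pairs):
    # pairs: iterable of (start, stop) tuples; returns merged pairs sorted by start
    merged = []
    for start, stop in sorted(pairs):
        if merged and start <= merged[-1][1]:  # overlaps (or touches) the previous one
            merged[-1] = (merged[-1][0], max(merged[-1][1], stop))
        else:
            merged.append((start, stop))
    return merged

assert merge_intersecting([(-10.0, 25.0), (-5.0, 10.0), (7.0, 20.0)]) == [(-10.0, 25.0)]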
Example #24
File: time_series.py Project: giacomov/3ML
    def set_polynomial_fit_interval(self, *time_intervals, **options):
        """Set the time interval to fit the background.
        Multiple intervals can be input as separate arguments
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_polynomial_fit_interval("-10.0-0.0","10.-15.")

        :param time_intervals: intervals to fit on
        :param options:

        """

        # Find out if we want binned or unbinned.
        # TODO: add the option to config file
        if 'unbinned' in options:
            unbinned = options.pop('unbinned')
            assert type(unbinned) == bool, 'unbinned option must be True or False'

        else:

            # assuming unbinned
            # could use config file here
            # unbinned = threeML_config['ogip']['use-unbinned-poly-fitting']

            unbinned = True

        # we create some time intervals

        poly_intervals = TimeIntervalSet.from_strings(*time_intervals)

        # adjust the selections to the data

        new_intervals = []

        self._poly_selected_counts = []

        self._poly_exposure = 0.

        for i, time_interval in enumerate(poly_intervals):

            t1 = time_interval.start_time
            t2 = time_interval.stop_time

            if (self._stop_time <= t1) or (t2 <= self._start_time):
                custom_warnings.warn(
                    "The time interval %f-%f is outside of the arrival times and will be dropped" % (
                        t1, t2))

            else:

                if t1 < self._start_time:
                    custom_warnings.warn(
                        "The time interval %f-%f started before the first arrival time (%f), so we are changing the intervals to %f-%f" % (
                            t1, t2, self._start_time, self._start_time, t2))

                    t1 = self._start_time  # + 1

                if t2 > self._stop_time:
                    custom_warnings.warn(
                        "The time interval %f-%f ended after the last arrival time (%f), so we are changing the intervals to %f-%f" % (
                            t1, t2, self._stop_time, t1, self._stop_time))

                    t2 = self._stop_time  # - 1.

                new_intervals.append('%f-%f' % (t1, t2))

                self._poly_selected_counts.append(self.count_per_channel_over_interval(t1, t2))
                self._poly_exposure += self.exposure_over_interval(t1, t2)

        # make new intervals after checks

        poly_intervals = TimeIntervalSet.from_strings(*new_intervals)

        self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)

        # set the poly intervals as an attribute

        self._poly_intervals = poly_intervals

        # Fit the events with the given intervals
        if unbinned:

            self._unbinned = True  # keep track!

            self._unbinned_fit_polynomials()

        else:

            self._unbinned = False

            self._fit_polynomials()

        # we have a fit now

        self._poly_fit_exists = True

        if self._verbose:
            print("%s %d-order polynomial fit with the %s method" % (
                self._fit_method_info['bin type'], self._optimal_polynomial_grade, self._fit_method_info['fit method']))
            print('\n')

        # recalculate the selected counts

        if self._time_selection_exists:
            self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
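The 'tmin-tmax' strings accepted here go through the same parser exercised in Example #1, so negative bounds work exactly as the docstring above suggests:

ts = TimeIntervalSet.from_strings("-10.0-0.0", "10.-15.")
assert ts[0].start_time == -10.0 and ts[0].stop_time == 0.0
assert ts[1].start_time == 10.0 and ts[1].stop_time == 15.0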
Example #25
    def restore_fit(self, filename):

        filename_sanitized: Path = sanitize_filename(filename)

        with h5py.File(filename_sanitized, "r") as store:

            coefficients = store["coefficients"][()]

            covariance = store["covariance"][()]

            self._polynomials = []

            # create new polynomials

            for i in range(len(coefficients)):
                coeff = np.array(coefficients[i])

                # make sure we get the right order
                # pandas stores the non-needed coeff
                # as nans.

                coeff = coeff[np.isfinite(coeff)]

                cov = covariance[i]

                self._polynomials.append(
                    Polynomial.from_previous_fit(coeff, cov))

            metadata = store.attrs

            self._optimal_polynomial_grade = metadata["poly_order"]
            poly_selections = np.array(metadata["poly_selections"])

            self._poly_intervals = TimeIntervalSet.from_starts_and_stops(
                poly_selections[:, 0], poly_selections[:, 1])
            self._unbinned = metadata["unbinned"]

            if self._unbinned:
                self._fit_method_info["bin type"] = "unbinned"

            else:

                self._fit_method_info["bin type"] = "binned"

            self._fit_method_info["fit method"] = metadata["fit_method"]

        # go thru and count the counts!
        log.debug("resest the poly form the file")
        self._poly_fit_exists = True

        # we must go thru and collect the polynomial exposure and counts
        # so that they can be extracted if needed
        self._poly_exposure = 0.0
        self._poly_selected_counts = []
        for i, time_interval in enumerate(self._poly_intervals):

            t1 = time_interval.start_time
            t2 = time_interval.stop_time

            self._poly_selected_counts.append(
                self.count_per_channel_over_interval(t1, t2))
            self._poly_exposure += self.exposure_over_interval(t1, t2)

        self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
        if self._time_selection_exists:
            self.set_active_time_intervals(
                *self._time_intervals.to_string().split(","))
Example #26
    def set_polynomial_fit_interval(self, *time_intervals, **kwargs) -> None:
        """Set the time interval to fit the background.
        Multiple intervals can be input as separate arguments
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_polynomial_fit_interval("-10.0-0.0","10.-15.")

        :param time_intervals: intervals to fit on
        :param unbinned:
        :param bayes:
        :param kwargs:

        """

        # Find out if we want binned or unbinned.
        # TODO: add the option to config file
        if "unbinned" in kwargs:
            unbinned = kwargs.pop("unbinned")
            assert type(
                unbinned) == bool, "unbinned option must be True or False"

        else:

            # assuming unbinned
            # could use config file here
            # unbinned = threeML_config['ogip']['use-unbinned-poly-fitting']

            unbinned = True

        # check if we are doing a bayesian
        # fit and record this info

        if "bayes" in kwargs:
            bayes = kwargs.pop("bayes")

        else:

            bayes = False

        if bayes:

            self._fit_method_info["fit method"] = "bayes"

        else:

            # non-Bayesian fits use maximum likelihood
            self._fit_method_info["fit method"] = "mle"

        # we create some time intervals

        poly_intervals = TimeIntervalSet.from_strings(*time_intervals)

        # adjust the selections to the data

        new_intervals = []

        self._poly_selected_counts = []

        self._poly_exposure = 0.0

        for i, time_interval in enumerate(poly_intervals):

            t1 = time_interval.start_time
            t2 = time_interval.stop_time

            if (self._stop_time <= t1) or (t2 <= self._start_time):
                log.warning(
                    "The time interval %f-%f is outside of the arrival times and will be dropped"
                    % (t1, t2))

            else:

                if t1 < self._start_time:
                    log.warning(
                        "The time interval %f-%f started before the first arrival time (%f), so we are changing the intervals to %f-%f"
                        % (t1, t2, self._start_time, self._start_time, t2))

                    t1 = self._start_time  # + 1

                if t2 > self._stop_time:
                    log.warning(
                        "The time interval %f-%f ended after the last arrival time (%f), so we are changing the intervals to %f-%f"
                        % (t1, t2, self._stop_time, t1, self._stop_time))

                    t2 = self._stop_time  # - 1.

                new_intervals.append("%f-%f" % (t1, t2))

                self._poly_selected_counts.append(
                    self.count_per_channel_over_interval(t1, t2))
                self._poly_exposure += self.exposure_over_interval(t1, t2)

        # make new intervals after checks

        poly_intervals = TimeIntervalSet.from_strings(*new_intervals)

        self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)

        # set the poly intervals as an attribute

        self._poly_intervals = poly_intervals

        # Fit the events with the given intervals
        if unbinned:

            self._unbinned = True  # keep track!

            self._unbinned_fit_polynomials(bayes=bayes)

        else:

            self._unbinned = False

            self._fit_polynomials(bayes=bayes)

        # we have a fit now

        self._poly_fit_exists = True

        log.info(
            f"{self._fit_method_info['bin type']} {self._optimal_polynomial_grade}-order polynomial fit with the {self._fit_method_info['fit method']} method"
        )

        # recalculate the selected counts

        if self._time_selection_exists:
            self.set_active_time_intervals(
                *self._time_intervals.to_string().split(","))
Example #27
    def set_active_time_intervals(self, *args):
        """
        Set the time interval(s) to be used during the analysis.
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_active_time_intervals("0.0-10.0")

        which will set the time range 0-10. seconds.
        """

        # mark that we now have a time selection

        self._time_selection_exists = True

        # let's build a time interval set from the selections
        # and then merge intersecting intervals

        time_intervals = TimeIntervalSet.from_strings(*args)
        time_intervals.merge_intersecting_intervals(in_place=True)

        # let's adjust the time intervals to the actual ones since they are prebinned

        time_intervals = self._adjust_to_true_intervals(time_intervals)

        # start out with no time bins selection
        all_idx = np.zeros(
            len(self._binned_spectrum_set.time_intervals), dtype=bool)

        # now we need to sum up the counts and total time

        total_time = 0

        for interval in time_intervals:

            # the select bins method is called.
            # since we are sure that the interval bounds
            # are aligned with the true ones, we do not care if
            # it is inner or outer

            all_idx = np.logical_or(
                all_idx, self._select_bins(
                    interval.start_time, interval.stop_time)
            )

            total_time += interval.duration

        # sum along the time axis
        self._counts = self._binned_spectrum_set.counts_per_bin[all_idx].sum(
            axis=0)

        # the selected time intervals

        self._time_intervals = time_intervals

        tmp_counts = []
        tmp_err = []  # Temporary list to hold the err counts per chan

        if self._poly_fit_exists:

            for chan in range(self._n_channels):

                total_counts = 0
                counts_err = 0

                for tmin, tmax in zip(
                    self._time_intervals.start_times, self._time_intervals.stop_times
                ):
                    # Now integrate the appropriate background polynomial
                    total_counts += self._polynomials[chan].integral(
                        tmin, tmax)
                    counts_err += (
                        self._polynomials[chan].integral_error(tmin, tmax)
                    ) ** 2

                tmp_counts.append(total_counts)

                tmp_err.append(np.sqrt(counts_err))

            self._poly_counts = np.array(tmp_counts)

            self._poly_count_err = np.array(tmp_err)

        self._exposure = self._binned_spectrum_set.exposure_per_bin[all_idx].sum(
        )

        self._active_dead_time = total_time - self._exposure
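Note how the background error is accumulated above: per-interval integral errors are summed in quadrature (squares added, one square root taken at the end). In numbers:

import numpy as np

interval_errors = [0.3, 0.4]  # hypothetical per-interval integral errors
counts_err = sum(e ** 2 for e in interval_errors)
assert np.isclose(np.sqrt(counts_err), 0.5)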
Example #28
    def __init__(
        self,
        trigdat_file,
        fine=False,
        time_resolved=False,
        verbose=True,
        poly_order=-1,
        restore_poly_fit=None,
    ):

        # self._backgroundexists = False
        # self._sourceexists = False

        self._verbose = verbose
        self._time_resolved = time_resolved
        self._poly_order = poly_order
        self._restore_poly_fit = restore_poly_fit
        # Read the trig data file and get the appropriate info

        trigdat = fits.open(trigdat_file)
        self._filename = trigdat_file
        self._out_edge_bgo = np.array(
            [150.0, 400.0, 850.0, 1500.0, 3000.0, 5500.0, 10000.0, 20000.0, 50000.0],
            dtype=np.float32,
        )
        self._out_edge_nai = np.array(
            [3.4, 10.0, 22.0, 44.0, 95.0, 300.0, 500.0, 800.0, 2000.0], dtype=np.float32
        )
        self._binwidth_bgo = self._out_edge_bgo[1:] - self._out_edge_bgo[:-1]
        self._binwidth_nai = self._out_edge_nai[1:] - self._out_edge_nai[:-1]

        # Get the times
        evntrate = "EVNTRATE"

        self._trigtime = trigdat[evntrate].header["TRIGTIME"]
        self._tstart = trigdat[evntrate].data["TIME"] - self._trigtime
        self._tstop = trigdat[evntrate].data["ENDTIME"] - self._trigtime

        self._rates = trigdat[evntrate].data["RATE"]

        num_times = len(self._tstart)
        self._rates = self._rates.reshape(num_times, 14, 8)

        # Obtain the positional information
        self._qauts = trigdat[evntrate].data["SCATTITD"]  # [condition][0]
        self._sc_pos = trigdat[evntrate].data["EIC"]  # [condition][0]

        # Get the flight software location
        self._fsw_ra = trigdat["PRIMARY"].header["RA_OBJ"]
        self._fsw_dec = trigdat["PRIMARY"].header["DEC_OBJ"]
        self._fsw_err = trigdat["PRIMARY"].header["ERR_RAD"]

        # Clean up
        trigdat.close()

        # Sort out the high res times because they are interleaved with the normal
        # times.

        # The delta time in the file.
        # This routine is modeled off the procedure in RMFIT.
        myDelta = self._tstop - self._tstart
        self._tstart[myDelta < 0.1] = np.round(self._tstart[myDelta < 0.1], 4)
        self._tstop[myDelta < 0.1] = np.round(self._tstop[myDelta < 0.1], 4)

        self._tstart[~(myDelta < 0.1)] = np.round(self._tstart[~(myDelta < 0.1)], 3)
        self._tstop[~(myDelta < 0.1)] = np.round(self._tstop[~(myDelta < 0.1)], 3)

        if fine:

            # Create a starting list of array indices.
            # We will dump the ones that are not needed

            all_index = list(range(len(self._tstart)))

            # masks for all the different delta times and
            # the mid points for the different binnings
            temp1 = myDelta < 0.1
            temp2 = np.logical_and(myDelta > 0.1, myDelta < 1.0)
            temp3 = np.logical_and(myDelta > 1.0, myDelta < 2.0)
            temp4 = myDelta > 2.0
            midT1 = (self._tstart[temp1] + self._tstop[temp1]) / 2.0
            midT2 = (self._tstart[temp2] + self._tstop[temp2]) / 2.0
            midT3 = (self._tstart[temp3] + self._tstop[temp3]) / 2.0

            # Dump any index that occurs in a lower resolution
            # binning when a finer resolution covers the interval

            for indx in np.where(temp2)[0]:
                for x in midT1:
                    if self._tstart[indx] < x < self._tstop[indx]:
                        if indx in all_index:
                            all_index.remove(indx)

            for indx in np.where(temp3)[0]:
                for x in midT2:
                    if self._tstart[indx] < x < self._tstop[indx]:
                        if indx in all_index:
                            all_index.remove(indx)

            for indx in np.where(temp4)[0]:
                for x in midT3:
                    if self._tstart[indx] < x < self._tstop[indx]:
                        if indx in all_index:
                            all_index.remove(indx)

            all_index = np.array(all_index)
        else:

            # Just deal with the first level of fine data
            all_index = np.where(myDelta > 1.0)[0].tolist()

            temp1 = np.logical_and(myDelta > 1.0, myDelta < 2.0)
            temp2 = myDelta > 2.0
            midT1 = (self._tstart[temp1] + self._tstop[temp1]) / 2.0

            for indx in np.where(temp2)[0]:
                for x in midT1:
                    if self._tstart[indx] < x < self._tstop[indx]:

                        try:

                            all_index.remove(indx)

                        except ValueError:
                            # index was already removed
                            pass

            all_index = np.array(all_index)

        # Now dump the indices we do not need
        self._tstart = self._tstart[all_index]
        self._tstop = self._tstop[all_index]
        self._qauts = self._qauts[all_index]
        self._sc_pos = self._sc_pos[all_index]
        self._rates = self._rates[all_index, :, :]

        # Now we need to sort because GBM may not have done this!

        sort_mask = np.argsort(self._tstart)
        self._tstart = self._tstart[sort_mask]
        self._tstop = self._tstop[sort_mask]
        self._qauts = self._qauts[sort_mask]
        self._sc_pos = self._sc_pos[sort_mask]
        self._rates = self._rates[sort_mask, :, :]

        self._time_intervals = TimeIntervalSet.from_starts_and_stops(
            self._tstart, self._tstop
        )

        # self._pos_interp = PositionInterpolator(trigdat=trigdat_file)

        self._create_timeseries()
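The dedup loops above keep only the finest binning available for each stretch of time: a coarser bin is dropped whenever the midpoint of a finer bin falls strictly inside it. A toy illustration of the rule:

coarse_start, coarse_stop = 0.0, 2.0  # hypothetical coarse bin
fine_midpoints = [0.5, 1.5]           # midpoints of finer bins covering the same span
drop = any(coarse_start < x < coarse_stop for x in fine_midpoints)
assert drop  # the coarse bin would be removed from all_index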
Example #29
    def set_active_time_intervals(self, *args):
        '''Set the time interval(s) to be used during the analysis.

        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_active_time_intervals("0.0-10.0")

        which will set the time range 0-10 seconds.
        '''

        self._time_selection_exists = True

        interval_masks = []

        time_intervals = TimeIntervalSet.from_strings(*args)

        time_intervals.merge_intersecting_intervals(in_place=True)

        for interval in time_intervals:
            tmin = interval.start_time
            tmax = interval.stop_time

            mask = self._select_events(tmin, tmax)

            interval_masks.append(mask)

        self._time_intervals = time_intervals

        time_mask = interval_masks[0]
        if len(interval_masks) > 1:
            for mask in interval_masks[1:]:
                time_mask = np.logical_or(time_mask, mask)

        tmp_counts = []  # Temporary list to hold the total counts per chan

        for chan in range(self._first_channel,
                          self._n_channels + self._first_channel):
            channel_mask = self._measurement == chan
            counts_mask = np.logical_and(channel_mask, time_mask)
            total_counts = len(self._arrival_times[counts_mask])

            tmp_counts.append(total_counts)

        self._counts = np.array(tmp_counts)

        tmp_counts = []
        tmp_err = []  # Temporary list to hold the err counts per chan

        if self._poly_fit_exists:

            for chan in range(self._n_channels):

                total_counts = 0
                counts_err = 0

                for tmin, tmax in zip(self._time_intervals.start_times,
                                      self._time_intervals.stop_times):
                    # Now integrate the appropriate background polynomial
                    total_counts += self._polynomials[chan].integral(
                        tmin, tmax)
                    counts_err += (self._polynomials[chan].integral_error(
                        tmin, tmax))**2

                tmp_counts.append(total_counts)

                tmp_err.append(np.sqrt(counts_err))

            self._poly_counts = np.array(tmp_counts)

            self._poly_count_err = np.array(tmp_err)

        # Dead time correction

        exposure = 0.
        total_dead_time = 0.
        for interval, imask in zip(self._time_intervals, interval_masks):
            exposure += interval.duration
            if self._dead_time_fraction is not None:
                total_dead_time += interval.duration * self._dead_time_fraction[
                    imask].mean()

        self._exposure = exposure - total_dead_time

        self._active_dead_time = total_dead_time
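The dead-time correction at the end subtracts, for each selected interval, the interval duration scaled by the mean dead-time fraction of the events inside it. With hypothetical numbers:

duration = 10.0            # seconds in the selected interval
mean_dead_fraction = 0.02  # mean dead-time fraction of the events in it
exposure = duration - duration * mean_dead_fraction
assert abs(exposure - 9.8) < 1e-12  # avoid exact float comparison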
Example #31
File: response.py Project: giacomov/3ML
    def __init__(self, matrix_list, exposure_getter, counts_getter, reference_time=0.0):
        """

        :param matrix_list:
        :type matrix_list : list[InstrumentResponse]
        :param exposure_getter : a function returning the exposure between t1 and t2
        :param counts_getter : a function returning the number of counts between t1 and t2
        :param reference_time : a reference time to be added to the specifications of the intervals used in the
        weight_by_* methods. Use this if you want to express the time intervals in time units from the reference_time,
        instead of "absolute" time. For GRBs, this is the trigger time. NOTE: if you use a reference time, the
        counts_getter and the exposure_getter must accept times relative to the reference time.
        """

        # Store list of matrices

        self._matrix_list = list(matrix_list)  # type: list[InstrumentResponse]

        # Create the corresponding list of coverage intervals

        self._coverage_intervals = TimeIntervalSet(list(map(lambda x: x.coverage_interval,
                                                            self._matrix_list)))

        # Make sure that all matrices have coverage interval set

        if None in self._coverage_intervals:

            raise NoCoverageIntervals("You need to specify the coverage interval for all matrices in the matrix_list")

        # Remove from the list matrices that cover intervals of zero duration (yes, the GBM publishes those too,
        # one example is in data/ogip_test_gbm_b0.rsp2)
        to_be_removed = []
        for i, interval in enumerate(self._coverage_intervals):

            if interval.duration == 0:

                # Remove it
                with custom_warnings.catch_warnings():

                    custom_warnings.simplefilter("always", RuntimeWarning)

                    custom_warnings.warn("Removing matrix %s (numbering starts at zero) because it has a coverage of "
                                         "zero seconds" % i, RuntimeWarning)

                to_be_removed.append(i)

        # Actually remove them, in reverse order so the remaining indices stay valid
        if len(to_be_removed) > 0:

            for index in reversed(to_be_removed):

                self._matrix_list.pop(index)
                self._coverage_intervals.pop(index)

        # Order the matrices by time

        idx = self._coverage_intervals.argsort()

        # It is possible that there is only one coverage interval (these are published by GBM e.g. GRB090819607)
        # so we need to be sure that the array is at least 1D

        self._coverage_intervals = TimeIntervalSet(np.atleast_1d(itemgetter(*idx)(self._coverage_intervals)))
        self._matrix_list = np.atleast_1d(itemgetter(*idx)(self._matrix_list))
        # Now make sure that the coverage intervals are contiguous (i.e., there are no gaps)
        if not self._coverage_intervals.is_contiguous():

            raise NonContiguousCoverageIntervals("The provided responses have coverage intervals which are not contiguous!")

        # Apply the reference time shift, if any
        self._coverage_intervals -= reference_time

        # Store callable

        self._exposure_getter = exposure_getter  # type: callable

        self._counts_getter = counts_getter  # type: callable

        # Store reference time

        self._reference_time = float(reference_time)
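The reference-time shift at the end relies on the set arithmetic from Example #15: subtracting a scalar moves every interval. A minimal sketch with a hypothetical trigger time:

trigger_time = 100.0  # hypothetical GRB trigger time
coverage = TimeIntervalSet.from_starts_and_stops([100.0, 110.0], [110.0, 120.0])
shifted = coverage - trigger_time
assert shifted[0].start_time == 0.0
assert shifted[-1].stop_time == 20.0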
Example #33
    def __init__(self,
                 pha_file_or_instance: Union[str, Path, PHAII],
                 file_type: str = "observed",
                 rsp_file: Optional[str] = None,
                 arf_file: Optional[str] = None):
        """
        A spectrum with dispersion built from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
        spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
        in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
        bounds can be obtained.


        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
        """

        # extract the spectrum number if needed

        for t in _valid_input_types:

            if isinstance(pha_file_or_instance, t):
                break

        else:

            log.error(
                f"Must provide a FITS file name or PHAII instance. Got {type(pha_file_or_instance)}"
            )

            raise RuntimeError()

        with fits.open(pha_file_or_instance) as f:

            try:

                HDUidx = f.index_of("SPECTRUM")

            except KeyError:

                raise RuntimeError("The input file %s is not in PHA format" %
                                   (pha_file_or_instance))

            spectrum = f[HDUidx]
            data = spectrum.data

            if "COUNTS" in data.columns.names:

                has_rates = False
                data_column_name = "COUNTS"

            elif "RATE" in data.columns.names:

                has_rates = True
                data_column_name = "RATE"

            else:

                log.error(
                    "This file does not contain a RATE nor a COUNTS column. "
                    "This is not a valid PHA file")

                raise RuntimeError()

            # Determine if this is a PHA I or PHA II
            if len(data.field(data_column_name).shape) == 2:

                num_spectra = data.field(data_column_name).shape[0]

            else:

                log.error("This appears to be a PHA I and not PHA II file")

                raise RuntimeError()

        pha_information = _read_pha_or_pha2_file(
            pha_file_or_instance,
            None,
            file_type,
            rsp_file,
            arf_file,
            treat_as_time_series=True,
        )

        # default the grouping to all open bins
        # this will only be altered if the spectrum is rebinned
        self._grouping = np.ones_like(pha_information["counts"])

        # this saves the extra properties to the class

        self._gathered_keywords = pha_information["gathered_keywords"]

        self._file_type = file_type

        # need to see if we have count errors, tstart, tstop
        # if not, we create a list of None

        if pha_information["count_errors"] is None:

            count_errors = [None] * num_spectra

        else:

            count_errors = pha_information["count_errors"]

        if pha_information["tstart"] is None:

            tstart = [None] * num_spectra

        else:

            tstart = pha_information["tstart"]

        if pha_information["tstop"] is None:

            tstop = [None] * num_spectra

        else:

            tstop = pha_information["tstop"]

        # now build the list of binned spectra

        list_of_binned_spectra = []

        for i in trange(num_spectra, desc="Loading PHAII Spectra"):

            list_of_binned_spectra.append(
                BinnedSpectrumWithDispersion(
                    counts=pha_information["counts"][i],
                    exposure=pha_information["exposure"][i, 0],
                    response=pha_information["rsp"],
                    count_errors=count_errors[i],
                    sys_errors=pha_information["sys_errors"][i],
                    is_poisson=pha_information["is_poisson"],
                    quality=pha_information["quality"].get_slice(i),
                    mission=pha_information["gathered_keywords"]["mission"],
                    instrument=pha_information["gathered_keywords"]
                    ["instrument"],
                    tstart=tstart[i],
                    tstop=tstop[i],
                ))

        # now get the time intervals

        _allowed_time_keys = (("TIME", "ENDTIME"), ("TSTART", "TSTOP"))

        for keys in _allowed_time_keys:

            try:

                start_times = data.field(keys[0])
                stop_times = data.field(keys[1])
                break

            except KeyError:

                pass

        else:

            log.error(
                f"Could not find times in {pha_file_or_instance}. Tried: {_allowed_time_keys}"
            )

            raise RuntimeError()

        time_intervals = TimeIntervalSet.from_starts_and_stops(
            start_times, stop_times)

        reference_time = 0

        # see if there is a reference time in the file

        if "TRIGTIME" in spectrum.header:
            reference_time = spectrum.header["TRIGTIME"]

        for t_number in range(spectrum.header["TFIELDS"]):

            if "TZERO%d" % t_number in spectrum.header:
                reference_time = spectrum.header["TZERO%d" % t_number]

        super(PHASpectrumSet, self).__init__(
            list_of_binned_spectra,
            reference_time=reference_time,
            time_intervals=time_intervals,
        )
Example #34
    def __init__(self,
                 pha_file_or_instance,
                 file_type='observed',
                 rsp_file=None,
                 arf_file=None):
        """
        A spectrum with dispersion built from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
        spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
        in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
        bounds can be obtained.


        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
        """

        # extract the spectrum number if needed

        assert isinstance(pha_file_or_instance, str) or isinstance(
            pha_file_or_instance,
            PHAII), 'Must provide a FITS file name or PHAII instance'

        with fits.open(pha_file_or_instance) as f:

            try:

                HDUidx = f.index_of("SPECTRUM")

            except KeyError:

                raise RuntimeError("The input file %s is not in PHA format" %
                                   pha_file_or_instance)

            spectrum = f[HDUidx]
            data = spectrum.data

            if "COUNTS" in data.columns.names:

                has_rates = False
                data_column_name = "COUNTS"

            elif "RATE" in data.columns.names:

                has_rates = True
                data_column_name = "RATE"

            else:

                raise RuntimeError(
                    "This file does not contain a RATE nor a COUNTS column. "
                    "This is not a valid PHA file")

            # Determine if this is a PHA I or PHA II
            if len(data.field(data_column_name).shape) == 2:

                num_spectra = data.field(data_column_name).shape[0]

            else:

                raise RuntimeError(
                    "This appears to be a PHA I and not PHA II file")

        pha_information = _read_pha_or_pha2_file(pha_file_or_instance,
                                                 None,
                                                 file_type,
                                                 rsp_file,
                                                 arf_file,
                                                 treat_as_time_series=True)

        # default the grouping to all open bins
        # this will only be altered if the spectrum is rebinned
        self._grouping = np.ones_like(pha_information['counts'])

        # this saves the extra properties to the class

        self._gathered_keywords = pha_information['gathered_keywords']

        self._file_type = file_type

        # need to see if we have count errors, tstart, tstop
        # if not, we create a list of None

        if pha_information['count_errors'] is None:

            count_errors = [None] * num_spectra

        else:

            count_errors = pha_information['count_errors']

        if pha_information['tstart'] is None:

            tstart = [None] * num_spectra

        else:

            tstart = pha_information['tstart']

        if pha_information['tstop'] is None:

            tstop = [None] * num_spectra

        else:

            tstop = pha_information['tstop']

        # now build the list of binned spectra

        list_of_binned_spectra = []

        with progress_bar(num_spectra, title='Loading PHAII spectra') as p:
            for i in range(num_spectra):

                list_of_binned_spectra.append(
                    BinnedSpectrumWithDispersion(
                        counts=pha_information['counts'][i],
                        exposure=pha_information['exposure'][i, 0],
                        response=pha_information['rsp'],
                        count_errors=count_errors[i],
                        sys_errors=pha_information['sys_errors'][i],
                        is_poisson=pha_information['is_poisson'],
                        quality=pha_information['quality'].get_slice(i),
                        mission=pha_information['gathered_keywords']
                        ['mission'],
                        instrument=pha_information['gathered_keywords']
                        ['instrument'],
                        tstart=tstart[i],
                        tstop=tstop[i]))

                p.increase()

        # now get the time intervals

        start_times = data.field('TIME')
        stop_times = data.field('ENDTIME')

        time_intervals = TimeIntervalSet.from_starts_and_stops(
            start_times, stop_times)

        reference_time = 0

        # see if there is a reference time in the file

        if 'TRIGTIME' in spectrum.header:
            reference_time = spectrum.header['TRIGTIME']

        for t_number in range(spectrum.header['TFIELDS']):

            if 'TZERO%d' % t_number in spectrum.header:
                reference_time = spectrum.header['TZERO%d' % t_number]

        super(PHASpectrumSet, self).__init__(list_of_binned_spectra,
                                             reference_time=reference_time,
                                             time_intervals=time_intervals)
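
The TRIGTIME/TZERO lookup above only chooses a reference time; the shift itself is plain TimeIntervalSet arithmetic, as in this minimal sketch (import path assumed):

from threeML.utils.time_interval import TimeIntervalSet

intervals = TimeIntervalSet.from_starts_and_stops([100.0, 110.0], [110.0, 120.0])

# subtracting the reference time (e.g. a trigger time) shifts every interval,
# exactly like the `-=` applied to coverage intervals elsewhere in these examples
intervals -= 105.0

assert intervals[0].start_time == -5.0
assert intervals[-1].stop_time == 15.0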
Example #35
    def set_active_time_intervals(self, *args):
        """Set the time interval(s) to be used during the analysis.

        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_active_time_intervals("0.0-10.0")

        which will set the time range 0-10 seconds.
        """
        self._time_selection_exists = True

        interval_masks = []

        time_intervals = TimeIntervalSet.from_strings(*args)

        time_intervals.merge_intersecting_intervals(in_place=True)

        for interval in time_intervals:
            tmin = interval.start_time
            tmax = interval.stop_time

            mask = self._select_events(tmin, tmax)

            interval_masks.append(mask)

        self._time_intervals = time_intervals

        time_mask = interval_masks[0]
        if len(interval_masks) > 1:
            for mask in interval_masks[1:]:
                time_mask = np.logical_or(time_mask, mask)

        # calculate exposure and dead time
        exposure = 0
        dead_time = 0
        for interval in time_intervals:
            tmin = interval.start_time
            tmax = interval.stop_time
            this_exposure = self.exposure_over_interval(tmin, tmax)
            # check that the exposure is not larger than the total time
            if this_exposure > (tmax - tmin):
                log.error("The exposure in the active time bin is larger "
                          "than the total active time. "
                          "Something must be wrong!")
                raise RuntimeError()
            exposure += this_exposure
            dead_time += (tmax - tmin) - this_exposure

        self._exposure = exposure
        self._active_dead_time = dead_time

        tmp_counts = []  # Temporary list to hold the total counts per chan

        for chan in range(self._first_channel,
                          self._n_channels + self._first_channel):

            channel_mask = self._measurement == chan
            counts_mask = np.logical_and(channel_mask, time_mask)
            total_counts = len(self._arrival_times[counts_mask])

            tmp_counts.append(total_counts)

        self._counts = np.array(tmp_counts)

        tmp_counts = []
        tmp_err = []  # Temporary list to hold the err counts per chan

        if self._poly_fit_exists:

            for chan in range(self._n_channels):

                total_counts = 0
                counts_err = 0

                for tmin, tmax in zip(self._time_intervals.start_times,
                                      self._time_intervals.stop_times):
                    # Now integrate the appropriate background polynomial
                    total_counts += self._polynomials[chan].integral(
                        tmin, tmax)
                    counts_err += (self._polynomials[chan].integral_error(
                        tmin, tmax))**2

                tmp_counts.append(total_counts)

                tmp_err.append(np.sqrt(counts_err))

            self._poly_counts = np.array(tmp_counts)

            self._poly_count_err = np.array(tmp_err)

            # apply the dead time correction to the background counts
            # and errors
            corr = self._exposure / (self._active_dead_time + self._exposure)

            self._poly_counts *= corr

            self._poly_count_err *= corr
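
Because of the merge_intersecting_intervals call above, overlapping selections collapse before exposure and dead time are computed. A minimal sketch of that behaviour (import path assumed; the merged result is what the method name implies):

from threeML.utils.time_interval import TimeIntervalSet

ti = TimeIntervalSet.from_strings("0-10", "5-15", "20-30")
ti.merge_intersecting_intervals(in_place=True)

# "0-10" and "5-15" intersect, so they are expected to collapse into 0-15,
# while the disjoint "20-30" survives untouched
assert ti[0].start_time == 0 and ti[0].stop_time == 15
assert ti[1].start_time == 20 and ti[1].stop_time == 30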
Example #36
    def __init__(self, pha_file_or_instance, file_type='observed', rsp_file=None, arf_file=None):
        """
        A spectrum with dispersion built from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
        spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
        in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
        bounds can be obtained.


        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) an ARF filename
        """

        # extract the spectrum number if needed



        assert isinstance(pha_file_or_instance, str) or isinstance(pha_file_or_instance,
                                                                   PHAII), 'Must provide a FITS file name or PHAII instance'

        with fits.open(pha_file_or_instance) as f:

            try:

                HDUidx = f.index_of("SPECTRUM")

            except KeyError:

                raise RuntimeError("The input file %s is not in PHA format" % pha_file_or_instance)

            spectrum = f[HDUidx]
            data = spectrum.data

            if "COUNTS" in data.columns.names:

                has_rates = False
                data_column_name = "COUNTS"

            elif "RATE" in data.columns.names:

                has_rates = True
                data_column_name = "RATE"

            else:

                raise RuntimeError("This file does not contain a RATE nor a COUNTS column. "
                                   "This is not a valid PHA file")

            # Determine if this is a PHA I or PHA II
            if len(data.field(data_column_name).shape) == 2:

                num_spectra = data.field(data_column_name).shape[0]

            else:

                raise RuntimeError("This appears to be a PHA I and not PHA II file")

        pha_information = _read_pha_or_pha2_file(pha_file_or_instance,
                                                 None,
                                                 file_type,
                                                 rsp_file,
                                                 arf_file,
                                                 treat_as_time_series=True)

        # default the grouping to all open bins
        # this will only be altered if the spectrum is rebinned
        self._grouping = np.ones_like(pha_information['counts'])

        # this saves the extra properties to the class

        self._gathered_keywords = pha_information['gathered_keywords']

        self._file_type = file_type


        # need to see if we have count errors, tstart, tstop
        # if not, we create a list of None

        if pha_information['count_errors'] is None:

            count_errors = [None]*num_spectra

        else:

            count_errors = pha_information['count_errors']

        if pha_information['tstart'] is None:

            tstart = [None] * num_spectra

        else:

            tstart = pha_information['tstart']

        if pha_information['tstop'] is None:

            tstop = [None] * num_spectra

        else:

            tstop = pha_information['tstop']


        # now build the list of binned spectra

        list_of_binned_spectra = []


        with progress_bar(num_spectra, title='Loading PHAII spectra') as p:
            for i in range(num_spectra):


                list_of_binned_spectra.append(BinnedSpectrumWithDispersion(counts=pha_information['counts'][i],
                                                                           exposure=pha_information['exposure'][i,0],
                                                                           response=pha_information['rsp'],
                                                                           count_errors=count_errors[i],
                                                                           sys_errors=pha_information['sys_errors'][i],
                                                                           is_poisson=pha_information['is_poisson'],
                                                                           quality=pha_information['quality'].get_slice(i),
                                                                           mission=pha_information['gathered_keywords']['mission'],
                                                                           instrument=pha_information['gathered_keywords']['instrument'],
                                                                           tstart=tstart[i],
                                                                           tstop=tstop[i]))

                p.increase()

        # now get the time intervals

        start_times = data.field('TIME')
        stop_times = data.field('ENDTIME')

        time_intervals = TimeIntervalSet.from_starts_and_stops(start_times, stop_times)

        reference_time = 0

        # see if there is a reference time in the file

        if 'TRIGTIME' in spectrum.header:
            reference_time = spectrum.header['TRIGTIME']

        for t_number in range(spectrum.header['TFIELDS']):

            if 'TZERO%d' % t_number in spectrum.header:
                reference_time = spectrum.header['TZERO%d' % t_number]

        super(PHASpectrumSet, self).__init__(list_of_binned_spectra,
                                             reference_time=reference_time,
                                             time_intervals=time_intervals)
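
The TIME/ENDTIME extraction above can be reproduced with astropy alone. A minimal sketch (hypothetical file name; the column names follow the GBM PHA-II convention assumed by this class):

from astropy.io import fits

with fits.open("example_pha2_file.pha") as f:

    data = f[f.index_of("SPECTRUM")].data

    start_times = data.field("TIME")
    stop_times = data.field("ENDTIME")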
Example #37
class InstrumentResponseSet(object):
    """
    A set of responses

    """
    def __init__(self,
                 matrix_list: List[InstrumentResponse],
                 exposure_getter: Callable,
                 counts_getter: Callable,
                 reference_time: float = 0.0):
        """

        :param matrix_list:
        :type matrix_list : list[InstrumentResponse]
        :param exposure_getter : a function returning the exposure between t1 and t2
        :param counts_getter : a function returning the number of counts between t1 and t2
        :param reference_time : a reference time to be added to the specifications of the intervals used in the
        weight_by_* methods. Use this if you want to express the time intervals in time units from the reference_time,
        instead of "absolute" time. For GRBs, this is the trigger time. NOTE: if you use a reference time, the
        counts_getter and the exposure_getter must accept times relative to the reference time.
        """

        # Store list of matrices

        self._matrix_list: Union[List[InstrumentResponse],
                                 np.ndarray] = list(matrix_list)

        # Create the corresponding list of coverage intervals

        self._coverage_intervals: TimeIntervalSet = TimeIntervalSet(
            [x.coverage_interval for x in self._matrix_list])

        # Make sure that all matrices have coverage interval set

        if None in self._coverage_intervals:

            log.error(
                "You need to specify the coverage interval for all matrices in the matrix_list"
            )

            raise NoCoverageIntervals(
                "You need to specify the coverage interval for all matrices in the matrix_list"
            )

        # Remove from the list matrices that cover intervals of zero duration (yes, the GBM publishes those too,
        # one example is in data/ogip_test_gbm_b0.rsp2)
        to_be_removed = []
        for i, interval in enumerate(self._coverage_intervals):

            if interval.duration == 0:

                # Remove it
                with custom_warnings.catch_warnings():

                    custom_warnings.simplefilter("always", RuntimeWarning)

                    log.warning(
                        "Removing matrix %s (numbering starts at zero) because it has a coverage of "
                        "zero seconds" % i,
                        # RuntimeWarning,
                    )

                to_be_removed.append(i)

        # Actually remove them
        if len(to_be_removed) > 0:

            # pop in reverse order so that earlier removals do not shift
            # the indices of the later ones
            for index in reversed(to_be_removed):

                self._matrix_list.pop(index)
                self._coverage_intervals.pop(index)

        # Order the matrices by time

        idx = self._coverage_intervals.argsort()

        # It is possible that there is only one coverage interval (these are published by GBM e.g. GRB090819607)
        # so we need to be sure that the array is at least 1D

        self._coverage_intervals = TimeIntervalSet(
            np.atleast_1d(itemgetter(*idx)(self._coverage_intervals)))
        self._matrix_list = np.atleast_1d(itemgetter(*idx)(self._matrix_list))
        # Now make sure that the coverage intervals are contiguous (i.e., there are no gaps)
        if not self._coverage_intervals.is_contiguous():

            log.error(
                "The provided responses have coverage intervals which are not contiguous!"
            )

            raise NonContiguousCoverageIntervals()

        # Apply the reference time shift, if any
        self._coverage_intervals -= reference_time

        # Store callable

        self._exposure_getter: Callable = exposure_getter

        self._counts_getter: Callable = counts_getter

        # Store reference time

        self._reference_time: float = float(reference_time)

    @property
    def reference_time(self) -> float:

        return self._reference_time

    def __getitem__(self, item) -> InstrumentResponse:

        return self._matrix_list[item]

    def __len__(self) -> int:

        return len(self._matrix_list)

    @classmethod
    def from_rsp2_file(
        cls,
        rsp2_file: Union[str, Path],
        exposure_getter: Callable,
        counts_getter: Callable,
        reference_time: float = 0.0,
        half_shifted: bool = True,
    ) -> "InstrumentResponseSet":

        # This assumes the Fermi/GBM rsp2 file format

        # make the rsp file proper
        rsp_file: Path = sanitize_filename(rsp2_file)

        if not file_existing_and_readable(rsp_file):
            log.error("OGIPResponse file %s not existing or not readable" %
                      rsp_file)

            raise RuntimeError()

        # Will fill up the list of matrices
        list_of_matrices = []

        # Read the response
        with pyfits.open(rsp_file) as f:

            n_responses = f["PRIMARY"].header["DRM_NUM"]

            # we will read all the matrices and save them
            for rsp_number in range(1, n_responses + 1):

                this_response = OGIPResponse(
                    str(rsp2_file) + "{%i}" % rsp_number)

                list_of_matrices.append(this_response)

        if half_shifted:

            # Now the GBM format has a strange feature: the matrix, instead of covering from TSTART to TSTOP, covers
            # from (TSTART + TSTOP) / 2.0 of the previous matrix to the (TSTART + TSTOP) / 2.0 of itself.
            # So let's adjust the coverage intervals accordingly

            if len(list_of_matrices) > 1:

                for i, this_matrix in enumerate(list_of_matrices):

                    if i == 0:

                        # The first matrix covers from its TSTART to its half time

                        this_matrix._coverage_interval = TimeInterval(
                            this_matrix.coverage_interval.start_time,
                            this_matrix.coverage_interval.half_time,
                        )

                    else:

                        # Any other matrix covers from the half time of the previous matrix to its own half time.
                        # The previous matrix has already been processed, so its stop time is already
                        # the half time it had before processing

                        prev_matrix = list_of_matrices[i - 1]

                        this_matrix._coverage_interval = TimeInterval(
                            prev_matrix.coverage_interval.stop_time,
                            this_matrix.coverage_interval.half_time,
                        )

        return InstrumentResponseSet(list_of_matrices, exposure_getter,
                                     counts_getter, reference_time)

    # I didn't re-implement this at the moment
    # def _display_response_weighting(self, weights, tstarts, tstops):
    #
    #     fig, ax = plt.subplots()
    #
    #     # plot the time intervals
    #
    #     ax.hlines(min(weights) - .1, tstarts, tstops, color='red', label='selected intervals')
    #
    #     ax.hlines(np.median(weights), self._true_rsp_intervals[0], self._true_rsp_intervals[1], color='green',
    #               label='true rsp intervals')
    #
    #     ax.hlines(max(self._weight) + .1, self._matrix_start, self._matrix_stop, color='blue',
    #               label='rsp header intervals')
    #
    #     mean_true_rsp_time = np.mean(self._true_rsp_intervals.T, axis=1)
    #
    #     ax.plot(mean_true_rsp_time, self._weight, '+k', label='weight')

    def weight_by_exposure(self, *intervals) -> InstrumentResponse:

        return self._get_weighted_matrix("exposure", *intervals)

    def weight_by_counts(self, *intervals) -> InstrumentResponse:

        return self._get_weighted_matrix("counts", *intervals)

    def _get_weighted_matrix(self, switch: str,
                             *intervals) -> InstrumentResponse:

        if not len(intervals) > 0:

            log.error("You have to provide at least one interval")

            raise RuntimeError()

        intervals_set = TimeIntervalSet.from_strings(*intervals)

        # Compute a set of weights for each interval
        weights = np.zeros(len(self._matrix_list))

        for interval in intervals_set:

            weights += self._weight_response(interval, switch)

        # Normalize to 1
        weights /= np.sum(weights)

        # Weight matrices
        matrix = np.dot(
            np.array(list(map(attrgetter("matrix"), self._matrix_list))).T,
            weights.T).T

        # Now generate the instance of the response

        # get EBOUNDS from the first matrix
        ebounds = self._matrix_list[0].ebounds

        # Get mc channels from the first matrix
        mc_channels = self._matrix_list[0].monte_carlo_energies

        matrix_instance = InstrumentResponse(matrix, ebounds, mc_channels)

        return matrix_instance

    def _weight_response(self, interval_of_interest: TimeInterval,
                         switch: str) -> np.ndarray:
        """

        :param interval_start : start time of the interval
        :param interval_stop : stop time of the interval
        :param switch: either 'counts' or 'exposure'

        """

        #######################
        # NOTE: the weights computed here are *not* normalized to one so that they can be combined if there is
        # more than one interval
        #######################

        # Now mark all responses which overlap with the interval of interest
        # NOTE: this is a mask of the same length as _matrix_list and _coverage_intervals

        matrices_mask = [
            c_i.overlaps_with(interval_of_interest)
            for c_i in self._coverage_intervals
        ]

        # Check that we have at least one matrix

        if not np.any(matrices_mask):

            log.error(
                "Could not find any matrix applicable to %s\n Have intervals:%s"
                % (
                    interval_of_interest,
                    ", ".join([
                        str(interval) for interval in self._coverage_intervals
                    ]),
                ))

            raise NoMatrixForInterval()

        # Compute the weights

        weights = np.empty_like(self._matrix_list, float)

        # These "effective intervals" are how much of the coverage interval is really used for each matrix
        # NOTE: the length of effective_intervals list *will not be* the same as the weight mask or the matrix_list.
        # There are as many effective intervals as matrices with weight > 0

        effective_intervals = []

        for i, matrix in enumerate(self._matrix_list):

            if matrices_mask[i]:

                # A matrix of interest
                this_coverage_interval = self._coverage_intervals[i]

                # See how much it overlaps with the interval of interest
                this_effective_interval = this_coverage_interval.intersect(
                    interval_of_interest)

                effective_intervals.append(this_effective_interval)

                # Now compute the weight

                if switch == "counts":

                    # Weight according to the number of events
                    weights[i] = self._counts_getter(
                        this_effective_interval.start_time,
                        this_effective_interval.stop_time,
                    )

                elif switch == "exposure":

                    # Weight according to the exposure
                    weights[i] = self._exposure_getter(
                        this_effective_interval.start_time,
                        this_effective_interval.stop_time,
                    )

            else:

                # Uninteresting matrix
                weights[i] = 0.0

        # if all weights are zero, there is something clearly wrong with the exposure or the counts computation
        if not np.sum(weights) > 0:

            log.error(
                "All weights are zero. There must be a bug in the exposure or counts computation"
            )

            raise RuntimeError()

        # Check that the first matrix with weight > 0 has an effective interval starting at the beginning of
        # the interval of interest (otherwise it means that part of the interval of interest is not covered!)

        if effective_intervals[0].start_time != interval_of_interest.start_time:

            log.error("The interval of interest (%s) is not covered by %s" %
                      (interval_of_interest, effective_intervals[0]))

            raise IntervalOfInterestNotCovered()

        # Check that the last matrix with weight > 0 has an effective interval ending at the end of
        # the interval of interest (otherwise it means that part of the interval of interest is not covered!)

        if effective_intervals[-1].stop_time != interval_of_interest.stop_time:

            log.error("The interval of interest (%s) is not covered by %s" %
                      (interval_of_interest, effective_intervals[0]))

            raise IntervalOfInterestNotCovered()

        # Lastly, check that there is no interruption in coverage (bad time intervals are *not* supported)
        all_tstarts = np.array([x.start_time for x in effective_intervals])
        all_tstops = np.array([x.stop_time for x in effective_intervals])

        if not np.all((all_tstops[:-1] == all_tstarts[1:])):

            log.error("Gap in coverage! Bad time intervals are not supported!")

            raise GapInCoverageIntervals()

        return weights

    @property
    def ebounds(self) -> np.ndarray:

        return self._matrix_list[0].ebounds

    @property
    def monte_carlo_energies(self) -> np.ndarray:

        return self._matrix_list[0].monte_carlo_energies
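
A hedged usage sketch for the class above. The rsp2 file name is hypothetical, and the two getters are deliberately trivial stand-ins for real exposure and counts lookups:

def exposure_getter(t1, t2):
    # pretend the detector is always live
    return t2 - t1

def counts_getter(t1, t2):
    # pretend a constant rate of 100 counts/s
    return 100.0 * (t2 - t1)

rsp_set = InstrumentResponseSet.from_rsp2_file("example_gbm.rsp2",
                                               exposure_getter,
                                               counts_getter,
                                               reference_time=0.0)

# weight the matrices over one selection, either by exposure or by counts
weighted_rsp = rsp_set.weight_by_exposure("0-10")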
Example #38
File: response.py Project: giacomov/3ML
class InstrumentResponseSet(object):
    """
    A set of responses

    """
    def __init__(self, matrix_list, exposure_getter, counts_getter, reference_time=0.0):
        """

        :param matrix_list:
        :type matrix_list : list[InstrumentResponse]
        :param exposure_getter : a function returning the exposure between t1 and t2
        :param counts_getter : a function returning the number of counts between t1 and t2
        :param reference_time : a reference time to be added to the specifications of the intervals used in the
        weight_by_* methods. Use this if you want to express the time intervals in time units from the reference_time,
        instead of "absolute" time. For GRBs, this is the trigger time. NOTE: if you use a reference time, the
        counts_getter and the exposure_getter must accept times relative to the reference time.
        """

        # Store list of matrices

        self._matrix_list = list(matrix_list)  # type: list[InstrumentResponse]

        # Create the corresponding list of coverage intervals

        self._coverage_intervals = TimeIntervalSet([x.coverage_interval
                                                    for x in self._matrix_list])

        # Make sure that all matrices have coverage interval set

        if None in self._coverage_intervals:

            raise NoCoverageIntervals("You need to specify the coverage interval for all matrices in the matrix_list")

        # Remove from the list matrices that cover intervals of zero duration (yes, the GBM publishes those too,
        # one example is in data/ogip_test_gbm_b0.rsp2)
        to_be_removed = []
        for i, interval in enumerate(self._coverage_intervals):

            if interval.duration == 0:

                # Remove it
                with custom_warnings.catch_warnings():

                    custom_warnings.simplefilter("always", RuntimeWarning)

                    custom_warnings.warn("Removing matrix %s (numbering starts at zero) because it has a coverage of "
                                         "zero seconds" % i, RuntimeWarning)

                to_be_removed.append(i)

        # Actually remove them
        if len(to_be_removed) > 0:

            # pop in reverse order so that earlier removals do not shift
            # the indices of the later ones
            for index in reversed(to_be_removed):

                self._matrix_list.pop(index)
                self._coverage_intervals.pop(index)

        # Order the matrices by time

        idx = self._coverage_intervals.argsort()

        # It is possible that there is only one coverage interval (these are published by GBM e.g. GRB090819607)
        # so we need to be sure that the array is at least 1D

        self._coverage_intervals = TimeIntervalSet(np.atleast_1d(itemgetter(*idx)(self._coverage_intervals)))
        self._matrix_list = np.atleast_1d(itemgetter(*idx)(self._matrix_list))
        # Now make sure that the coverage intervals are contiguous (i.e., there are no gaps)
        if not self._coverage_intervals.is_contiguous():

            raise NonContiguousCoverageIntervals("The provided responses have coverage intervals which are not contiguous!")

        # Apply the reference time shift, if any
        self._coverage_intervals -= reference_time

        # Store callable

        self._exposure_getter = exposure_getter  # type: callable

        self._counts_getter = counts_getter  # type: callable

        # Store reference time

        self._reference_time = float(reference_time)

    @property
    def reference_time(self):

        return self._reference_time

    def __getitem__(self, item):

        return self._matrix_list[item]

    def __len__(self):

        return len(self._matrix_list)

    @classmethod
    def from_rsp2_file(cls, rsp2_file, exposure_getter, counts_getter, reference_time=0.0, half_shifted=True):

        # This assumes the Fermi/GBM rsp2 file format

        # make the rsp file proper
        rsp_file = sanitize_filename(rsp2_file)

        assert file_existing_and_readable(rsp_file), "OGIPResponse file %s does not exist or is not readable" % rsp_file

        # Will fill up the list of matrices
        list_of_matrices = []

        # Read the response
        with pyfits.open(rsp_file) as f:

            n_responses = f['PRIMARY'].header['DRM_NUM']

            # we will read all the matrices and save them
            for rsp_number in range(1, n_responses + 1):

                this_response = OGIPResponse(rsp2_file + '{%i}' % rsp_number)

                list_of_matrices.append(this_response)

        if half_shifted:

            # Now the GBM format has a strange feature: the matrix, instead of covering from TSTART to TSTOP, covers
            # from (TSTART + TSTOP) / 2.0 of the previous matrix to the (TSTART + TSTOP) / 2.0 of itself.
            # So let's adjust the coverage intervals accordingly

            if len(list_of_matrices) > 1:

                for i, this_matrix in enumerate(list_of_matrices):

                    if i == 0:

                        # The first matrix covers from its TSTART to its half time

                        this_matrix._coverage_interval = TimeInterval(this_matrix.coverage_interval.start_time,
                                                                      this_matrix.coverage_interval.half_time)

                    else:

                        # Any other matrix covers from the half time of the previous matrix to its own half time.
                        # The previous matrix has already been processed, so its stop time is already
                        # the half time it had before processing

                        prev_matrix = list_of_matrices[i-1]

                        this_matrix._coverage_interval = TimeInterval(prev_matrix.coverage_interval.stop_time,
                                                                      this_matrix.coverage_interval.half_time)


        return InstrumentResponseSet(list_of_matrices, exposure_getter, counts_getter, reference_time)

    # I didn't re-implement this at the moment
    # def _display_response_weighting(self, weights, tstarts, tstops):
    #
    #     fig, ax = plt.subplots()
    #
    #     # plot the time intervals
    #
    #     ax.hlines(min(weights) - .1, tstarts, tstops, color='red', label='selected intervals')
    #
    #     ax.hlines(np.median(weights), self._true_rsp_intervals[0], self._true_rsp_intervals[1], color='green',
    #               label='true rsp intervals')
    #
    #     ax.hlines(max(self._weight) + .1, self._matrix_start, self._matrix_stop, color='blue',
    #               label='rsp header intervals')
    #
    #     mean_true_rsp_time = np.mean(self._true_rsp_intervals.T, axis=1)
    #
    #     ax.plot(mean_true_rsp_time, self._weight, '+k', label='weight')

    def weight_by_exposure(self, *intervals):

        return self._get_weighted_matrix("exposure", *intervals)

    def weight_by_counts(self, *intervals):

        return self._get_weighted_matrix("counts", *intervals)

    def _get_weighted_matrix(self, switch, *intervals):

        assert len(intervals) > 0, "You have to provide at least one interval"

        intervals_set = TimeIntervalSet.from_strings(*intervals)

        # Compute a set of weights for each interval
        weights = np.zeros(len(self._matrix_list))

        for interval in intervals_set:

            weights += self._weight_response(interval, switch)

        # Normalize to 1
        weights /= np.sum(weights)

        # Weight matrices
        matrix = np.dot(np.array(list(map(attrgetter("matrix"), self._matrix_list))).T, weights.T).T

        # Now generate the instance of the response

        # get EBOUNDS from the first matrix
        ebounds = self._matrix_list[0].ebounds

        # Get mc channels from the first matrix
        mc_channels = self._matrix_list[0].monte_carlo_energies

        matrix_instance = InstrumentResponse(matrix, ebounds, mc_channels)

        return matrix_instance

    def _weight_response(self, interval_of_interest, switch):

        """

        :param interval_start : start time of the interval
        :param interval_stop : stop time of the interval
        :param switch: either 'counts' or 'exposure'

        """

        #######################
        # NOTE: the weights computed here are *not* normalized to one so that they can be combined if there is
        # more than one interval
        #######################

        # Now mark all responses which overlap with the interval of interest
        # NOTE: this is a mask of the same length as _matrix_list and _coverage_intervals

        matrices_mask = [c_i.overlaps_with(interval_of_interest)
                         for c_i in self._coverage_intervals]

        # Check that we have at least one matrix

        if not np.any(matrices_mask):

            raise NoMatrixForInterval("Could not find any matrix applicable to %s\n Have intervals:%s" % (interval_of_interest,', '.join([str(interval) for interval in self._coverage_intervals]) ))

        # Compute the weights

        weights = np.empty_like(self._matrix_list, float)

        # These "effective intervals" are how much of the coverage interval is really used for each matrix
        # NOTE: the length of effective_intervals list *will not be* the same as the weight mask or the matrix_list.
        # There are as many effective intervals as matrices with weight > 0

        effective_intervals = []

        for i, matrix in enumerate(self._matrix_list):

            if matrices_mask[i]:

                # A matrix of interest
                this_coverage_interval = self._coverage_intervals[i]

                # See how much it overlaps with the interval of interest
                this_effective_interval = this_coverage_interval.intersect(interval_of_interest)

                effective_intervals.append(this_effective_interval)

                # Now compute the weight

                if switch == 'counts':

                    # Weight according to the number of events
                    weights[i] = self._counts_getter(this_effective_interval.start_time,
                                                     this_effective_interval.stop_time)

                elif switch == 'exposure':

                    # Weight according to the exposure
                    weights[i] = self._exposure_getter(this_effective_interval.start_time,
                                                       this_effective_interval.stop_time)

            else:

                # Uninteresting matrix
                weights[i] = 0.0

        # if all weights are zero, there is something clearly wrong with the exposure or the counts computation
        assert np.sum(weights) > 0, "All weights are zero. There must be a bug in the exposure or counts computation"

        # Check that the first matrix with weight > 0 has an effective interval starting at the beginning of
        # the interval of interest (otherwise it means that part of the interval of interest is not covered!)

        if effective_intervals[0].start_time != interval_of_interest.start_time:

            raise IntervalOfInterestNotCovered('The interval of interest (%s) is not covered by %s'% (interval_of_interest,effective_intervals[0]))

        # Check that the last matrix with weight > 0 has an effective interval ending at the end of
        # the interval of interest (otherwise it means that part of the interval of interest is not covered!)

        if effective_intervals[-1].stop_time != interval_of_interest.stop_time:
            raise IntervalOfInterestNotCovered(
                'The interval of interest (%s) is not covered by %s' % (interval_of_interest, effective_intervals[-1]))


        # Lastly, check that there is no interruption in coverage (bad time intervals are *not* supported)
        all_tstarts = np.array([x.start_time for x in effective_intervals])
        all_tstops = np.array([x.stop_time for x in effective_intervals])

        if not np.all((all_tstops[:-1] == all_tstarts[1:])):

            raise GapInCoverageIntervals("Gap in coverage! Bad time intervals are not supported!")



        return weights

    @property
    def ebounds(self):

        return self._matrix_list[0].ebounds

    @property
    def monte_carlo_energies(self):

        return self._matrix_list[0].monte_carlo_energies
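
The half-shift bookkeeping in from_rsp2_file can be checked in isolation. A small sketch using TimeInterval directly (import path assumed; half_time is the (TSTART + TSTOP) / 2 midpoint described in the comments above):

from threeML.utils.time_interval import TimeInterval

first = TimeInterval(0.0, 10.0)
second = TimeInterval(10.0, 20.0)

# the first matrix keeps its start time but now stops at its own half time...
shifted_first = TimeInterval(first.start_time, first.half_time)

# ...and every later matrix starts where the previous (shifted) one stopped
shifted_second = TimeInterval(shifted_first.stop_time, second.half_time)

assert (shifted_first.start_time, shifted_first.stop_time) == (0.0, 5.0)
assert (shifted_second.start_time, shifted_second.stop_time) == (5.0, 15.0)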
Example #39
    def __init__(self,
                 matrix_list: List[InstrumentResponse],
                 exposure_getter: Callable,
                 counts_getter: Callable,
                 reference_time: float = 0.0):
        """

        :param matrix_list:
        :type matrix_list : list[InstrumentResponse]
        :param exposure_getter : a function returning the exposure between t1 and t2
        :param counts_getter : a function returning the number of counts between t1 and t2
        :param reference_time : a reference time to be added to the specifications of the intervals used in the
        weight_by_* methods. Use this if you want to express the time intervals in time units from the reference_time,
        instead of "absolute" time. For GRBs, this is the trigger time. NOTE: if you use a reference time, the
        counts_getter and the exposure_getter must accept times relative to the reference time.
        """

        # Store list of matrices

        self._matrix_list: Union[List[InstrumentResponse],
                                 np.ndarray] = list(matrix_list)

        # Create the corresponding list of coverage intervals

        self._coverage_intervals: TimeIntervalSet = TimeIntervalSet(
            [x.coverage_interval for x in self._matrix_list])

        # Make sure that all matrices have coverage interval set

        if None in self._coverage_intervals:

            log.error(
                "You need to specify the coverage interval for all matrices in the matrix_list"
            )

            raise NoCoverageIntervals(
                "You need to specify the coverage interval for all matrices in the matrix_list"
            )

        # Remove from the list matrices that cover intervals of zero duration (yes, the GBM publishes those too,
        # one example is in data/ogip_test_gbm_b0.rsp2)
        to_be_removed = []
        for i, interval in enumerate(self._coverage_intervals):

            if interval.duration == 0:

                # Remove it
                with custom_warnings.catch_warnings():

                    custom_warnings.simplefilter("always", RuntimeWarning)

                    log.warning(
                        "Removing matrix %s (numbering starts at zero) because it has a coverage of "
                        "zero seconds" % i,
                        # RuntimeWarning,
                    )

                to_be_removed.append(i)

        # Actually remove them
        if len(to_be_removed) > 0:

            # pop in reverse order so that earlier removals do not shift
            # the indices of the later ones
            for index in reversed(to_be_removed):

                self._matrix_list.pop(index)
                self._coverage_intervals.pop(index)

        # Order the matrices by time

        idx = self._coverage_intervals.argsort()

        # It is possible that there is only one coverage interval (these are published by GBM e.g. GRB090819607)
        # so we need to be sure that the array is at least 1D

        self._coverage_intervals = TimeIntervalSet(
            np.atleast_1d(itemgetter(*idx)(self._coverage_intervals)))
        self._matrix_list = np.atleast_1d(itemgetter(*idx)(self._matrix_list))
        # Now make sure that the coverage intervals are contiguous (i.e., there are no gaps)
        if not self._coverage_intervals.is_contiguous():

            log.error(
                "The provided responses have coverage intervals which are not contiguous!"
            )

            raise NonContiguousCoverageIntervals()

        # Apply the reference time shift, if any
        self._coverage_intervals -= reference_time

        # Store callable

        self._exposure_getter: Callable = exposure_getter

        self._counts_getter: Callable = counts_getter

        # Store reference time

        self._reference_time: float = float(reference_time)
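
The argsort-and-itemgetter reordering used above, shown standalone on deliberately out-of-order intervals (import paths assumed):

from operator import itemgetter

import numpy as np

from threeML.utils.time_interval import TimeInterval, TimeIntervalSet

intervals = TimeIntervalSet([TimeInterval(10.0, 20.0), TimeInterval(0.0, 10.0)])

idx = intervals.argsort()  # expected: [1, 0]

ordered = TimeIntervalSet(np.atleast_1d(itemgetter(*idx)(intervals)))

assert ordered[0].start_time == 0.0
assert ordered.is_contiguous()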
Example #40
File: time_series.py Project: giacomov/3ML
    def restore_fit(self, filename):


        filename_sanitized = sanitize_filename(filename)

        with HDFStore(filename_sanitized) as store:

            coefficients = store['coefficients']

            covariance = store['covariance']

            self._polynomials = []

            # create new polynomials

            for i in range(len(coefficients)):
                coeff = np.array(coefficients.loc[i])

                # make sure we get the right order
                # pandas stores the non-needed coeff
                # as nans.

                coeff = coeff[np.isfinite(coeff)]

                cov = covariance.loc[i]

                self._polynomials.append(Polynomial.from_previous_fit(coeff, cov))

            metadata = store.get_storer('coefficients').attrs.metadata

            self._optimal_polynomial_grade = metadata['poly_order']
            poly_selections = np.array(metadata['poly_selections'])

            self._poly_intervals = TimeIntervalSet.from_starts_and_stops(poly_selections[:, 0], poly_selections[:, 1])
            self._unbinned = metadata['unbinned']

            if self._unbinned:
                self._fit_method_info['bin type'] = 'unbinned'

            else:

                self._fit_method_info['bin type'] = 'binned'

            self._fit_method_info['fit method'] = metadata['fit_method']

        # go through and count the counts!

        self._poly_fit_exists = True


        # we must go through and collect the polynomial exposure and counts
        # so that they can be extracted if needed
        self._poly_exposure = 0.
        self._poly_selected_counts = []
        for i, time_interval in enumerate(self._poly_intervals):

            t1 = time_interval.start_time
            t2 = time_interval.stop_time

            self._poly_selected_counts.append(self.count_per_channel_over_interval(t1, t2))
            self._poly_exposure += self.exposure_over_interval(t1, t2)

        self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
        if self._time_selection_exists:
            self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
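
A hedged usage sketch for restore_fit. Here time_series stands in for an EventList-like object from these examples, and the file name is hypothetical (written earlier by the matching save method of the same class):

# restore a previously fitted set of background polynomials from disk
time_series.restore_fit("background_fit_store.h5")

# if an active time selection already exists, the code above re-applies it
# automatically, so the background counts are immediately consistent again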
Example #41
    def restore_fit(self, filename):

        filename_sanitized = sanitize_filename(filename)

        with HDFStore(filename_sanitized) as store:

            coefficients = store['coefficients']

            covariance = store['covariance']

            self._polynomials = []

            # create new polynomials

            for i in range(len(coefficients)):
                coeff = np.array(coefficients.loc[i])

                # make sure we get the right order
                # pandas stores the non-needed coeff
                # as nans.

                coeff = coeff[np.isfinite(coeff)]

                cov = covariance.loc[i]

                self._polynomials.append(
                    Polynomial.from_previous_fit(coeff, cov))

            metadata = store.get_storer('coefficients').attrs.metadata

            self._optimal_polynomial_grade = metadata['poly_order']
            poly_selections = np.array(metadata['poly_selections'])

            self._poly_intervals = TimeIntervalSet.from_starts_and_stops(
                poly_selections[:, 0], poly_selections[:, 1])
            self._unbinned = metadata['unbinned']

            if self._unbinned:
                self._fit_method_info['bin type'] = 'unbinned'

            else:

                self._fit_method_info['bin type'] = 'binned'

            self._fit_method_info['fit method'] = metadata['fit_method']

        # go through and count the counts!

        self._poly_fit_exists = True

        # we must go through and collect the polynomial exposure and counts
        # so that they can be extracted if needed
        self._poly_exposure = 0.
        self._poly_selected_counts = []
        for i, time_interval in enumerate(self._poly_intervals):

            t1 = time_interval.start_time
            t2 = time_interval.stop_time

            self._poly_selected_counts.append(
                self.count_per_channel_over_interval(t1, t2))
            self._poly_exposure += self.exposure_over_interval(t1, t2)

        self._poly_selected_counts = np.sum(self._poly_selected_counts, axis=0)
        if self._time_selection_exists:
            self.set_active_time_intervals(
                *self._time_intervals.to_string().split(','))
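
The NaN-trimming step above, shown standalone: pandas pads shorter coefficient rows with NaN so that all rows fit one table, and np.isfinite strips that padding back off.

import numpy as np

# an order-1 fit stored in a table sized for an order-3 fit
row = np.array([1.5, -0.2, np.nan, np.nan])

coeff = row[np.isfinite(row)]

assert coeff.tolist() == [1.5, -0.2]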
Example #42
    def set_active_time_intervals(self, *args):
        """
        Set the time interval(s) to be used during the analysis.
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_active_time_intervals("0.0-10.0")

        which will set the time range 0-10 seconds.
        """

        # mark that we now have a time selection

        self._time_selection_exists = True

        # let's build a time interval set from the selections
        # and then merge intersecting intervals

        time_intervals = TimeIntervalSet.from_strings(*args)
        time_intervals.merge_intersecting_intervals(in_place=True)

        # let's adjust the time intervals to the actual ones, since they are prebinned

        time_intervals = self._adjust_to_true_intervals(time_intervals)


        # start out with no time bins selection
        all_idx = np.zeros(len(self._binned_spectrum_set.time_intervals), dtype=bool)

        # now we need to sum up the counts and total time

        total_time = 0

        for interval in time_intervals:

            # the select bins method is called.
            # since we are sure that the interval bounds
            # are aligned with the true ones, we do not care if
            # it is inner or outer

            all_idx = np.logical_or(all_idx, self._select_bins(interval.start_time, interval.stop_time))

            total_time += interval.duration

        # sum along the time axis
        self._counts = self._binned_spectrum_set.counts_per_bin[all_idx].sum(axis=0)


        # the selected time intervals

        self._time_intervals = time_intervals


        tmp_counts = []
        tmp_err = []  # Temporary list to hold the err counts per chan

        if self._poly_fit_exists:

            for chan in range(self._n_channels):

                total_counts = 0
                counts_err = 0

                for tmin, tmax in zip(self._time_intervals.start_times, self._time_intervals.stop_times):
                    # Now integrate the appropriate background polynomial
                    total_counts += self._polynomials[chan].integral(tmin, tmax)
                    counts_err += (self._polynomials[chan].integral_error(tmin, tmax)) ** 2

                tmp_counts.append(total_counts)

                tmp_err.append(np.sqrt(counts_err))

            self._poly_counts = np.array(tmp_counts)

            self._poly_count_err = np.array(tmp_err)


        self._exposure = self._binned_spectrum_set.exposure_per_bin[all_idx].sum()

        self._active_dead_time = total_time - self._exposure
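
A hedged usage sketch for the binned variant above; binned_series stands in for a time series backed by a BinnedSpectrumSet, and only the method call itself is taken from the code above:

binned_series.set_active_time_intervals("0-10", "15-25")

# for prebinned data the selection snaps to the true bin edges, so the
# exposure is the summed exposure_per_bin of the selected bins and
#   active_dead_time = total duration of the selection - exposure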