def test_instrument_response_constructor():

    # Make a fake test matrix
    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    assert np.all(rsp.matrix == matrix)
    assert np.all(rsp.ebounds == ebounds)
    assert np.all(rsp.monte_carlo_energies == mc_energies)

    # Now with coverage interval

    with pytest.raises(AssertionError):

        _ = InstrumentResponse(matrix, ebounds, mc_energies, "10-20")

    rsp = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(10.0, 20.0))

    assert rsp.rsp_filename is None
    assert rsp.arf_filename is None
    assert rsp.coverage_interval == TimeInterval(10.0, 20.0)

    # Check that we do not accept NaNs in the matrix
    matrix[2, 2] = np.nan

    with pytest.raises(AssertionError):

        # (use a valid coverage interval so the NaN is what triggers the failure)
        _ = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(10.0, 20.0))
def test_response_set_constructor():

    [rsp_aw, rsp_bw], exposure_getter, counts_getter = get_matrix_set_elements()

    with pytest.raises(RuntimeError):

        # This should raise because there is no time information for the matrices
        _ = InstrumentResponseSet([rsp_aw, rsp_bw], exposure_getter, counts_getter)

    # Add the time information
    (
        [rsp_a, rsp_b],
        exposure_getter,
        counts_getter,
    ) = get_matrix_set_elements_with_coverage()

    # This should work now
    rsp_set = InstrumentResponseSet([rsp_a, rsp_b], exposure_getter, counts_getter)

    assert rsp_set[0] == rsp_a
    assert rsp_set[1] == rsp_b

    # Check that the constructor orders the matrices by time when needed
    rsp_set = InstrumentResponseSet([rsp_b, rsp_a], exposure_getter, counts_getter)

    assert rsp_set[0] == rsp_a
    assert rsp_set[1] == rsp_b

    # Now test construction from the .from_rsp2 method
    rsp2_file = get_path_of_data_file("ogip_test_gbm_b0.rsp2")

    with warnings.catch_warnings():

        warnings.simplefilter("error", np.VisibleDeprecationWarning)

        rsp_set = InstrumentResponseSet.from_rsp2_file(rsp2_file, exposure_getter, counts_getter)

    assert len(rsp_set) == 3

    # Now test that we cannot initialize a response set with matrices which have
    # non-contiguous coverage intervals
    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp_c = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(0.0, 10.0))
    rsp_d = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(20.0, 30.0))

    with pytest.raises(RuntimeError):

        _ = InstrumentResponseSet([rsp_c, rsp_d], exposure_getter, counts_getter)
def get_matrix_set_elements():

    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp_a = InstrumentResponse(matrix, ebounds, mc_energies)

    # Make another response with the same matrix, but divided by 2
    other_matrix = matrix / 2.0

    rsp_b = InstrumentResponse(other_matrix, ebounds, mc_energies)

    # Remember: the second matrix is like the first one divided by two, and it covers
    # twice as much time: the first one covers 0-10 s, the second one 10-30 s.

    # Fake an exposure getter by using a fixed 10% deadtime
    livetime_fraction = 0.9

    exposure_getter = lambda t1, t2: livetime_fraction * (t2 - t1)

    # Fake a counts getter: the counts follow the law 1.23 * x, so the counts getter
    # is the integral of that law over [t1, t2], scaled by the livetime fraction
    law = lambda x: 1.23 * x

    counts_getter = (
        lambda t1, t2: 1.23 * 0.5 * (t2 ** 2.0 - t1 ** 2.0) * livetime_fraction
    )

    return [rsp_a, rsp_b], exposure_getter, counts_getter
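# Illustrative sanity check (not part of the original helpers; assumes scipy is
# available): the fake counts_getter above is the analytic integral of
# law(x) = 1.23 * x over [t1, t2], scaled by the livetime fraction.
import numpy as np
from scipy.integrate import quad

livetime_fraction = 0.9
law = lambda x: 1.23 * x
counts_getter = lambda t1, t2: 1.23 * 0.5 * (t2 ** 2.0 - t1 ** 2.0) * livetime_fraction

# numerical integral of the law over an example interval (10 s to 30 s)
numeric_integral, _ = quad(law, 10.0, 30.0)

assert np.isclose(counts_getter(10.0, 30.0), numeric_integral * livetime_fraction)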
def test_instrument_response_plot_response():

    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    rsp.plot_matrix()
def test_instrument_response_energy_to_channel():

    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    assert rsp.energy_to_channel(1.5) == 0
    assert rsp.energy_to_channel(2.6) == 1
    assert rsp.energy_to_channel(4.75) == 2
    assert rsp.energy_to_channel(100.0) == 3
def to_3ML_response_direct_sat_coord(self, az, el):

    self.set_location_direct_sat_coord(az, el)

    response = InstrumentResponse(self.matrix, self.ebounds, self.monte_carlo_energies)

    return response
def to_3ML_response(self, ra, dec):

    self.set_location(ra, dec, use_numba=True)

    response = InstrumentResponse(self.matrix, self.ebounds, self.monte_carlo_energies)

    return response
def test_instrument_response_set_function_and_convolve():

    # A very basic test. More tests will be made against XSpec later.
    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    # Integral of a constant, so we know easily what the output should be
    integral_function = lambda e1, e2: e2 - e1

    rsp.set_function(integral_function)

    folded_counts = rsp.convolve()

    assert np.all(folded_counts == [1.0, 2.0, 3.0])
def test_response_write_to_fits1():

    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    temp_file = "__test.rsp"

    rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)

    # Now check that reloading gives back the same matrix
    rsp_reloaded = OGIPResponse(temp_file)

    assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
    assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
    assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)

    os.remove(temp_file)
def __init__(self, polar_root_file, reference_time=0., rsp_file=None):
    """
    Container class that converts raw POLAR ROOT data into useful python variables

    :param polar_root_file: path to POLAR event file
    :param reference_time: reference time of the events (in tunix?)
    :param rsp_file: path to rsp file
    """

    # open the event file
    with open_ROOT_file(polar_root_file) as f:

        tmp = tree_to_ndarray(f.Get('polar_out'))

        # extract the pedestal-corrected ADC channels,
        # which are non-integer and possibly less than zero
        pha = tmp['Energy']

        # negative ADC channels are invalid
        idx = pha >= 0
        pha = pha[idx]

        # get the dead time fraction
        self._dead_time_fraction = tmp['dead_ratio'][idx]

        # get the arrival time, in tunix, of the events
        self._time = tmp['tunix'][idx] - reference_time

    # digitize the ADC channels into bins
    # these bins are preliminary
    with open_ROOT_file(rsp_file) as f:

        matrix = th2_to_arrays(f.Get('rsp'))[-1]
        ebounds = th2_to_arrays(f.Get('EM_bounds'))[-1]
        mc_low = th2_to_arrays(f.Get('ER_low'))[-1]
        mc_high = th2_to_arrays(f.Get('ER_high'))[-1]

    mc_energies = np.append(mc_low, mc_high[-1])

    # build the POLAR response
    self._rsp = InstrumentResponse(matrix=matrix,
                                   ebounds=ebounds,
                                   monte_carlo_energies=mc_energies)

    # bin the ADC channels
    self._binned_pha = np.digitize(pha, ebounds)
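# Minimal, self-contained illustration (hypothetical edge and ADC values) of how
# np.digitize bins the pedestal-corrected ADC channels against the channel edges,
# as done above with the POLAR ebounds.
import numpy as np

ebounds = np.array([0.0, 10.0, 20.0, 40.0, 80.0])  # assumed channel edges
pha = np.array([3.2, 12.7, 39.9, 75.0])            # assumed ADC values

binned_pha = np.digitize(pha, ebounds)

# With the default right=False, bin index i means ebounds[i - 1] <= value < ebounds[i]
assert np.all(binned_pha == [1, 2, 3, 4])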
def test_instrument_response_replace_matrix():

    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    new_matrix = matrix / 2.0

    rsp.replace_matrix(new_matrix)

    assert np.all(rsp.matrix == new_matrix)

    with pytest.raises(AssertionError):

        rsp.replace_matrix(np.random.uniform(0, 1, 100).reshape(10, 10))
def __init__(self, name, time_series, response=None,
             poly_order=-1, unbinned=True, verbose=True,
             restore_poly_fit=None,
             container_type=BinnedSpectrumWithDispersion):
    """
    Class for handling generic time series data, including binned and event list
    series. Depending on the data, this class builds either a SpectrumLike or a
    DispersionSpectrumLike plugin.

    For specific instruments, use the TimeSeries.from() classmethods

    :param name: name for the plugin
    :param time_series: a TimeSeries instance
    :param response: optional InstrumentResponse instance
    :param poly_order: the polynomial order to use for background fitting
    :param unbinned: if the background should be fit unbinned
    :param verbose: the verbosity switch
    :param restore_poly_fit: file from which to read a pre-fitted background
    """

    assert isinstance(time_series, TimeSeries), "must be a TimeSeries instance"

    assert issubclass(container_type, Histogram), 'must be a subclass of Histogram'

    self._name = name

    self._container_type = container_type

    self._time_series = time_series  # type: TimeSeries

    # make sure we have a proper response
    if response is not None:
        assert isinstance(response, InstrumentResponse) or isinstance(
            response, InstrumentResponseSet) or isinstance(
                response, str), 'Response must be an InstrumentResponse, an InstrumentResponseSet, or a file name'

    # deal with RSP weighting if need be
    if isinstance(response, InstrumentResponseSet):

        # we have a weighted response
        self._rsp_is_weighted = True
        self._weighted_rsp = response

        # just get a dummy response for the moment;
        # it will be corrected when we set the interval
        self._response = InstrumentResponse.create_dummy_response(
            response.ebounds, response.monte_carlo_energies)

    else:

        self._rsp_is_weighted = False
        self._weighted_rsp = None

        self._response = response

    self._verbose = verbose
    self._active_interval = None
    self._observed_spectrum = None
    self._background_spectrum = None
    self._measured_background_spectrum = None

    self._time_series.poly_order = poly_order

    self._default_unbinned = unbinned

    # try and restore the poly fit if requested
    if restore_poly_fit is not None:

        if file_existing_and_readable(restore_poly_fit):

            self._time_series.restore_fit(restore_poly_fit)

            if verbose:
                print('Successfully restored fit from %s' % restore_poly_fit)

        else:

            custom_warnings.warn(
                "Could not find saved background %s." % restore_poly_fit)
def test_response_against_xspec():

    # Make a response and write it to a FITS OGIP file
    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    temp_file = "__test.rsp"

    rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)

    # Test for various photon indexes
    for index in np.linspace(-2.0, 2.0, 10):

        if index == 1.0:
            # This would make the integral of the power law logarithmic instead of
            # analytic in the form used below, so let's just skip it
            continue

        # First reset xspec
        xspec.AllData.clear()

        # Create a model in XSpec
        mo = xspec.Model("po")

        # Change the default value for the photon index
        # (remember that in XSpec the power law is defined as norm * E^(-PhoIndex),
        # so PhoIndex is normally positive; this is the opposite of astromodels)
        mo.powerlaw.PhoIndex = index
        mo.powerlaw.norm = 12.2

        # Now repeat the same in 3ML

        # Generate the astromodels function and set it to the same values as the XSpec
        # power law (the pivot in XSpec is set to 1). Remember also that the XSpec
        # definition has the opposite sign for the photon index.
        powerlaw = Powerlaw()
        powerlaw.piv = 1.0
        powerlaw.index = -mo.powerlaw.PhoIndex.values[0]
        powerlaw.K = mo.powerlaw.norm.values[0]

        # Exploit the fact that the power law integral is analytic
        powerlaw_integral = Powerlaw()
        powerlaw_integral.K._transformation = None
        powerlaw_integral.K.bounds = (None, None)
        powerlaw_integral.index = powerlaw.index.value + 1
        powerlaw_integral.K = old_div(powerlaw.K.value, (powerlaw.index.value + 1))

        integral_function = lambda e1, e2: powerlaw_integral(e2) - powerlaw_integral(e1)

        # Now check that the two convolved models give the same number of counts
        # in each channel

        # Fake a spectrum so we can actually compute the convolved model,
        # using the temporary response file written above
        fs1 = xspec.FakeitSettings(
            temp_file, exposure=1.0, fileName="_fake_spectrum.pha"
        )

        xspec.AllData.fakeit(noWrite=True, applyStats=False, settings=fs1)

        # Get the expected counts
        xspec_counts = mo.folded(1)

        # Now get the convolution from 3ML
        rsp.set_function(integral_function)

        threeML_counts = rsp.convolve()

        # Compare them
        assert np.allclose(xspec_counts, threeML_counts)

    os.remove(temp_file)
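# Quick numerical cross-check (illustrative values, assumes scipy) of the analytic
# power-law integral exploited above: with piv = 1, integrating K * (E / piv)**index
# gives K / (index + 1) * (e2**(index + 1) - e1**(index + 1)) for index != -1.
import numpy as np
from scipy.integrate import quad

K, index, piv = 12.2, -1.3, 1.0  # assumed example parameters

pl = lambda e: K * (e / piv) ** index
analytic_integral = lambda e1, e2: K / (index + 1) * (e2 ** (index + 1) - e1 ** (index + 1))

numeric, _ = quad(pl, 1.0, 10.0)

assert np.isclose(numeric, analytic_integral(1.0, 10.0))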
def __init__(self, polar_hdf5_file, polar_hdf5_response=None, reference_time=0.):
    """
    Container class that converts raw POLAR HDF5 data into useful python variables.
    This can build both the polarization and the spectral data.

    :param polar_hdf5_file: path to the POLAR HDF5 event file
    :param polar_hdf5_response: path to the POLAR HDF5 polarization response file
    :param reference_time: reference time of the events (in tunix?)
    """

    with h5py.File(polar_hdf5_file, 'r') as f:

        # This gets the spectral response
        rsp_grp = f['rsp']

        matrix = rsp_grp['matrix'].value
        ebounds = rsp_grp['ebounds'].value
        mc_low = rsp_grp['mc_low'].value
        mc_high = rsp_grp['mc_high'].value

        # extract the pedestal-corrected ADC channels,
        # which are non-integer and possibly less than zero
        pha = f['energy'].value

        # negative ADC channels are invalid
        idx = pha >= 0
        # pha = pha[idx]

        # also drop channels outside the response energy bounds
        idx2 = (pha <= ebounds.max()) & (pha >= ebounds.min())

        pha = pha[idx2 & idx]

        # get the dead time fraction
        self._dead_time_fraction = (f['dead_ratio'].value)[idx & idx2]

        # get the arrival time, in tunix, of the events
        self._time = (f['time'].value)[idx & idx2] - reference_time

        # digitize the ADC channels into bins
        # these bins are preliminary

        # now do the scattering angles
        scattering_angles = f['scatter_angle'].value

        # clear the bad scattering angles
        idx = scattering_angles != -1

        self._scattering_angle_time = (f['time'].value)[idx] - reference_time
        self._scattering_angle_dead_time_fraction = (f['dead_ratio'].value)[idx]
        self._scattering_angles = scattering_angles[idx]

    # build the POLAR response
    mc_energies = np.append(mc_low, mc_high[-1])

    self._rsp = InstrumentResponse(matrix=matrix,
                                   ebounds=ebounds,
                                   monte_carlo_energies=mc_energies)

    # bin the ADC channels
    self._binned_pha = np.digitize(pha, ebounds)

    # bin the scattering angles
    if polar_hdf5_response is not None:

        with h5py.File(polar_hdf5_response, 'r') as f:

            scatter_bounds = f['bins'].value

        self._scattering_bins = scatter_bounds
        self._binned_scattering_angles = np.digitize(self._scattering_angles, scatter_bounds)

    else:

        self._scattering_bins = None
        self._binned_scattering_angles = None