Example #1
def test_file_check(test_directory, test_file):

    assert not file_existing_and_readable(test_directory)

    assert not file_existing_and_readable("this_does_not_exist")

    assert file_existing_and_readable(test_file)
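
Every example on this page revolves around file_existing_and_readable. The following is a minimal sketch of what such a helper presumably does, written as an assumption for illustration rather than the library's actual implementation:

import os

def file_existing_and_readable(filename):
    # Assumed behavior: True only if the path points to an existing regular
    # file that the current user is allowed to read.
    path = os.path.abspath(os.path.expandvars(os.path.expanduser(str(filename))))
    return os.path.isfile(path) and os.access(path, os.R_OK)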
Example #2
    def __init__(self, name, maptree, response, n_transits=None, **kwargs):

        # This controls if the likeHAWC class should load the entire
        # map or just a small disc around a source (faster).
        # Default is the latter, which is way faster. LIFF will decide
        # autonomously which ROI to use depending on the source model

        self.fullsky = False

        if 'fullsky' in kwargs.keys():
            self.fullsky = bool(kwargs['fullsky'])

        self.name = str(name)

        # Sanitize files in input (expand variables and so on)

        self.maptree = os.path.abspath(sanitize_filename(maptree))

        self.response = os.path.abspath(sanitize_filename(response))

        # Check that they exist and can be read

        if not file_existing_and_readable(self.maptree):
            raise IOError("MapTree %s does not exist or is not readable" % maptree)

        if not file_existing_and_readable(self.response):
            raise IOError("Response %s does not exist or is not readable" % response)

        # Postpone the creation of the LIFF instance to when
        # we have the likelihood model

        self.instanced = False

        # Number of transits
        if n_transits is not None:

            self._n_transits = float(n_transits)

        else:

            self._n_transits = None

        # Default value for minChannel and maxChannel

        self.minChannel = int(defaultMinChannel)
        self.maxChannel = int(defaultMaxChannel)

        # By default the fit of the CommonNorm is deactivated

        self.deactivateCommonNorm()

        # This is to keep track of whether the user defined a ROI or not

        self.roi_ra = None

        # Further setup

        self.__setup()
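
The constructor above repeats the same check-then-raise pattern for every input file. A hypothetical helper that wraps this pattern (not part of the library; it assumes file_existing_and_readable and sanitize_filename from these examples are importable):

import os

def require_readable_file(path, label="File"):
    # Hypothetical wrapper around the recurring pattern: sanitize the path,
    # verify it is readable, and raise a uniform IOError otherwise.
    sanitized = os.path.abspath(sanitize_filename(path))
    if not file_existing_and_readable(sanitized):
        raise IOError("%s %s does not exist or is not readable" % (label, path))
    return sanitized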
Example #3
def build_filter_library():

    if not file_existing_and_readable(os.path.join(get_speclite_filter_path(),'filter_lib.yml')):

        print ('Downloading optical filters. This will take a while.\n')

        if internet_connection_is_active():

            filter_dict={}

            filter_dict = download_SVO_filters(filter_dict)

            filter_dict = download_grond(filter_dict)

            # Ok, finally, we want to keep track of the SVO filters we have
            # so we will save this to a YAML file for future reference
            with open(os.path.join(get_speclite_filter_path(),'filter_lib.yml'), 'w') as f:

                yaml.safe_dump(filter_dict, f, default_flow_style=False)

            return True

        else:

            print ("You do not have the 3ML filter library and you do not have an active internet connection.")
            print ("Please connect to the internet to use the 3ML filter library.")
            print ("pyspeclite filter library is still available.")

            return False

    else:

        return True
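
A hypothetical counterpart that reads the cached library back in; get_speclite_filter_path and file_existing_and_readable are assumed to be the same helpers used above:

import os
import yaml

def load_filter_library():
    # Hypothetical sketch: return the cached filter dictionary written by
    # build_filter_library(), or None if the cache is not available.
    lib_path = os.path.join(get_speclite_filter_path(), 'filter_lib.yml')
    if not file_existing_and_readable(lib_path):
        return None
    with open(lib_path) as f:
        return yaml.safe_load(f)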
Example #5
    def _read_arf_file(self, arf_file):
        """
        read an arf file and apply it to the current_matrix

        :param arf_file:
        :param current_matrix:
        :param current_mc_channels:
        :return:
        """

        arf_file = sanitize_filename(arf_file)

        self._arf_file = arf_file

        assert file_existing_and_readable(arf_file.split("{")[0]), "Ancillary file %s not existing or not " \
                                                                   "readable" % arf_file

        with pyfits.open(arf_file) as f:

            data = f['SPECRESP'].data

        arf = data.field('SPECRESP')

        # Check that arf and rmf have same dimensions

        if arf.shape[0] != self.matrix.shape[1]:
            raise IOError(
                "The ARF and the RMF files do not have the same number of channels"
            )

        # Check that the ENERG_LO and ENERG_HI for the RMF and the ARF
        # are the same

        energ_lo = data.field("ENERG_LO")
        energ_hi = data.field("ENERG_HI")

        assert self._are_contiguous(
            energ_lo,
            energ_hi), "Monte carlo energies in ARF are not contiguous!"

        arf_mc_channels = np.append(energ_lo, [energ_hi[-1]])

        # Declare the mc channels different if they differ by more than 1%

        idx = (self.monte_carlo_energies > 0)

        diff = old_div((self.monte_carlo_energies[idx] - arf_mc_channels[idx]),
                       self.monte_carlo_energies[idx])

        if diff.max() > 0.01:
            raise IOError(
                "The ARF and the RMF have one or more MC channels which differ by more than 1%"
            )

        # Multiply ARF and RMF

        matrix = self.matrix * arf

        # Override the matrix with the one multiplied by the arf
        self.replace_matrix(matrix)
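
The assertion above relies on an _are_contiguous helper. A plausible sketch of what that check amounts to (an assumption, not the library's code):

import numpy as np

def _are_contiguous(energ_lo, energ_hi):
    # Assumed meaning of the check: each bin's upper edge must coincide with
    # the next bin's lower edge, so the energy grid has no gaps or overlaps.
    return np.allclose(np.asarray(energ_lo)[1:], np.asarray(energ_hi)[:-1])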
Example #6
    def from_rsp2_file(cls, rsp2_file, exposure_getter, counts_getter, reference_time=0.0, half_shifted=True):

        # This assumes the Fermi/GBM rsp2 file format

        # make the rsp file proper
        rsp_file = sanitize_filename(rsp2_file)

        assert file_existing_and_readable(rsp_file), "OGIPResponse file %s not existing or not readable" % rsp_file

        # Will fill up the list of matrices
        list_of_matrices = []

        # Read the response
        with pyfits.open(rsp_file) as f:

            n_responses = f['PRIMARY'].header['DRM_NUM']

            # we will read all the matrices and save them
            for rsp_number in range(1, n_responses + 1):

                this_response = OGIPResponse(rsp2_file + '{%i}' % rsp_number)

                list_of_matrices.append(this_response)

        if half_shifted:

            # Now the GBM format has a strange feature: the matrix, instead of covering from TSTART to TSTOP, covers
            # from (TSTART + TSTOP) / 2.0 of the previous matrix to the (TSTART + TSTOP) / 2.0 of itself.
            # So let's adjust the coverage intervals accordingly

            if len(list_of_matrices) > 1:

                for i, this_matrix in enumerate(list_of_matrices):

                    if i == 0:

                        # The first matrix covers from its TSTART to its half time

                        this_matrix._coverage_interval = TimeInterval(this_matrix.coverage_interval.start_time,
                                                                      this_matrix.coverage_interval.half_time)

                    else:

                        # Any other matrix covers from the half time of the previous matrix to its own half time.
                        # The previous matrix has already been processed, so its stop time is by now the half time
                        # of its original coverage interval, and we can use it directly

                        prev_matrix = list_of_matrices[i-1]

                        this_matrix._coverage_interval = TimeInterval(prev_matrix.coverage_interval.stop_time,
                                                                      this_matrix.coverage_interval.half_time)


        return InstrumentResponseSet(list_of_matrices, exposure_getter, counts_getter, reference_time)
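
The half-shift logic above only needs a few attributes of TimeInterval. A minimal sketch of those attributes, assuming the real class offers more functionality:

class TimeInterval(object):
    # Minimal sketch of the attributes used in the example above.
    def __init__(self, start_time, stop_time):
        self.start_time = start_time
        self.stop_time = stop_time

    @property
    def half_time(self):
        # Midpoint (TSTART + TSTOP) / 2, matching the GBM rsp2 coverage
        # convention described in the comment above.
        return 0.5 * (self.start_time + self.stop_time)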
Example #7
    def _read_arf_file(self, arf_file):
        """
        read an arf file and apply it to the current_matrix

        :param arf_file:
        :param current_matrix:
        :param current_mc_channels:
        :return:
        """

        arf_file = sanitize_filename(arf_file)

        self._arf_file = arf_file

        assert file_existing_and_readable(arf_file.split("{")[0]), "Ancillary file %s not existing or not " \
                                                                   "readable" % arf_file

        with pyfits.open(arf_file) as f:

            data = f['SPECRESP'].data

        arf = data.field('SPECRESP')

        # Check that arf and rmf have same dimensions

        if arf.shape[0] != self.matrix.shape[1]:
            raise IOError("The ARF and the RMF files do not have the same number of channels")

        # Check that the ENERG_LO and ENERG_HI for the RMF and the ARF
        # are the same

        energ_lo = data.field("ENERG_LO")
        energ_hi = data.field("ENERG_HI")

        assert self._are_contiguous(energ_lo, energ_hi), "Monte carlo energies in ARF are not contiguous!"

        arf_mc_channels = np.append(energ_lo, [energ_hi[-1]])

        # Declare the mc channels different if they differ by more than 1%

        idx = (self.monte_carlo_energies > 0)

        diff = (self.monte_carlo_energies[idx] - arf_mc_channels[idx]) / self.monte_carlo_energies[idx]

        if diff.max() > 0.01:
            raise IOError("The ARF and the RMF have one or more MC channels which differ by more than 1%")

        # Multiply ARF and RMF

        matrix = self.matrix * arf

        # Override the matrix with the one multiplied by the arf
        self.replace_matrix(matrix)
Example #8
    def download(
        self,
        remote_filename,
        destination_path,
        new_filename=None,
        progress=True,
        compress=False,
    ):

        assert remote_filename in self.files, (
            "File %s is not contained in this directory (%s)" %
            (remote_filename, self._request_result.url))

        destination_path = sanitize_filename(destination_path, abspath=True)

        assert path_exists_and_is_directory(destination_path), (
            "Provided destination %s does not exist or "
            "is not a directory" % destination_path)

        # If no filename is specified, use the same name that the file has on the remote server

        if new_filename is None:
            new_filename = remote_filename.split("/")[-1]

        # Get the fully qualified path for the remote and the local file

        remote_path = self._request_result.url + remote_filename
        local_path = os.path.join(destination_path, new_filename)

        # Ask the server for the file, but do not download it just yet
        # (stream=True will get the HTTP header but nothing else)
        # Use stream=True for two reasons:
        # * so that the file is not downloaded all in memory before being written to the disk
        # * so that we can report progress if requested

        this_request = requests.get(remote_path, stream=True)

        # Figure out the size of the file

        file_size = int(this_request.headers["Content-Length"])

        # Now check if we really need to download this file

        if compress:
            # Add a .gz at the end of the file path

            local_path += ".gz"

        if file_existing_and_readable(local_path):

            local_size = os.path.getsize(local_path)

            if local_size == file_size or compress:
                # if the compressed file already exists
                # it will have a smaller size

                # No need to download it again
                return local_path

        # Chunk size shouldn't be too small, otherwise we create a bottleneck in the download speed
        chunk_size = 1024 * 10

        # If the user wants to compress the file, use gzip, otherwise the normal opener
        if compress:

            import gzip

            opener = gzip.open

        else:

            opener = open

        if progress:

            # Set a title for the progress bar
            bar_title = "Downloading %s" % new_filename

            with progress_bar(file_size,
                              scale=1024 * 1024,
                              units="Mb",
                              title=bar_title) as bar:  # type: ProgressBarBase

                with opener(local_path, "wb") as f:

                    for chunk in this_request.iter_content(
                            chunk_size=chunk_size):

                        if chunk:  # filter out keep-alive new chunks

                            f.write(chunk)
                            bar.increase(len(chunk))

            this_request.close()

        else:

            with opener(local_path, "wb") as f:

                for chunk in this_request.iter_content(chunk_size=chunk_size):

                    if chunk:  # filter out keep-alive new chunks

                        f.write(chunk)

            this_request.close()

        return local_path
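
A hypothetical call to this method; downloader stands for an instance of the class that defines download(), and the file name is made up for illustration:

local_file = downloader.download(
    remote_filename="spectrum.fits",   # hypothetical remote file
    destination_path="~/data",
    progress=True,
    compress=False,
)
print("saved to %s" % local_file)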
Example #9
def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library
    :param observatory:
    :param instrument:
    :param ffilter:
    :return:
    """

    # make a directory for this observatory and instrument

    filter_path = os.path.join(get_speclite_filter_path(), to_valid_python_name(observatory))

    if_directory_not_existing_then_make(filter_path)


    # grab the filter file from SVO

    # reconvert 2MASS so we can grab it

    if observatory == 'TwoMASS':
        observatory = '2MASS'

    if not file_existing_and_readable(os.path.join(filter_path,
                                                   "%s-%s.ecsv"%(to_valid_python_name(instrument),
                                                                             to_valid_python_name(ffilter)))) or update:

        url_response = urllib2.urlopen(
            'http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB' % (observatory,
                                                                                           instrument,
                                                                                           ffilter))
        # now parse it
        data = votable.parse_single_table(url_response).to_table()

        # save the waveunit

        waveunit = data['Wavelength'].unit


        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error,
        # so we add a zero on the boundaries

        if data['Transmission'][0] != 0.:

            w1 = data['Wavelength'][0] * .9
            data.insert_row(0, [w1, 0])

        if data['Transmission'][-1] != 0.:

            w2 = data['Wavelength'][-1]* 1.1
            data.add_row([w2, 0])


        # filter any negative values

        idx = data['Transmission'] < 0
        data['Transmission'][idx] = 0

        # build the transmission. We will force all the wavelengths
        # to Angstroms because sometimes AA is misunderstood

        try:

            transmission = spec_filter.FilterResponse(
                wavelength=data['Wavelength'] * waveunit.to('Angstrom') * u.Angstrom,
                response=data['Transmission'],
                meta=dict(group_name=to_valid_python_name(instrument),
                          band_name=to_valid_python_name(ffilter)))


            # save the filter

            transmission.save(filter_path)

            success = True

        except ValueError:

            success = False

            print('%s:%s:%s has an invalid wave table, SKIPPING'% (observatory, instrument, ffilter))

        return success

    else:

        return True
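
The example above also leans on to_valid_python_name. A guess at what such a helper does, since speclite group and band names must be valid Python identifiers (this is an assumption, not the library's exact rule):

def to_valid_python_name(name):
    # Hypothetical sketch: replace characters that are not allowed in a
    # Python identifier with safe substitutes.
    return name.replace('-', '_').replace('.', '_').replace('+', 'p')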
Example #10
    def __init__(self,
                 name,
                 time_series,
                 response=None,
                 poly_order=-1,
                 unbinned=True,
                 verbose=True,
                 restore_poly_fit=None,
                 container_type=BinnedSpectrumWithDispersion):
        """
        Class for handling generic time series data including binned and event list
        series. Depending on the data, this class builds either a SpectrumLike or
        DispersionSpectrumLike plugin

        For specific instruments, use the TimeSeries.from() classmethods


        :param name: name for the plugin
        :param time_series: a TimeSeries instance
        :param response: optional InstrumentResponse instance
        :param poly_order: the polynomial order to use for background fitting
        :param unbinned: if the background should be fit unbinned
        :param verbose: the verbosity switch
        :param restore_poly_fit: file from which to read a prefitted background
        """

        assert isinstance(time_series,
                          TimeSeries), "must be a TimeSeries instance"

        assert issubclass(container_type,
                          Histogram), 'must be a subclass of Histogram'

        self._name = name

        self._container_type = container_type

        self._time_series = time_series  # type: TimeSeries

        # make sure we have a proper response

        if response is not None:
            assert isinstance(response, InstrumentResponse) or isinstance(
                response, InstrumentResponseSet) or isinstance(
                    response,
                    str), 'Response must be an instance of InstrumentResponse'

        # deal with RSP weighting if need be

        if isinstance(response, InstrumentResponseSet):

            # we have a weighted response
            self._rsp_is_weighted = True
            self._weighted_rsp = response

            # just get a dummy response for the moment
            # it will be corrected when we set the interval

            self._response = InstrumentResponse.create_dummy_response(
                response.ebounds, response.monte_carlo_energies)

        else:

            self._rsp_is_weighted = False
            self._weighted_rsp = None

            self._response = response

        self._verbose = verbose
        self._active_interval = None
        self._observed_spectrum = None
        self._background_spectrum = None
        self._measured_background_spectrum = None

        self._time_series.poly_order = poly_order

        self._default_unbinned = unbinned

        # try and restore the poly fit if requested

        if restore_poly_fit is not None:

            if file_existing_and_readable(restore_poly_fit):
                self._time_series.restore_fit(restore_poly_fit)

                if verbose:
                    print('Successfully restored fit from %s' %
                          restore_poly_fit)

            else:

                custom_warnings.warn("Could not find saved background %s." %
                                     restore_poly_fit)
Example #11
    def download(self, remote_filename, destination_path, new_filename=None, progress=True, compress=False):

        assert remote_filename in self.files, "File %s is not contained in this directory (%s)" % (remote_filename,
                                                                                                   self._request_result.url)

        destination_path = sanitize_filename(destination_path, abspath=True)

        assert path_exists_and_is_directory(destination_path), "Provided destination %s does not exist or " \
                                                               "is not a directory" % destination_path

        # If no filename is specified, use the same name that the file has on the remote server

        if new_filename is None:
            new_filename = remote_filename.split("/")[-1]

        # Get the fully qualified path for the remote and the local file

        remote_path = self._request_result.url + remote_filename
        local_path = os.path.join(destination_path, new_filename)

        # Ask the server for the file, but do not download it just yet
        # (stream=True will get the HTTP header but nothing else)
        # Use stream=True for two reasons:
        # * so that the file is not downloaded all in memory before being written to the disk
        # * so that we can report progress if requested

        this_request = requests.get(remote_path, stream=True)

        # Figure out the size of the file

        file_size = int(this_request.headers['Content-Length'])

        # Now check if we really need to download this file

        if compress:
            # Add a .gz at the end of the file path

            local_path += '.gz'

        if file_existing_and_readable(local_path):

            local_size = os.path.getsize(local_path)

            if local_size == file_size or compress:
                # if the compressed file already exists
                # it will have a smaller size

                # No need to download it again
                return local_path

        # Chunk size shouldn't be too small, otherwise we create a bottleneck in the download speed
        chunk_size = 1024 * 10

        # If the user wants to compress the file, use gzip, otherwise the normal opener
        if compress:

            import gzip

            opener = gzip.open

        else:

            opener = open

        if progress:

            # Set a title for the progress bar
            bar_title = "Downloading %s" % new_filename

            with progress_bar(file_size, scale=1024 * 1024, units='Mb',
                              title=bar_title) as bar:  # type: ProgressBarBase

                with opener(local_path, 'wb') as f:

                    for chunk in this_request.iter_content(chunk_size=chunk_size):

                        if chunk:  # filter out keep-alive new chunks

                            f.write(chunk)
                            bar.increase(len(chunk))

            this_request.close()

        else:

            with opener(local_path, 'wb') as f:

                for chunk in this_request.iter_content(chunk_size=chunk_size):

                    if chunk:  # filter out keep-alive new chunks

                        f.write(chunk)

            this_request.close()

        return local_path
Example #12
def get_heasarc_table_as_pandas(heasarc_table_name, update=False, cache_time_days=1):
    """
    Obtain a VO table from the HEASARC archives and return it as a pandas table indexed
    by object/trigger names. The heasarc_table_name values are the ones referenced at:

    https://heasarc.gsfc.nasa.gov/docs/archive/vo/

    In order to speed up the processing of the tables, 3ML can cache the XML table in a cache
    that is updated every cache_time_days. The cache can be forced to update, i.e., reload from
    the web, by setting update to True.


    :param heasarc_table_name: the name of a HEASARC browse table
    :param update: force web read of the table and update cache
    :param cache_time_days: number of days to hold the current cache
    :return: pandas DataFrame with results and astropy table
    """

    # make sure the table is a string

    assert type(heasarc_table_name) is str

    # point to the cache directory and create it if it is not existing

    cache_directory = os.path.join(os.path.expanduser('~'), '.threeML', '.cache')

    if_directory_not_existing_then_make(cache_directory)

    cache_file = os.path.join(cache_directory, '%s_cache.yml' % heasarc_table_name)

    cache_file_sanatized = sanitize_filename(cache_file)

    # build and sanitize the votable XML file that will be saved

    file_name = os.path.join(cache_directory, '%s_votable.xml' % heasarc_table_name)

    file_name_sanatized = sanitize_filename(file_name)

    if not file_existing_and_readable(cache_file_sanatized):

        print("The cache for %s does not yet exist. We will try to build it\n" % heasarc_table_name)

        write_cache = True
        cache_exists = False


    else:

        with open(cache_file_sanatized) as cache:

            # the cache file is two lines. The first is a datetime string that
            # specifies the last time the XML file was obtained

            yaml_cache = yaml.safe_load(cache)

            cached_time = astro_time.Time(datetime.datetime(*map(int, yaml_cache['last save'].split('-'))))

            # the second line specifies how many seconds to keep the file around

            cache_valid_for = float(yaml_cache['cache time'])

            # now we will compare it to the current time in UTC
            current_time = astro_time.Time(datetime.datetime.utcnow(), scale='utc')

            delta_time = current_time - cached_time

            if delta_time.sec >= cache_valid_for:

                # ok, this is an old file, we will update it

                write_cache = True
                cache_exists = True

            else:

                # the cache is still valid, no need to update it

                write_cache = False
                cache_exists = True

    if write_cache or update:

        print("Building cache for %s.\n" % heasarc_table_name)

        # go to HEASARC and get the requested table
        heasarc_url = 'http://heasarc.gsfc.nasa.gov/cgi-bin/W3Browse/getvotable.pl?name=%s' % heasarc_table_name

        try:

            urllib.urlretrieve(heasarc_url, filename=file_name_sanatized)

        except IOError:

            warnings.warn('The cache is outdated but the internet cannot be reached. Please check your connection')

        else:

            # Make sure the lines are interpreted as Unicode (otherwise some characters will fail)
            with open(file_name_sanatized) as table_file:

                new_lines = map(lambda x: x.decode("utf-8", errors="ignore"), table_file.readlines())

            # now write the decoded lines back to the file
            with codecs.open(file_name_sanatized, "w+", "utf-8") as table_file:

                table_file.write("".join(new_lines))

            # save the time that we got this table

            with open(cache_file_sanatized, 'w') as cache:

                yaml_dict = {}

                current_time = astro_time.Time(datetime.datetime.utcnow(), scale='utc')

                yaml_dict['last save'] = current_time.datetime.strftime('%Y-%m-%d-%H-%M-%S')

                seconds_in_day = 86400.

                yaml_dict['cache time'] = seconds_in_day * cache_time_days

                yaml.dump(yaml_dict, stream=cache, default_flow_style=False)

    # use astropy routines to read the votable
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        vo_table = votable.parse(file_name_sanatized)

    table = vo_table.get_first_table().to_table(use_names_over_ids=True)

    # create a pandas table indexed by name

    pandas_df = table.to_pandas().set_index('name')

    del vo_table

    return pandas_df
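
For reference, a small illustration (with made-up values) of the two-entry cache written above: the save timestamp and the cache lifetime in seconds.

import yaml

example_cache = {
    'last save': '2020-01-15-10-30-00',  # when the XML table was fetched
    'cache time': 86400.0,               # how long the cache stays valid, in seconds
}
print(yaml.safe_dump(example_cache, default_flow_style=False))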
Example #13
    def download(
        self,
        remote_filename,
        destination_path: str,
        new_filename=None,
        progress=True,
        compress=False,
    ):

        assert (remote_filename in self.files
                ), "File %s is not contained in this directory (%s)" % (
                    remote_filename,
                    self._request_result.url,
                )

        destination_path: Path = sanitize_filename(destination_path,
                                                   abspath=True)

        assert path_exists_and_is_directory(destination_path), (
            f"Provided destination {destination_path} does not exist or "
            "is not a directory")

        # If no filename is specified, use the same name that the file has on the remote server

        if new_filename is None:
            new_filename: str = remote_filename.split("/")[-1]

        # Get the fully qualified path for the remote and the local file

        remote_path: str = self._request_result.url + remote_filename
        local_path: Path = destination_path / new_filename

        # Ask the server for the file, but do not download it just yet
        # (stream=True will get the HTTP header but nothing else)
        # Use stream=True for two reasons:
        # * so that the file is not downloaded all in memory before being written to the disk
        # * so that we can report progress if requested

        this_request = requests.get(remote_path, stream=True)

        # Figure out the size of the file

        file_size = int(this_request.headers["Content-Length"])

        log.debug(f"downloading {remote_filename} of size {file_size}")

        # Now check if we really need to download this file

        if compress:
            # Add a .gz at the end of the file path

            log.debug(
                f"file {remote_filename} will be downloaded and compressed")

            local_path: Path = Path(f"{local_path}.gz")

        if file_existing_and_readable(local_path):

            local_size = os.path.getsize(local_path)

            if local_size == file_size or compress:
                # if the compressed file already exists
                # it will have a smaller size

                # No need to download it again

                log.info(f"file {remote_filename} is already downloaded!")

                return local_path

        if local_path.is_file():

            first_byte = os.path.getsize(local_path)

        else:

            first_byte = 0

        # Chunk size shouldn't be too small, otherwise we create a bottleneck in the download speed
        chunk_size = 1024 * 10

        # If the user wants to compress the file, use gzip, otherwise the normal opener
        if compress:

            import gzip

            opener = gzip.open

        else:

            opener = open

        if threeML_config["interface"]["progress_bars"]:

            # Set a title for the progress bar
            bar_title = "Downloading %s" % new_filename

            total_size = int(this_request.headers.get('content-length', 0))

            bar = tqdm(
                initial=first_byte,
                unit_scale=True,
                unit_divisor=1024,
                unit="B",
                total=int(this_request.headers["Content-Length"]),
                desc=bar_title,
            )

            with opener(local_path, "wb") as f:

                for chunk in this_request.iter_content(chunk_size=chunk_size):

                    if chunk:  # filter out keep-alive new chunks

                        f.write(chunk)
                        bar.update(len(chunk))

            this_request.close()
            bar.close()

        else:

            with opener(local_path, "wb") as f:

                for chunk in this_request.iter_content(chunk_size=chunk_size):

                    if chunk:  # filter out keep-alive new chunks

                        f.write(chunk)

            this_request.close()

        return local_path
Example #14
    def from_root_file(cls, map_tree_file, roi):
        """
        Create a MapTree object from a ROOT file and a ROI. Do not use this directly, use map_tree_factory instead.

        :param map_tree_file:
        :param roi:
        :return:
        """

        map_tree_file = sanitize_filename(map_tree_file)

        # Check that they exist and can be read

        if not file_existing_and_readable(map_tree_file):

            raise IOError("MapTree %s does not exist or is not readable" %
                          map_tree_file)

        # Make sure we have a proper ROI (or None)

        assert isinstance(roi, HealpixROIBase) or roi is None, "You have to provide an ROI choosing from the " \
                                                               "available ROIs in the region_of_interest module"

        if roi is None:

            custom_warnings.warn(
                "You have set roi=None, so you are reading the entire sky")

        # Read map tree

        with open_ROOT_file(map_tree_file) as f:

            data_bins_labels = list(
                root_numpy.tree2array(f.Get("BinInfo"), "name"))

            # A transit is defined as 1 day, and totalDuration is in hours
            # Get the number of transits from bin 0 (as LiFF does)

            n_transits = root_numpy.tree2array(f.Get("BinInfo"),
                                               "totalDuration") / 24.0

            n_bins = len(data_bins_labels)

            # These are going to be Healpix maps, one for each data analysis bin_name

            data_analysis_bins = []

            for i in range(n_bins):

                name = data_bins_labels[i]

                bin_label = "nHit0%s/%s" % (name, "data")

                bkg_label = "nHit0%s/%s" % (name, "bkg")

                # Get ordering scheme
                nside = f.Get(bin_label).GetUserInfo().FindObject(
                    "Nside").GetVal()
                nside_bkg = f.Get(bkg_label).GetUserInfo().FindObject(
                    "Nside").GetVal()

                assert nside == nside_bkg

                scheme = f.Get(bin_label).GetUserInfo().FindObject(
                    "Scheme").GetVal()
                scheme_bkg = f.Get(bkg_label).GetUserInfo().FindObject(
                    "Scheme").GetVal()

                assert scheme == scheme_bkg

                assert scheme == 0, "NESTED scheme is not supported yet"

                if roi is not None:

                    # Only read the elements in the ROI

                    active_pixels = roi.active_pixels(nside,
                                                      system='equatorial',
                                                      ordering='RING')

                    counts = cls._read_partial_tree(nside, f.Get(bin_label),
                                                    active_pixels)
                    bkg = cls._read_partial_tree(nside, f.Get(bkg_label),
                                                 active_pixels)

                    this_data_analysis_bin = DataAnalysisBin(
                        name,
                        SparseHealpix(counts, active_pixels, nside),
                        SparseHealpix(bkg, active_pixels, nside),
                        active_pixels_ids=active_pixels,
                        n_transits=n_transits[i],
                        scheme='RING')

                else:

                    # Read the entire sky.

                    counts = tree_to_ndarray(f.Get(bin_label),
                                             "count").astype(np.float64)
                    bkg = tree_to_ndarray(f.Get(bkg_label),
                                          "count").astype(np.float64)

                    this_data_analysis_bin = DataAnalysisBin(
                        name,
                        DenseHealpix(counts),
                        DenseHealpix(bkg),
                        active_pixels_ids=None,
                        n_transits=n_transits[i],
                        scheme='RING')

                data_analysis_bins.append(this_data_analysis_bin)

        return cls(data_bins_labels, data_analysis_bins, roi)
Example #15
    def __init__(self, response_file_name):

        # Make sure file is readable

        response_file_name = sanitize_filename(response_file_name)

        # Check that they exist and can be read

        if not file_existing_and_readable(response_file_name):

            raise IOError("Response %s does not exist or is not readable" % response_file_name)

        self._response_file_name = response_file_name

        # Read response

        with open_ROOT_file(response_file_name) as f:

            # Get the name of the trees
            object_names = get_list_of_keys(f)

            # Make sure we have all the things we need

            assert 'LogLogSpectrum' in object_names
            assert 'DecBins' in object_names
            assert 'AnalysisBins' in object_names

            # Read spectrum used during the simulation
            self._log_log_spectrum = TF1Wrapper(f.Get("LogLogSpectrum"))

            # Get the analysis bins definition
            dec_bins = tree_to_ndarray(f.Get("DecBins"))

            dec_bins_lower_edge = dec_bins['lowerEdge']  # type: np.ndarray
            dec_bins_upper_edge = dec_bins['upperEdge']  # type: np.ndarray
            dec_bins_center = dec_bins['simdec']  # type: np.ndarray

            self._dec_bins = zip(dec_bins_lower_edge, dec_bins_center, dec_bins_upper_edge)

            # Read in the ids of the response bins ("analysis bins" in LiFF jargon)
            response_bins_ids = tree_to_ndarray(f.Get("AnalysisBins"), "id")  # type: np.ndarray

            # Now we create a list of ResponseBin instances for each dec bin_name
            self._response_bins = collections.OrderedDict()

            for dec_id in range(len(self._dec_bins)):

                this_response_bins = []

                min_dec, dec_center, max_dec = self._dec_bins[dec_id]

                for response_bin_id in response_bins_ids:

                    this_response_bin = ResponseBin(f, dec_id, response_bin_id, self._log_log_spectrum,
                                                    min_dec, dec_center, max_dec)

                    this_response_bins.append(this_response_bin)

                self._response_bins[self._dec_bins[dec_id][1]] = this_response_bins

        del f
Example #16
    def __init__(self, name, time_series, response=None,
                 poly_order=-1, unbinned=True, verbose=True, restore_poly_fit=None, container_type=BinnedSpectrumWithDispersion):
        """
        Class for handling generic time series data including binned and event list
        series. Depending on the data, this class builds either a SpectrumLike or
        DispersionSpectrumLike plugin

        For specific instruments, use the TimeSeries.from() classmethods


        :param name: name for the plugin
        :param time_series: a TimeSeries instance
        :param response: optional InstrumentResponse instance
        :param poly_order: the polynomial order to use for background fitting
        :param unbinned: if the background should be fit unbinned
        :param verbose: the verbosity switch
        :param restore_poly_fit: file from which to read a prefitted background
        """

        assert isinstance(time_series, TimeSeries), "must be a TimeSeries instance"

        assert issubclass(container_type,Histogram), 'must be a subclass of Histogram'


        self._name = name

        self._container_type = container_type

        self._time_series = time_series  # type: TimeSeries

        # make sure we have a proper response

        if response is not None:
            assert isinstance(response, InstrumentResponse) or isinstance(response,
                                                                          InstrumentResponseSet) or isinstance(response, str), 'Response must be an instance of InstrumentResponse'

        # deal with RSP weighting if need be

        if isinstance(response, InstrumentResponseSet):

            # we have a weighted response
            self._rsp_is_weighted = True
            self._weighted_rsp = response

            # just get a dummy response for the moment
            # it will be corrected when we set the interval

            self._response = InstrumentResponse.create_dummy_response(response.ebounds,
                                                                      response.monte_carlo_energies)

        else:

            self._rsp_is_weighted = False
            self._weighted_rsp = None

            self._response = response

        self._verbose = verbose
        self._active_interval = None
        self._observed_spectrum = None
        self._background_spectrum = None
        self._measured_background_spectrum = None

        self._time_series.poly_order = poly_order

        self._default_unbinned = unbinned

        # try and restore the poly fit if requested

        if restore_poly_fit is not None:

            if file_existing_and_readable(restore_poly_fit):
                self._time_series.restore_fit(restore_poly_fit)

                if verbose:
                    print('Successfully restored fit from %s' % restore_poly_fit)

            else:

                custom_warnings.warn(
                    "Could not find saved background %s." % restore_poly_fit)
Example #17
def get_heasarc_table_as_pandas(heasarc_table_name, update=False, cache_time_days=1):
    """
    Obtain a VO table from the HEASARC archives and return it as a pandas table indexed
    by object/trigger names. The heasarc_table_name values are the ones referenced at:

    https://heasarc.gsfc.nasa.gov/docs/archive/vo/

    In order to speed up the processing of the tables, 3ML can cache the XML table in a cache
    that is updated every cache_time_days. The cache can be forced to update, i.e., reload from
    the web, by setting update to True.


    :param heasarc_table_name: the name of a HEASARC browse table
    :param update: force web read of the table and update cache
    :param cache_time_days: number of days to hold the current cache
    :return: pandas DataFrame with results and astropy table
    """

    # make sure the table is a string

    assert type(heasarc_table_name) is str

    # point to the cache directory and create it if it is not existing

    cache_directory = os.path.join(os.path.expanduser("~"), ".threeML", ".cache")

    if_directory_not_existing_then_make(cache_directory)

    cache_file = os.path.join(cache_directory, "%s_cache.yml" % heasarc_table_name)

    cache_file_sanatized = sanitize_filename(cache_file)

    # build and sanitize the votable XML file that will be saved

    file_name = os.path.join(cache_directory, "%s_votable.xml" % heasarc_table_name)

    file_name_sanatized = sanitize_filename(file_name)

    if not file_existing_and_readable(cache_file_sanatized):

        print(
            "The cache for %s does not yet exist. We will try to build it\n"
            % heasarc_table_name
        )

        write_cache = True
        cache_exists = False

    else:

        with open(cache_file_sanatized) as cache:

            # the cache file is two lines. The first is a datetime string that
            # specifies the last time the XML file was obtained

            yaml_cache = yaml.load(cache, Loader=yaml.SafeLoader)

            cached_time = astro_time.Time(
                datetime.datetime(*list(map(int, yaml_cache["last save"].split("-"))))
            )

            # the second line specifies how many seconds to keep the file around

            cache_valid_for = float(yaml_cache["cache time"])

            # now we will compare it to the current time in UTC
            current_time = astro_time.Time(datetime.datetime.utcnow(), scale="utc")

            delta_time = current_time - cached_time

            if delta_time.sec >= cache_valid_for:

                # ok, this is an old file, we will update it

                write_cache = True
                cache_exists = True

            else:

                # the cache is still valid, no need to update it

                write_cache = False
                cache_exists = True

    if write_cache or update:

        print("Building cache for %s.\n" % heasarc_table_name)

        # go to HEASARC and get the requested table
        heasarc_url = (
            "http://heasarc.gsfc.nasa.gov/cgi-bin/W3Browse/getvotable.pl?name=%s"
            % heasarc_table_name
        )

        try:

            urllib.request.urlretrieve(heasarc_url, filename=file_name_sanatized)

        except IOError:

            warnings.warn(
                "The cache is outdated but the internet cannot be reached. Please check your connection"
            )

        else:

            # Make sure the lines are interpreted as Unicode (otherwise some characters will fail)
            with open(file_name_sanatized) as table_file:

                # decoding might have to be added back in for backwards compatibility:
                # new_lines = [x.decode("utf-8", errors="ignore") for x in table_file.readlines()]

                new_lines = table_file.readlines()

            # now write the decoded lines back to the file
            with codecs.open(file_name_sanatized, "w+", "utf-8") as table_file:

                table_file.write("".join(new_lines))

            # save the time that we got this table

            with open(cache_file_sanatized, "w") as cache:

                yaml_dict = {}

                current_time = astro_time.Time(datetime.datetime.utcnow(), scale="utc")

                yaml_dict["last save"] = current_time.datetime.strftime(
                    "%Y-%m-%d-%H-%M-%S"
                )

                seconds_in_day = 86400.0

                yaml_dict["cache time"] = seconds_in_day * cache_time_days

                yaml.dump(yaml_dict, stream=cache, default_flow_style=False)

    # use astropy routines to read the votable
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        vo_table = votable.parse(file_name_sanatized)

    table = vo_table.get_first_table().to_table(use_names_over_ids=True)

    # make sure we do not use this as byte code
    table.convert_bytestring_to_unicode()

    # create a pandas table indexed by name

    pandas_df = table.to_pandas().set_index("name")

    del vo_table

    return pandas_df
Example #18
    def __init__(self, rsp_file, arf_file=None):
        """

        :param rsp_file:
        :param arf_file:
        """

        # Now make sure that the response file exist

        rsp_file = sanitize_filename(rsp_file)

        assert file_existing_and_readable(rsp_file.split("{")[0]), "OGIPResponse file %s not existing or not " \
                                                                   "readable" % rsp_file

        # Check if we are dealing with a .rsp2 file (containing more than
        # one response). This is checked by looking for the syntax
        # [responseFile]{[responseNumber]}

        if '{' in rsp_file:

            tokens = rsp_file.split("{")
            rsp_file = tokens[0]
            rsp_number = int(tokens[-1].split('}')[0].replace(" ", ""))

        else:

            rsp_number = 1

        self._rsp_file = rsp_file

        # Read the response
        with pyfits.open(rsp_file) as f:

            try:

                # This is usually when the response file contains only the energy dispersion

                data = f['MATRIX', rsp_number].data
                header = f['MATRIX', rsp_number].header

                if arf_file is None:
                    warnings.warn("The response is in an extension called MATRIX, which usually means you also "
                                  "need an ancillary file (ARF) which you didn't provide. You should refer to the "
                                  "documentation of the instrument and make sure you don't need an ARF.")

            except Exception as e:
                warnings.warn("The default choice for MATRIX extension failed: " + repr(e) +
                              "; available: " + " ".join([repr(ext.header.get('EXTNAME')) for ext in f]))

                # Other detectors might use the SPECRESP MATRIX name instead, usually when the response has been
                # already convoluted with the effective area

                # Note that here we are not catching any exception, because
                # we have to fail if we cannot read the matrix

                data = f['SPECRESP MATRIX', rsp_number].data
                header = f['SPECRESP MATRIX', rsp_number].header

            # These 3 operations must be executed when the file is still open

            matrix = self._read_matrix(data, header)

            ebounds = self._read_ebounds(f['EBOUNDS'])

            mc_channels = self._read_mc_channels(data)

        # Now, if there is information on the coverage interval, let's use it

        header_start = header.get("TSTART", None)
        header_stop = header.get("TSTOP", None)

        if header_start is not None and header_stop is not None:

            super(OGIPResponse, self).__init__(matrix=matrix,
                                               ebounds=ebounds,
                                               monte_carlo_energies=mc_channels,
                                               coverage_interval=TimeInterval(header_start, header_stop))

        else:

            super(OGIPResponse, self).__init__(matrix=matrix,
                                               ebounds=ebounds,
                                               monte_carlo_energies=mc_channels)

        # Read the ARF if there is any
        # NOTE: this has to happen *after* calling the parent constructor

        if arf_file is not None and arf_file.lower() != 'none':

            self._read_arf_file(arf_file)

        else:

            self._arf_file = None
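
A hypothetical way to call the constructor above; the file names are illustrative, and the {2} suffix selects the second matrix of a multi-matrix file, as parsed in the code:

# Read the second response matrix of an rsp2 file together with its ARF.
rsp = OGIPResponse("spectrum.rsp2{2}", arf_file="spectrum.arf")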
Example #19
    def check_if_fit_exists(self, destination='.', save_info=''):

        file_name = "%s/%s_%s_%s.fits" % (destination, self._band, self._snid,
                                          save_info)

        return file_existing_and_readable(file_name)
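
A hypothetical usage sketch for the method above; fitter stands for an instance of the class that defines it, and run_fit is a made-up placeholder:

if fitter.check_if_fit_exists(destination='./fits', save_info='run1'):
    print("Fit already on disk, skipping")
else:
    run_fit(fitter)  # hypothetical: perform and save the fit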
Example #20
    def __init__(self, name, maptree, response, n_transits=None, fullsky=False):

        # This controls if the likeHAWC class should load the entire
        # map or just a small disc around a source (faster).
        # Default is the latter, which is way faster. LIFF will decide
        # autonomously which ROI to use depending on the source model

        self._fullsky = bool(fullsky)

        # Sanitize files in input (expand variables and so on)

        self._maptree = os.path.abspath(sanitize_filename(maptree))

        self._response = os.path.abspath(sanitize_filename(response))

        # Check that they exist and can be read

        if not file_existing_and_readable(self._maptree):
            raise IOError("MapTree %s does not exist or is not readable" % maptree)

        if not file_existing_and_readable(self._response):
            raise IOError("Response %s does not exist or is not readable" % response)

        # Postpone the creation of the LIFF instance to when
        # we have the likelihood model

        self._instanced = False

        # Number of transits
        if n_transits is not None:

            self._n_transits = float(n_transits)

        else:

            self._n_transits = None

        # Default list of bins

        self._bin_list = self._min_and_max_to_list(defaultMinChannel, defaultMaxChannel)

        # By default the fit of the CommonNorm is deactivated
        # NOTE: this flag sets the internal common norm minimization of LiFF, not
        # the common norm as nuisance parameter (which is controlled by activate_CommonNorm() and
        # deactivate_CommonNorm())
        self._fit_commonNorm = False

        # This is to keep track of whether the user defined a ROI or not

        self._roi_ra = None
        self._roi_fits = None
        self._roi_galactic = False

        # Create the dictionary of nuisance parameters

        self._nuisance_parameters = collections.OrderedDict()

        param_name = "%s_ComNorm" % name

        self._nuisance_parameters[param_name] = Parameter(
            param_name, 1.0, min_value=0.5, max_value=1.5, delta=0.01
        )
        self._nuisance_parameters[param_name].fix = True

        super(HAWCLike, self).__init__(name, self._nuisance_parameters)
Example #21
def from_root_file(map_tree_file, roi):
    """
    Create a MapTree object from a ROOT file and a ROI. Do not use this directly, use map_tree_factory instead.

    :param map_tree_file:
    :param roi:
    :return:
    """

    from ..root_handler import open_ROOT_file, root_numpy, tree_to_ndarray

    map_tree_file = sanitize_filename(map_tree_file)

    # Check that they exist and can be read

    if not file_existing_and_readable(map_tree_file):  # pragma: no cover
        raise IOError("MapTree %s does not exist or is not readable" % map_tree_file)

    # Make sure we have a proper ROI (or None)

    assert isinstance(roi, HealpixROIBase) or roi is None, "You have to provide an ROI choosing from the " \
                                                           "available ROIs in the region_of_interest module"

    if roi is None:
        custom_warnings.warn("You have set roi=None, so you are reading the entire sky")

    # Read map tree

    with open_ROOT_file(map_tree_file) as f:

        data_bins_labels = list(root_numpy.tree2array(f.Get("BinInfo"), "name"))

        # A transit is defined as 1 day, and totalDuration is in hours
        # Get the number of transits from bin 0 (as LiFF does)

        n_transits = root_numpy.tree2array(f.Get("BinInfo"), "totalDuration") / 24.0

        # The map-maker underestimates the livetime of bins with low statistics by removing time intervals with
        # zero events. Therefore, the best estimate of the livetime is the maximum of n_transits, which normally
        # happens in the bins with high statistics
        n_transits = max(n_transits)

        n_bins = len(data_bins_labels)

        # These are going to be Healpix maps, one for each data analysis bin_name

        data_analysis_bins = collections.OrderedDict()

        for i in range(n_bins):

            name = data_bins_labels[i]

            data_tobject = _get_bin_object(f, name, "data")

            bkg_tobject = _get_bin_object(f, name, "bkg")

            # Get ordering scheme
            nside = data_tobject.GetUserInfo().FindObject("Nside").GetVal()
            nside_bkg = bkg_tobject.GetUserInfo().FindObject("Nside").GetVal()

            assert nside == nside_bkg

            scheme = data_tobject.GetUserInfo().FindObject("Scheme").GetVal()
            scheme_bkg = bkg_tobject.GetUserInfo().FindObject("Scheme").GetVal()

            assert scheme == scheme_bkg

            assert scheme == 0, "NESTED scheme is not supported yet"

            if roi is not None:

                # Only read the elements in the ROI

                active_pixels = roi.active_pixels(nside, system='equatorial', ordering='RING')

                counts = _read_partial_tree(data_tobject, active_pixels)
                bkg = _read_partial_tree(bkg_tobject, active_pixels)

                counts_hpx = SparseHealpix(counts, active_pixels, nside)
                bkg_hpx = SparseHealpix(bkg, active_pixels, nside)

                this_data_analysis_bin = DataAnalysisBin(name,
                                                         counts_hpx,
                                                         bkg_hpx,
                                                         active_pixels_ids=active_pixels,
                                                         n_transits=n_transits,
                                                         scheme='RING')

            else:

                # Read the entire sky.

                counts = tree_to_ndarray(data_tobject, "count").astype(np.float64)
                bkg = tree_to_ndarray(bkg_tobject, "count").astype(np.float64)

                this_data_analysis_bin = DataAnalysisBin(name,
                                                         DenseHealpix(counts),
                                                         DenseHealpix(bkg),
                                                         active_pixels_ids=None,
                                                         n_transits=n_transits,
                                                         scheme='RING')

            data_analysis_bins[name] = this_data_analysis_bin

    return data_analysis_bins
Example #22
    def __init__(self, name, phafile, bkgfile, rspfile, arffile=None):

        self.name = name

        # Check that all file exists
        notExistant = []

        inputFiles = [phafile, bkgfile, rspfile]

        for i in range(3):

            # The file could contain a {#} specification, like spectrum.pha{3},
            # which indicates the 3rd spectrum in the spectrum.pha file

            inputFiles[i] = file_utils.sanitize_filename(inputFiles[i].split("{")[0])

            if not file_utils.file_existing_and_readable(inputFiles[i]):
                raise IOError("File %s does not exist or is not readable" % (inputFiles[i]))

        phafile, bkgfile, rspfile = inputFiles

        # Check the arf, if provided
        if arffile is not None:

            arffile = file_utils.sanitize_filename(arffile.split("{")[0])

            if not file_utils.file_existing_and_readable(arffile):
                raise IOError("File %s does not exist or is not readable" % (arffile))

        self.phafile = OGIPPHA(phafile, filetype="observed")
        self.exposure = self.phafile.getExposure()
        self.bkgfile = OGIPPHA(bkgfile, filetype="background")
        self.response = Response(rspfile, arffile)

        # Start with an empty mask (the user will overwrite it using the
        # setActiveMeasurement method)
        self.mask = numpy.ones(self.phafile.getRates().shape, dtype=bool)

        # Get the counts for this spectrum
        self.counts = self.phafile.getRates()[self.mask] * self.exposure

        # Check that counts is positive
        idx = self.counts < 0

        if numpy.sum(idx) > 0:
            warnings.warn(
                "The observed spectrum for %s " % self.name + "has negative channels! Fixing those to zero.",
                RuntimeWarning,
            )
            self.counts[idx] = 0

        # Get the background counts for this spectrum
        self.bkgCounts = self.bkgfile.getRates()[self.mask] * self.exposure

        # Check that bkgCounts is positive
        idx = self.bkgCounts < 0

        if numpy.sum(idx) > 0:
            warnings.warn(
                "The background spectrum for %s " % self.name + "has negative channels! Fixing those to zero.",
                RuntimeWarning,
            )
            self.bkgCounts[idx] = 0

        # Check that the observed counts are positive

        idx = self.counts < 0

        if numpy.sum(idx) > 0:
            raise RuntimeError("Negative counts in observed spectrum %s. Data are corrupted." % (phafile))

        # Keep a copy which will never be modified
        self.counts_backup = numpy.array(self.counts, copy=True)
        self.bkgCounts_backup = numpy.array(self.bkgCounts, copy=True)

        # Effective area correction is disabled by default, i.e.,
        # the nuisance parameter is fixed to 1
        self.nuisanceParameters = {}
        self.nuisanceParameters["InterCalib"] = Parameter("InterCalib", 1, min_value=0.9, max_value=1.1, delta=0.01)
        self.nuisanceParameters["InterCalib"].fix = True
Example #23
    def __init__(self, name, maptree, response, n_transits=None, fullsky=False):

        # This controls if the likeHAWC class should load the entire
        # map or just a small disc around a source (faster).
        # Default is the latter, which is way faster. LIFF will decide
        # autonomously which ROI to use depending on the source model

        self._fullsky = bool(fullsky)

        # Sanitize files in input (expand variables and so on)

        self._maptree = os.path.abspath(sanitize_filename(maptree))

        self._response = os.path.abspath(sanitize_filename(response))

        # Check that they exists and can be read

        if not file_existing_and_readable(self._maptree):
            raise IOError("MapTree %s does not exist or is not readable" % maptree)

        if not file_existing_and_readable(self._response):
            raise IOError("Response %s does not exist or is not readable" % response)

        # Post-pone the creation of the LIFF instance to when
        # we have the likelihood model

        self._instanced = False

        # Number of transits
        if n_transits is not None:

            self._n_transits = float(n_transits)

        else:

            self._n_transits = None

        # Default list of bins
        
        self._bin_list = self._min_and_max_to_list(defaultMinChannel,
                                                   defaultMaxChannel)

        # By default the fit of the CommonNorm is deactivated
        # NOTE: this flag sets the internal common norm minimization of LiFF, not
        # the common norm as a nuisance parameter (which is controlled by
        # activate_CommonNorm() and deactivate_CommonNorm())
        self._fit_commonNorm = False

        # This is to keep track of whether the user defined a ROI or not

        self._roi_ra = None
        self._roi_fits = None
        self._roi_galactic = False

        # Create the dictionary of nuisance parameters

        self._nuisance_parameters = collections.OrderedDict()

        param_name = "%s_ComNorm" % name

        self._nuisance_parameters[param_name] = Parameter(param_name, 1.0, min_value=0.5, max_value=1.5, delta=0.01)
        self._nuisance_parameters[param_name].fix = True

        super(HAWCLike, self).__init__(name, self._nuisance_parameters)
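A minimal construction sketch for the plugin above; the import path and the file names are assumptions, while the signature (name, maptree, response, n_transits=None, fullsky=False) is from the snippet:

# Hypothetical usage sketch -- the import path and the file names are assumed.
from threeML import HAWCLike

hawc = HAWCLike("HAWC",
                maptree="maptree.root",
                response="detector_response.root",
                n_transits=None,   # falls through to the default handling in the constructor above
                fullsky=False)     # load only a disc around the source (faster), as described above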
Example #24
def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library
    :param observatory:
    :param instrument:
    :param ffilter:
    :return:
    """

    # make a directory for this observatory and instrument

    filter_path = os.path.join(
        get_speclite_filter_path(), to_valid_python_name(observatory)
    )

    if_directory_not_existing_then_make(filter_path)

    # grab the filter file from SVO

    # reconvert 2MASS so we can grab it

    if observatory == "TwoMASS":
        observatory = "2MASS"

    if (
        not file_existing_and_readable(
            os.path.join(
                filter_path,
                "%s-%s.ecsv"
                % (to_valid_python_name(instrument), to_valid_python_name(ffilter)),
            )
        )
        or update
    ):

        url_response = urllib.request.urlopen(
            "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB"
            % (observatory, instrument, ffilter)
        )
        # now parse it
        data = votable.parse_single_table(url_response).to_table()

        # save the waveunit

        waveunit = data["Wavelength"].unit

        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error.
        # so we add a zero on the boundaries

        if data["Transmission"][0] != 0.0:

            w1 = data["Wavelength"][0] * 0.9
            data.insert_row(0, [w1, 0])

        if data["Transmission"][-1] != 0.0:

            w2 = data["Wavelength"][-1] * 1.1
            data.add_row([w2, 0])

        # filter any negative values

        idx = data["Transmission"] < 0
        data["Transmission"][idx] = 0

        # build the transmission. # we will force all the wavelengths
        # to Angstroms because sometimes AA is misunderstood

        try:

            transmission = spec_filter.FilterResponse(
                wavelength=data["Wavelength"] * waveunit.to("Angstrom") * u.Angstrom,
                response=data["Transmission"],
                meta=dict(
                    group_name=to_valid_python_name(instrument),
                    band_name=to_valid_python_name(ffilter),
                ),
            )

            # save the filter

            transmission.save(filter_path)

            success = True

        except ValueError:

            success = False

            print(
                "%s:%s:%s has an invalid wave table, SKIPPING"
                % (observatory, instrument, ffilter)
            )

        return success

    else:

        return True
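A short call sketch for the helper above; the SVO observatory/instrument/filter identifiers below are only illustrative, and the function is assumed to be importable from wherever it is defined:

# Hypothetical usage sketch -- the filter identifiers are illustrative;
# the signature (observatory, instrument, ffilter, update=False) is from the function above.
ok = add_svo_filter_to_speclite("Swift", "UVOT", "V", update=False)

if not ok:
    print("The SVO wave table was invalid and the filter was skipped")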
Example #25
    def __init__(self, rsp_file, arf_file=None):
        """

        :param rsp_file:
        :param arf_file:
        """

        # Now make sure that the response file exist

        rsp_file = sanitize_filename(rsp_file)

        assert file_existing_and_readable(rsp_file.split("{")[0]), "OGIPResponse file %s not existing or not " \
                                                                   "readable" % rsp_file

        # Check if we are dealing with a .rsp2 file (containing more than
        # one response). This is checked by looking for the syntax
        # [responseFile]{[responseNumber]}

        if '{' in rsp_file:

            tokens = rsp_file.split("{")
            rsp_file = tokens[0]
            rsp_number = int(tokens[-1].split('}')[0].replace(" ", ""))

        else:

            rsp_number = 1

        self._rsp_file = rsp_file

        # Read the response
        with pyfits.open(rsp_file) as f:

            try:

                # This is usually when the response file contains only the energy dispersion

                data = f['MATRIX', rsp_number].data
                header = f['MATRIX', rsp_number].header

                if arf_file is None:
                    warnings.warn(
                        "The response is in an extension called MATRIX, which usually means you also "
                        "need an ancillary file (ARF) which you didn't provide. You should refer to the "
                        "documentation  of the instrument and make sure you don't need an ARF."
                    )

            except Exception as e:
                warnings.warn("The default choice for MATRIX extension failed:"+repr(e)+\
                              "available: "+" ".join([repr(e.header.get('EXTNAME')) for e in f]))

                # Other detectors might use the SPECRESP MATRIX name instead, usually when the response has been
                # already convoluted with the effective area

                # Note that here we are not catching any exception, because
                # we have to fail if we cannot read the matrix

                data = f['SPECRESP MATRIX', rsp_number].data
                header = f['SPECRESP MATRIX', rsp_number].header

            # These 3 operations must be executed when the file is still open

            matrix = self._read_matrix(data, header)

            ebounds = self._read_ebounds(f['EBOUNDS'])

            mc_channels = self._read_mc_channels(data)

        # Now, if there is information on the coverage interval, let's use it

        header_start = header.get("TSTART", None)
        header_stop = header.get("TSTOP", None)

        if header_start is not None and header_stop is not None:

            super(OGIPResponse,
                  self).__init__(matrix=matrix,
                                 ebounds=ebounds,
                                 monte_carlo_energies=mc_channels,
                                 coverage_interval=TimeInterval(
                                     header_start, header_stop))

        else:

            super(OGIPResponse,
                  self).__init__(matrix=matrix,
                                 ebounds=ebounds,
                                 monte_carlo_energies=mc_channels)

        # Read the ARF if there is any
        # NOTE: this has to happen *after* calling the parent constructor

        if arf_file is not None and arf_file.lower() != 'none':

            self._read_arf_file(arf_file)

        else:

            self._arf_file = None
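A minimal construction sketch for the response class above, including the {n} selector for .rsp2 files that the constructor parses; the file names are assumptions:

# Hypothetical usage sketch -- the file names are assumed; the "{n}" selector and the optional
# arf_file argument are handled by the constructor above.
rsp = OGIPResponse("detector.rsp2{2}")                              # use the 2nd matrix in an .rsp2 file
rsp_with_arf = OGIPResponse("detector.rsp", arf_file="detector.arf")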
Example #26
    def from_root_file(cls, response_file_name):
        """
        Build response from a ROOT file. Do not use directly, use the hawc_response_factory function instead.

        :param response_file_name:
        :return: a HAWCResponse instance
        """

        from ..root_handler import open_ROOT_file, get_list_of_keys, tree_to_ndarray

        # Make sure file is readable

        response_file_name = sanitize_filename(response_file_name)

        # Check that they exists and can be read

        if not file_existing_and_readable(
                response_file_name):  # pragma: no cover

            raise IOError("Response %s does not exist or is not readable" %
                          response_file_name)

        # Read response

        with open_ROOT_file(response_file_name) as root_file:

            # Get the name of the trees
            object_names = get_list_of_keys(root_file)

            # Make sure we have all the things we need

            assert 'LogLogSpectrum' in object_names
            assert 'DecBins' in object_names
            assert 'AnalysisBins' in object_names

            # Read spectrum used during the simulation
            log_log_spectrum = root_file.Get("LogLogSpectrum")

            # Get the analysis bins definition
            dec_bins_ = tree_to_ndarray(root_file.Get("DecBins"))

            dec_bins_lower_edge = dec_bins_['lowerEdge']  # type: np.ndarray
            dec_bins_upper_edge = dec_bins_['upperEdge']  # type: np.ndarray
            dec_bins_center = dec_bins_['simdec']  # type: np.ndarray

            # Materialize the zip so it can be indexed and measured with len() below (needed on Python 3)
            dec_bins = list(zip(dec_bins_lower_edge, dec_bins_center,
                                dec_bins_upper_edge))

            # Read in the ids of the response bins ("analysis bins" in LiFF jargon)
            try:

                response_bins_ids = tree_to_ndarray(
                    root_file.Get("AnalysisBins"), "name")  # type: np.ndarray

            except ValueError:

                try:

                    response_bins_ids = tree_to_ndarray(
                        root_file.Get("AnalysisBins"),
                        "id")  # type: np.ndarray

                except ValueError:

                    # Some old response files (or energy responses) have no "name" branch
                    custom_warnings.warn(
                        "Response %s has no AnalysisBins 'id' or 'name' branch. "
                        "Will try with default names" % response_file_name)

                    response_bins_ids = None

            # response_bins_ids can be None if neither branch above succeeded
            if response_bins_ids is not None:
                response_bins_ids = response_bins_ids.astype(str)

            # Now we create a dictionary of ResponseBin instances for each dec bin
            response_bins = collections.OrderedDict()

            for dec_id in range(len(dec_bins)):

                this_response_bins = collections.OrderedDict()

                min_dec, dec_center, max_dec = dec_bins[dec_id]

                # If we couldn't get the response_bins_ids above, let's use the default names
                if response_bins_ids is None:

                    # Defaults are just integers. Let's read how many nHit bins there are from the first dec bin
                    dec_id_label = "dec_%02i" % dec_id

                    n_energy_bins = root_file.Get(dec_id_label).GetNkeys()

                    response_bins_ids = range(n_energy_bins)

                for response_bin_id in response_bins_ids:
                    this_response_bin = ResponseBin.from_ttree(
                        root_file, dec_id, response_bin_id, log_log_spectrum,
                        min_dec, dec_center, max_dec)

                    this_response_bins[response_bin_id] = this_response_bin

                response_bins[dec_bins[dec_id][1]] = this_response_bins

        # Now the file is closed. Let's explicitly remove root_file so we are sure it is freed
        del root_file

        # Instance the class and return it
        instance = cls(response_file_name, dec_bins, response_bins)

        return instance
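The docstring above says this classmethod is normally reached through hawc_response_factory; a direct-call sketch is shown below, with the class name taken from the docstring and the file name as an assumption:

# Hypothetical usage sketch -- the file name is assumed; the docstring above recommends
# using the hawc_response_factory function rather than calling this classmethod directly.
response = HAWCResponse.from_root_file("detector_response.root")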
Example #27
    def from_root_file(cls, response_file_name):

        # Make sure file is readable

        response_file_name = sanitize_filename(response_file_name)

        # Check that they exists and can be read

        if not file_existing_and_readable(
                response_file_name):  # pragma: no cover
            raise IOError("Response %s does not exist or is not readable" %
                          response_file_name)

        # Read response

        with open_ROOT_file(response_file_name) as f:

            # Get the name of the trees
            object_names = get_list_of_keys(f)

            # Make sure we have all the things we need

            assert 'LogLogSpectrum' in object_names
            assert 'DecBins' in object_names
            assert 'AnalysisBins' in object_names

            # Read spectrum used during the simulation
            log_log_spectrum = f.Get("LogLogSpectrum")

            # Get the analysis bins definition
            dec_bins_ = tree_to_ndarray(f.Get("DecBins"))

            dec_bins_lower_edge = dec_bins_['lowerEdge']  # type: np.ndarray
            dec_bins_upper_edge = dec_bins_['upperEdge']  # type: np.ndarray
            dec_bins_center = dec_bins_['simdec']  # type: np.ndarray

            # Materialize the zip so it can be indexed and measured with len() below (needed on Python 3)
            dec_bins = list(zip(dec_bins_lower_edge, dec_bins_center,
                                dec_bins_upper_edge))

            # Read in the ids of the response bins ("analysis bins" in LiFF jargon)
            response_bins_ids = tree_to_ndarray(f.Get("AnalysisBins"),
                                                "id")  # type: np.ndarray

            # Now we create a list of ResponseBin instances for each dec bin
            response_bins = {}

            for dec_id in range(len(dec_bins)):

                this_response_bins = []

                min_dec, dec_center, max_dec = dec_bins[dec_id]

                for response_bin_id in response_bins_ids:

                    this_response_bin = ResponseBin.from_ttree(
                        f, dec_id, response_bin_id, log_log_spectrum, min_dec,
                        dec_center, max_dec)

                    this_response_bins.append(this_response_bin)

                response_bins[dec_bins[dec_id][1]] = this_response_bins

        # Now the file is closed. Let's explicitly remove f so we are sure it is freed
        del f

        # Instance the class and return it
        instance = cls(response_file_name, dec_bins, response_bins)

        return instance