def radius_sky_portion(CCD_structure):
    '''
    This function uses the CCD information to estimate the angular radius of the portion of sky imaged.

    Parameters
    ----------

    CCD_structure : array (float elements)
        This object contains the physical description of the CCD
        [0] : the length of the CCD on the x axis, in mm
        [1] : the length of the CCD on the y axis, in mm
        [2] : the size of a single pixel, in mm
        [3] : the angular size of a pixel (plate scale), in arcsec

    Output
    ------
     radius : astropy.units.quantity.Quantity
         the radius in a format astropy can understand
    '''
    log.debug(hist())
    
    pixel_on_x = CCD_structure[0]/CCD_structure[2]
    pixel_on_y = CCD_structure[1]/CCD_structure[2]
    diagonal_diameter = np.sqrt(pixel_on_x**2 + pixel_on_y**2)
    radius_on_pixel = diagonal_diameter/2
    radius = (radius_on_pixel*CCD_structure[3]/3600)*u.deg  # pixels * arcsec/pixel -> degrees
    return radius
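
A minimal usage sketch of the arithmetic above, with illustrative (not real) CCD values: a 13.5 x 9.0 mm sensor, 9-micron pixels and a plate scale of 0.75 arcsec/pixel.

# Standalone sketch; all numbers are hypothetical.
import numpy as np
import astropy.units as u

CCD_structure = [13.5, 9.0, 0.009, 0.75]  # [x mm, y mm, pixel mm, arcsec/pixel]

pixel_on_x = CCD_structure[0] / CCD_structure[2]        # 1500 pixels
pixel_on_y = CCD_structure[1] / CCD_structure[2]        # 1000 pixels
radius_px = np.hypot(pixel_on_x, pixel_on_y) / 2        # half the diagonal, ~901.4 px
radius = (radius_px * CCD_structure[3] / 3600) * u.deg  # arcsec -> degrees
print(radius)  # ~0.188 deg
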
def VEGA_to_AB(magnitudo, photo_filter):
    '''
    This function takes the magnitude of a star in the VEGA system and converts it to the AB system.
    It downloads the conversion table by calling data_loader.

    Parameters
    ----------

    magnitudo : array (float elements)
        Each element is the magnitude of the star in a specific band (photo_filter)

    photo_filter : list (string elements)
        A list whose elements are strings identifying a filter, e.g. "U", "R", "I"

    Output
    ------

    magnitudo : array
        The array contains the converted elements
    '''

    log.debug(hist(photo_filter))

    sub = DATA["Convertion_table"]
    convertion_table = [sub[k] for k in photo_filter if k in sub]
    for i in range(len(magnitudo)-1):  # the last element is a reference value, left untouched
        if magnitudo[i] == 0:
            magnitudo[i] = 40  # 0 marks a missing measurement: push it far out of range
        else:
            magnitudo[i] = magnitudo[i] - convertion_table[i]
    return magnitudo
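
A minimal sketch of the per-filter lookup pattern above, with a placeholder conversion table (the real values come from data_loader).

DATA = {"Convertion_table": {"U": 0.8, "B": -0.1, "V": 0.0}}  # placeholder offsets

def vega_to_ab_sketch(magnitudo, photo_filter):
    sub = DATA["Convertion_table"]
    table = [sub[k] for k in photo_filter if k in sub]
    out = list(magnitudo)  # copy: the input is not mutated
    for i in range(len(out)):  # full range here; the module version skips the reference entry
        out[i] = 40 if out[i] == 0 else out[i] - table[i]
    return out

print(vega_to_ab_sketch([10.0, 0.0, 12.5], ["U", "B", "V"]))  # [9.2, 40, 12.5]
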
def photons_to_electrons(photons, photo_filter):
    '''
    This function takes the number of photons/sec and converts it into the number of electrons/sec on the CCD.
    It calls data_loader to download the quantum efficiency of the CCD.

    Parameters
    ----------

    photons : array
        The elements of the array are the numbers of photons for a specific band

    photo_filter : list (string elements)
        A list whose elements are strings identifying a filter, e.g. "U", "R", "I"

    Output
    ------

    electrons : array
        The elements of the array are the numbers of electrons for a specific band
    '''
    log.debug(hist())

    sub = DATA["Quantum_efficiency"]
    quantum_efficiency = [sub[k] for k in photo_filter if k in sub]
    
    electrons = list(photons)  # copy, so the input list is not modified in place
    for i in range(len(photons)-1):  # the last element is a reference value, left untouched
        electrons[i] = photons[i]*quantum_efficiency[i]

    return electrons
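
A quick numeric check of the scaling above: at an illustrative quantum efficiency of 0.85, 500 photons/s become 425 electrons/s.

photons_per_s = 500.0        # illustrative photon rate
quantum_efficiency = 0.85    # illustrative QE
print(photons_per_s * quantum_efficiency)  # 425.0 electrons/s
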
def magnitudo_to_photons(magnitudo, photo_filter):
    '''
    This function converts the magnitude of a star into a number of photons/second.
    It downloads the needed information by calling data_loader.

    Parameters
    ----------

    magnitudo : array (float elements)
        Each element is the magnitude of the star in a specific band (photo_filter)

    photo_filter : list (string elements)
        A list whose elements are strings identifying a filter, e.g. "U", "R", "I"

    Output
    ------

    photons : array
        The elements of the array are the numbers of photons for a specific band
    '''
    log.debug(hist())
    
    sub = DATA["Central_wavelenght"]
    central_wavelenght = [sub[k] for k in photo_filter if k in sub]
    
    number = list(magnitudo)  # copy, so the input list is not modified in place
    for i in range(len(magnitudo)-1):  # the last element is a reference value, left untouched
        if magnitudo[i] == 0:
            number[i] = 0  # 0 marks a missing measurement: no photons
        else:
            exp = 6.74 - 0.4*magnitudo[i]
            number[i] = 10**exp  # /(central_wavelenght[i]) is currently disabled
    return number
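
A worked check of the rate formula used above, N = 10**(6.74 - 0.4*m):

m = 10.0                      # illustrative magnitude
N = 10 ** (6.74 - 0.4 * m)    # exponent = 6.74 - 4.0 = 2.74
print(N)                      # ~549.5 photons/s
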
def atmospheric_attenuation(magnitudo, photo_filter, AirMass):
    '''
    This function takes the magnitude of a star and gives back the same magnitude attenuated by the atmosphere.
    It downloads the extinction coefficients by calling data_loader.

    Parameters
    ----------

    magnitudo : array (float elements)
        Each element is the magnitude of the star in a specific band (photo_filter)

    photo_filter : list (string elements)
        A list whose elements are strings identifying a filter, e.g. "U", "R", "I"

    AirMass : float
        The airmass gives the thickness of the atmosphere crossed by the light

    Output
    ------

    magnitudo : array
        The array contains the attenuated elements
    '''
    log.debug(hist())

    sub = DATA["Extinction_coefficient"]
    extinction_coefficient = [sub[k] for k in photo_filter if k in sub]
    
    for i in range(len(magnitudo)-1):  # the last element is a reference value, left untouched
        magnitudo[i] = magnitudo[i] + AirMass*extinction_coefficient[i]  # extinction makes the star fainter
    return magnitudo
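
A worked example of the correction applied above, m_obs = m + k*AirMass, with an illustrative extinction coefficient k = 0.12 mag/airmass:

m, k, AirMass = 12.0, 0.12, 1.5
print(m + k * AirMass)  # 12.18: the star appears 0.18 mag fainter
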
Example n. 6
    def _parse_result(self, response, verbose=False):
        """
        Parses the results from the HTTP response to `~astropy.table.Table`.

        Parameters
        ----------
        response : `requests.Response`
            The HTTP response object
        verbose : bool, optional
            Defaults to `False`. When true it will display warnings whenever
            the VOtable returned from the Service doesn't conform to the
            standard.

        Returns
        -------
        table : `~astropy.table.Table`
        """
        if not verbose:
            commons.suppress_vo_warnings()

        content = response.text
        log.debug(content)

        # Check if results were returned
        if 'The catalog is not in the list' in content:
            raise Exception("Catalogue not found")

        # Check that object name was not malformed
        if 'Either wrong or missing coordinate/object name' in content:
            raise Exception("Malformed coordinate/object name")

        # Check that the results are not of length zero
        if len(content) == 0:
            raise Exception("The LCOGT server sent back an empty reply")

        # Read it in using the astropy VO table reader
        try:
            first_table = votable.parse(six.BytesIO(response.content),
                                        pedantic=False).get_first_table()
        except Exception as ex:
            self.response = response
            self.table_parse_error = ex
            raise TableParseError("Failed to parse LCOGT votable! The raw "
                                  "response can be found in self.response, "
                                  "and the error in self.table_parse_error.")

        # Convert to astropy.table.Table instance
        table = first_table.to_table()

        # Check if table is empty
        if len(table) == 0:
            warnings.warn(
                "Query returned no results, so the table will "
                "be empty", NoResultsWarning)

        return table
Example n. 7
 def read_array_field(self, fieldlist):
     # Turn an IRAF record array field into a numpy array.
     fieldline = [l.split() for l in fieldlist[1:]]
     # Take only the first 3 columns: iraf also writes strings at the end
     # of some field lines.
     xyz = [l[:3] for l in fieldline]
     try:
         farr = np.array(xyz)
     except Exception:
         log.debug("Could not read array field {}".format(fieldlist[0].split()[0]))
         return None  # without this, the return below would raise NameError
     return farr.astype(np.float64)
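
An illustrative input for read_array_field (the record layout is assumed, not taken from the source): the first line is the field header, and later rows may carry trailing strings that l[:3] drops.

fieldlist = [
    "xyz 2",                 # hypothetical header line
    "1.0 2.0 3.0",
    "4.0 5.0 6.0 INDEF",     # trailing token discarded by l[:3]
]
# read_array_field(fieldlist) -> array([[1., 2., 3.], [4., 5., 6.]])
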
def Data_structure():
    '''
    This function creates the empty data structure used by the query: three lists for the x coordinates, the y coordinates and the total fluxes.
    '''
    log.debug(hist())
    
    Coord_x = []
    Coord_y = []
    Flux_tot = []
    data = [Coord_x, Coord_y, Flux_tot]
    return data
Example n. 9
 def from_cache(self, cache_location):
     request_file = self.request_file(cache_location)
     try:
         with open(request_file, "rb") as f:
             response = pickle.load(f)
         if not isinstance(response, requests.Response):
             response = None
     except IOError:  # TODO: change to FileNotFoundError once drop py2 support
         response = None
     if response:
         log.debug("Retrieving data from {0}".format(request_file))
     return response
Example n. 11
def magnitudo_to_electrons(magnitudo, photo_filters, AirMass, exposure_time, Controll):
    '''
    This function calls VEGA_to_AB, atmospheric_attenuation, magnitudo_to_photons and photons_to_electrons
    to obtain the total number of electrons generated in a CCD by the light of a star.

    Parameters
    ----------

    magnitudo : array (float elements)
        Each element is the magnitude of the star in a specific band (photo_filters)

    photo_filters : list (string elements)
        A list whose elements are strings identifying a filter, e.g. "U", "R", "I"

    AirMass : float
        The airmass gives the thickness of the atmosphere crossed by the light

    exposure_time : int
        The exposure time used to obtain the image, in seconds

    Controll : bool
        If True, the last element of the array is treated as a reference value:
        it replaces anomalously low V-band counts and is excluded from the total

    Output
    ------

    tot : float
        The total number of electrons generated by the star on the CCD
    '''
    log.debug(hist())
    magnitudo = VEGA_to_AB(magnitudo, photo_filters)
    magnitudo = atmospheric_attenuation(magnitudo, photo_filters, AirMass)
    photons = magnitudo_to_photons(magnitudo, photo_filters)
    electrons = photons_to_electrons(photons, photo_filters)
    
    if Controll:
        for n, i in enumerate(photo_filters):
            if i == "V":
                if electrons[n] <= 3e-4:
                    electrons[n] = electrons[-1]
        tot = (sum(electrons)-electrons[-1])*exposure_time
    else:
        tot = sum(electrons)*exposure_time
    return tot
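
A usage sketch for the chain above, assuming the module's DATA tables are loaded; all input values are illustrative.

mags = [11.2, 10.8, 10.5, 10.9]    # hypothetical VEGA magnitudes; last entry is the reference
filters = ["B", "V", "R", "I"]
tot = magnitudo_to_electrons(mags, filters, AirMass=1.2, exposure_time=30, Controll=True)
print(tot)  # electrons collected in 30 s, excluding the reference entry
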
Example n. 12
    def find_radius_cumul(self, fraction):
        """
        Find for each model the radius containing a fraction of the flux.

        Parameters
        ----------
        fraction: float
            The fraction to use when determining the radius
        """

        log.debug("Calculating radii containing %g%s of the flux" %
                  (fraction * 100., '%'))

        radius = np.zeros(self.n_models, dtype=self.flux.dtype) * u.au

        if self.apertures is None:

            return radius

        else:

            required = fraction * self.flux[:, -1]

            # Linear interpolation - need to loop over apertures for vectorization
            for ia in range(len(self.apertures) - 1):
                calc = (required >= self.flux[:, ia]) & (required <
                                                         self.flux[:, ia + 1])
                radius[calc] = (required[calc] - self.flux[calc, ia]) / \
                               (self.flux[calc, ia + 1] - self.flux[calc, ia]) * \
                               (self.apertures[ia + 1] - self.apertures[ia]) + \
                    self.apertures[ia]

            calc = (required < self.flux[:, 0])
            radius[calc] = self.apertures[0]

            calc = (required >= self.flux[:, -1])
            radius[calc] = self.apertures[-1]

            return radius
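
A standalone sketch of the interpolation above for a single model, with illustrative cumulative fluxes:

import numpy as np

apertures = np.array([1.0, 2.0, 4.0, 8.0])   # au, illustrative
flux = np.array([10.0, 30.0, 60.0, 100.0])   # cumulative flux per aperture
required = 0.5 * flux[-1]                    # radius enclosing 50% of the flux

ia = np.searchsorted(flux, required, side="right") - 1   # bracketing interval
radius = (required - flux[ia]) / (flux[ia + 1] - flux[ia]) \
         * (apertures[ia + 1] - apertures[ia]) + apertures[ia]
print(radius)  # (50-30)/(60-30)*(4-2)+2 = 3.33... au
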
Example n. 14
    def find_radius_sigma(self, fraction):
        """
        Find for each model a fractional surface brightness radius

        This is the outermost radius where the surface brightness is larger
        than a fraction of the peak surface brightness.

        Parameters
        ----------
        fraction: float
            The fraction to use when determining the radius
        """

        log.debug("Calculating %g%s peak surface brightness radii" %
                  (fraction * 100., '%'))

        sigma = np.zeros(self.flux.shape, dtype=self.flux.dtype)
        sigma[:, 0] = self.flux[:, 0] / self.apertures[0]**2
        sigma[:, 1:] = (self.flux[:, 1:] - self.flux[:, :-1]) / \
                       (self.apertures[1:] ** 2 - self.apertures[:-1] ** 2)

        maximum = np.max(sigma, axis=1)

        radius = np.zeros(self.n_models, dtype=self.flux.dtype) * u.au

        # Linear interpolation - need to loop over apertures backwards for vectorization
        for ia in range(len(self.apertures) - 2, -1, -1):
            calc = (sigma[:, ia] > fraction * maximum) & (radius == 0.)
            radius[calc] = (sigma[calc, ia] - fraction * maximum[calc]) / \
                           (sigma[calc, ia] - sigma[calc, ia + 1]) * \
                           (self.apertures[ia + 1] - self.apertures[ia]) + \
                self.apertures[ia]

        calc = sigma[:, -1] > fraction * maximum
        radius[calc] = self.apertures[-1]

        return radius
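
A sketch of the annular surface-brightness step above: the flux gained between consecutive apertures divided by the annulus area (numbers illustrative).

import numpy as np

apertures = np.array([1.0, 2.0, 4.0])
flux = np.array([10.0, 30.0, 60.0])          # cumulative flux
sigma = np.empty_like(flux)
sigma[0] = flux[0] / apertures[0] ** 2       # innermost disc: 10.0
sigma[1:] = np.diff(flux) / np.diff(apertures ** 2)
print(sigma)  # [10.0, 6.67, 2.5]
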
Example n. 16
def _convolve_model_dir_1(model_dir, filters, overwrite=False):

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + "/convolved"):
        os.mkdir(model_dir + "/convolved")

    # Find all SED files to convolve
    sed_files = (
        glob.glob(model_dir + "/seds/*.fits.gz")
        + glob.glob(model_dir + "/seds/*/*.fits.gz")
        + glob.glob(model_dir + "/seds/*.fits")
        + glob.glob(model_dir + "/seds/*/*.fits")
    )

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [
        ConvolvedFluxes(
            model_names=np.zeros(len(sed_files), dtype="U30" if six.PY3 else "S30"),
            apertures=apertures,
            initialize_arrays=True,
        )
        for i in range(len(filters))
    ]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug("Convolving {0}".format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order="nu")

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info("Rebinning filters")
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table["MODEL_NAME"])
        fluxes[i].write(model_dir + "/convolved/" + f.name + ".fits", overwrite=overwrite)
Example n. 17
def convolve_model_dir_monochromatic(model_dir,
                                     overwrite=False,
                                     max_ram=8,
                                     wav_min=-np.inf * u.micron,
                                     wav_max=np.inf * u.micron):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    overwrite : bool, optional
        Whether to overwrite the output files
    max_ram : float, optional
        The maximum amount of RAM that can be used (in GB)
    wav_min : float, optional
        The minimum wavelength to consider. Only wavelengths above this value
        will be output.
    wav_max : float, optional
        The maximum wavelength to consider. Only wavelengths below this value
        will be output.
    """

    modpar = parfile.read(os.path.join(model_dir, 'models.conf'), 'conf')
    if modpar.get('version', 1) > 1:
        raise ValueError(
            "monochromatic filters are no longer used for new-style model directories"
        )

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = sorted(
        glob.glob(model_dir + '/seds/*.fits.gz') +
        glob.glob(model_dir + '/seds/*/*.fits.gz') +
        glob.glob(model_dir + '/seds/*.fits') +
        glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    # Find number of models
    n_models = len(sed_files)

    if n_models == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(n_models, model_dir))

    # Find out apertures and wavelengths
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures
    n_wav = first_sed.n_wav
    wavelengths = first_sed.wav

    # For model grids that are very large, it is not possible to compute all
    # fluxes in one go, so we need to process in chunks in wavelength space.
    chunk_size = min(
        n_wav, int(np.floor(max_ram * 1024.**3 / (4. * 2. * n_models * n_ap))))

    if chunk_size == n_wav:
        log.info("Producing all monochromatic files in one go")
    else:
        log.info("Producing monochromatic files in chunks of {0}".format(
            chunk_size))

    filters = Table()
    filters['wav'] = wavelengths
    filters['filter'] = np.zeros(wavelengths.shape, dtype='S10')

    # Figure out range of wavelength indices to use
    # (wavelengths array is sorted in reverse order)
    jlo = n_wav - 1 - (wavelengths[::-1].searchsorted(wav_max) - 1)
    jhi = n_wav - 1 - wavelengths[::-1].searchsorted(wav_min)
    chunk_size = min(chunk_size, jhi - jlo + 1)

    # Loop over wavelength chunks
    for jmin in range(jlo, jhi, chunk_size):

        # Find upper wavelength to compute
        jmax = min(jmin + chunk_size - 1, jhi)

        log.info('Processing wavelengths {0} to {1}'.format(jmin, jmax))

        # Set up convolved fluxes
        fluxes = [
            ConvolvedFluxes(model_names=np.zeros(
                n_models, dtype='U30' if six.PY3 else 'S30'),
                            apertures=apertures,
                            initialize_arrays=True) for i in range(chunk_size)
        ]

        b = ProgressBar(len(sed_files))

        # Loop over SEDs
        for im, sed_file in enumerate(sed_files):

            b.update()

            log.debug('Processing {0}'.format(os.path.basename(sed_file)))

            # Read in SED
            s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

            # Convolve
            for j in range(chunk_size):

                fluxes[j].central_wavelength = wavelengths[j + jmin]
                fluxes[j].apertures = apertures
                fluxes[j].model_names[im] = s.name

                if n_ap == 1:
                    fluxes[j].flux[im] = s.flux[0, j + jmin]
                    fluxes[j].error[im] = s.error[0, j + jmin]
                else:
                    fluxes[j].flux[im, :] = s.flux[:, j + jmin]
                    fluxes[j].error[im, :] = s.error[:, j + jmin]

        for j in range(chunk_size):
            fluxes[j].sort_to_match(par_table['MODEL_NAME'])
            fluxes[j].write('{0:s}/convolved/MO{1:03d}.fits'.format(
                model_dir, j + jmin + 1),
                            overwrite=overwrite)
            filters['filter'][j + jmin] = "MO{0:03d}".format(j + jmin + 1)

    return filters
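
A worked check of the chunking arithmetic above: with max_ram = 8 (GB), 4 bytes per float32, and two arrays (flux and error) per model, aperture and wavelength:

n_models, n_ap = 200000, 20                  # illustrative grid size
chunk_size = int((8 * 1024.0**3) / (4.0 * 2.0 * n_models * n_ap))
print(chunk_size)  # 268 wavelengths per pass
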
Example n. 18
def to_cache(response, cache_file):
    log.debug("Caching data to {0}".format(cache_file))
    with open(cache_file, "wb") as f:
        pickle.dump(response, f)
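
A round-trip sketch pairing to_cache with the from_cache method shown in Example n. 9; the URL and cache path are illustrative.

import pickle
import requests

response = requests.get("https://example.com")
to_cache(response, "/tmp/example_query.pickle")

with open("/tmp/example_query.pickle", "rb") as f:
    cached = pickle.load(f)
print(isinstance(cached, requests.Response))  # True
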
Example n. 20
def convolve_model_dir(model_dir, filters, overwrite=False):
    """
    Convolve all the model SEDs in a model directory

    Parameters
    ----------
    model_dir : str
        The path to the model directory
    filters : list
        A list of :class:`~sedfitter.filter.Filter` objects to use for the
        convolution
    overwrite : bool, optional
        Whether to overwrite the output files
    """

    for f in filters:
        if f.name is None:
            raise Exception("filter name needs to be set")
        if f.central_wavelength is None:
            raise Exception("filter central wavelength needs to be set")

    # Create 'convolved' sub-directory if needed
    if not os.path.exists(model_dir + '/convolved'):
        os.mkdir(model_dir + '/convolved')

    # Find all SED files to convolve
    sed_files = (glob.glob(model_dir + '/seds/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*/*.fits.gz') +
                 glob.glob(model_dir + '/seds/*.fits') +
                 glob.glob(model_dir + '/seds/*/*.fits'))

    par_table = load_parameter_table(model_dir)

    if len(sed_files) == 0:
        raise Exception("No SEDs found in %s" % model_dir)
    else:
        log.info("{0} SEDs found in {1}".format(len(sed_files), model_dir))

    # Find out apertures
    first_sed = SED.read(sed_files[0])
    n_ap = first_sed.n_ap
    apertures = first_sed.apertures

    # Set up convolved fluxes
    fluxes = [ConvolvedFluxes(model_names=np.zeros(len(sed_files), dtype='U30' if six.PY3 else 'S30'),
                              apertures=apertures,
                              initialize_arrays=True)
              for i in range(len(filters))]

    # Set up list of binned filters
    binned_filters = []
    binned_nu = None

    # Loop over SEDs

    b = ProgressBar(len(sed_files))

    for im, sed_file in enumerate(sed_files):

        log.debug('Convolving {0}'.format(os.path.basename(sed_file)))

        # Read in SED
        s = SED.read(sed_file, unit_freq=u.Hz, unit_flux=u.mJy, order='nu')

        # Check if filters need to be re-binned
        try:
            assert binned_nu is not None
            np.testing.assert_array_almost_equal_nulp(s.nu.value, binned_nu.value, 100)
        except AssertionError:
            log.info('Rebinning filters')
            binned_filters = [f.rebin(s.nu) for f in filters]
            binned_nu = s.nu

        b.update()

        # Convolve
        for i, f in enumerate(binned_filters):

            fluxes[i].central_wavelength = f.central_wavelength
            fluxes[i].apertures = apertures
            fluxes[i].model_names[im] = s.name

            if n_ap == 1:
                fluxes[i].flux[im] = np.sum(s.flux * f.response)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2))
            else:
                fluxes[i].flux[im, :] = np.sum(s.flux * f.response, axis=1)
                fluxes[i].error[im] = np.sqrt(np.sum((s.error * f.response) ** 2, axis=1))

    for i, f in enumerate(binned_filters):
        fluxes[i].sort_to_match(par_table['MODEL_NAME'])
        fluxes[i].write(model_dir + '/convolved/' + f.name + '.fits',
                        overwrite=overwrite)
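
A hedged usage sketch for convolve_model_dir, assuming the sedfitter package layout implied above; the filter curve, name and model directory are placeholders.

import numpy as np
from astropy import units as u
from sedfitter.filter import Filter
from sedfitter.convolve import convolve_model_dir

f = Filter()
f.name = "X1"                                   # hypothetical filter name
f.central_wavelength = 1.25 * u.micron
f.nu = np.linspace(2.2e14, 2.6e14, 100) * u.Hz  # placeholder frequency grid
f.response = np.ones(100)                       # flat response, illustrative only
f.normalize()

convolve_model_dir("models_dir", filters=[f], overwrite=True)  # hypothetical path
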