Example #1
def link_or_copy(group, name, link, copy, absolute_paths=False):
    '''
    Link or copy a dataset or group

    Parameters
    ----------
    group : h5py.Group
        The group to create the link, dataset, or group in
    name : str
        The name of the link, dataset, or group in the new file
    link : h5py.ExternalLink
        A link to the group or dataset to include
    copy : bool
        Whether to copy or link to the dataset
    absolute_paths : bool
        Only used if copy=False. If True, the link stores the absolute
        filename; otherwise the path relative to the new file is used.
    '''
    if copy:
        f = h5py.File(link.filename, 'r')
        f.copy(link.path, group, name=name)
        f.close()
    else:
        if absolute_paths:
            group[name] = h5py.ExternalLink(os.path.abspath(link.filename), link.path)
        else:
            group[name] = h5py.ExternalLink(os.path.relpath(link.filename, os.path.dirname(group.file.filename)), link.path)
        try:
            group[name]
        except KeyError:  # indicates linking failed (h5py < 2.1.0)
            logger.warn("Linking failed, copying instead (indicates an outdated version of h5py)")
            del group[name]
            f = h5py.File(link.filename, 'r')
            f.copy(link.path, group, name=name)
            f.close()
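
A minimal usage sketch for the function above, assuming both HDF5 files exist; the filenames and dataset path are placeholders:

import h5py

# output.h5 is the file being assembled, data.h5 holds the dataset to reference
link = h5py.ExternalLink('data.h5', '/model/density')
with h5py.File('output.h5', 'w') as out:
    # copy=False stores a relative external link rather than duplicating the data
    link_or_copy(out['/'], 'density', link, copy=False)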
Example #2
 def rho_0(self, value):
     if value is not None:
         validate_scalar('rho_0', value, domain='positive')
         if self._mdot is not None:
             logger.warn("Overriding value of mdot with value derived from rho_0")
             self._mdot = None
     self._rho_0 = value
Example #3
 def mdot(self, value):
     if value is not None:
         validate_scalar('mdot', value, domain='positive')
         if self._rho_0 is not None:
             logger.warn("Overriding value of rho_0 with value derived from mdot")
             self._rho_0 = None
     self._mdot = value
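
Examples #2 and #3 are two halves of one pattern: rho_0 and mdot are alternative normalizations of the same model, so setting one clears the cached value of the other. A stripped-down sketch of that pattern (the class and attribute names are illustrative only):

class Disk(object):

    def __init__(self):
        self._rho_0 = None
        self._mdot = None

    @property
    def rho_0(self):
        return self._rho_0

    @rho_0.setter
    def rho_0(self, value):
        if value is not None and self._mdot is not None:
            self._mdot = None  # mdot is now stale; re-derive it on demand
        self._rho_0 = value

    @property
    def mdot(self):
        return self._mdot

    @mdot.setter
    def mdot(self, value):
        if value is not None and self._rho_0 is not None:
            self._rho_0 = None  # rho_0 is now stale
        self._mdot = value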
Example #4
    def add_settled_disks(self, reference_disk, reference_size, eta=0.,
                          sizes=[], dust_files=[]):
        '''
        Automatically create disks with varying degrees of settling

        .. warning:: this function is still experimental, and will be documented once stable
        '''

        exists = False

        for disk in self.disks:
            if disk is reference_disk:
                logger.warn("Reference disk already exists, not re-adding")
                exists = True

        if not exists:
            logger.warn("Reference disk does not exist, adding")
            self.disks.append(reference_disk)

        for i, size in enumerate(sizes):
            disk = deepcopy(reference_disk)
            disk.star = self.star
            disk.h_0 *= (size / reference_size) ** (-eta)
            disk.dust = dust_files[i]
            self.disks.append(disk)
Example #5
    def from_fits(hdu_list):
        """
        Create EnergyDependentARF from HDU list.

        Parameters
        ----------
        hdu_list : `~astropy.io.fits.HDUList`
            HDU list with ``SPECRESP`` extensions.

        Returns
        -------
        arf : `EnergyDependentARF`
            ARF object.

        Notes
        -----
        For more info on the ARF FITS file format see:
        http://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/summary/cal_gen_92_002_summary.html

        Recommended units for ARF tables are keV and cm^2, but TeV and m^2 are chosen here
        as the more natural units for IACTs.
        """
        energy_lo = Quantity(hdu_list['SPECRESP'].data['ENERG_LO'], 'TeV')
        energy_hi = Quantity(hdu_list['SPECRESP'].data['ENERG_HI'], 'TeV')
        effective_area = Quantity(hdu_list['SPECRESP'].data['SPECRESP'], 'm^2')
        try:
            energy_thresh_lo = Quantity(
                hdu_list['SPECRESP'].header['LO_THRES'], 'TeV')
            energy_thresh_hi = Quantity(
                hdu_list['SPECRESP'].header['HI_THRES'], 'TeV')
            return EffectiveAreaTable(energy_lo, energy_hi, effective_area,
                                      energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warn('No safe energy thresholds found. Setting to default')
            return EffectiveAreaTable(energy_lo, energy_hi, effective_area)
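
A hedged usage sketch: the code above returns an EffectiveAreaTable, so the call below assumes the method is exposed on that class; the HDU list is opened with astropy.io.fits and the filename is a placeholder.

from astropy.io import fits

hdu_list = fits.open('arf.fits')              # hypothetical ARF file
arf = EffectiveAreaTable.from_fits(hdu_list)
hdu_list.close()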
Example #6
    def from_fits(cls, hdu_list):
        """Create `EnergyDependentMultiGaussPSF` from HDU list.

        Parameters
        ----------
        hdu_list : `~astropy.io.fits.HDUList`
            HDU list with correct extensions.
        """
        valid_extnames = ['POINT SPREAD FUNCTION', 'PSF_2D']
        hdu = get_hdu_with_valid_name(hdu_list, valid_extnames)

        energy_lo = Quantity(hdu.data['ENERG_LO'][0], 'TeV')
        energy_hi = Quantity(hdu.data['ENERG_HI'][0], 'TeV')
        theta = Angle(hdu.data['THETA_LO'][0], 'deg')

        # Get sigmas
        shape = (len(theta), len(energy_hi))
        sigmas = []
        for key in ['SIGMA_1', 'SIGMA_2', 'SIGMA_3']:
            sigmas.append(hdu.data[key].reshape(shape))

        # Get amplitudes
        norms = []
        for key in ['SCALE', 'AMPL_2', 'AMPL_3']:
            norms.append(hdu.data[key].reshape(shape))
        try:
            energy_thresh_lo = Quantity(hdu.header['LO_THRES'], 'TeV')
            energy_thresh_hi = Quantity(hdu.header['HI_THRES'], 'TeV')
            return cls(energy_lo, energy_hi, theta, sigmas,
                       norms, energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warn('No safe energy thresholds found. Setting to default')
            return cls(energy_lo, energy_hi, theta, sigmas, norms)
Example #7
def time_from_mjd_string(s, scale='utc'):
    """Returns an astropy Time object generated from a MJD string input."""
    ss = s.lower()
    if "e" in ss or "d" in ss:
        ss = ss.translate(maketrans("d", "e"))
        num, expon = ss.split("e")
        expon = int(expon)
        if expon < 0:
            log.warn("Likely bogus sci notation input in " +
                     "time_from_mjd_string ('%s')!" % s)
            # This could cause a loss of precision...
            # maybe throw an exception instead?
            imjd, fmjd = 0, float(ss)
        else:
            imjd_s, fmjd_s = num.split('.')
            imjd = int(imjd_s + fmjd_s[:expon])
            fmjd = float("0."+fmjd_s[expon:])
    else:
        mjd_s = ss.split('.')
        # If input was given as an integer, add floating "0"
        if len(mjd_s) == 1:
            mjd_s.append("0")
        imjd_s, fmjd_s = mjd_s
        imjd = int(imjd_s)
        fmjd = float("0." + fmjd_s)

    return astropy.time.Time(imjd, fmjd, scale=scale, format='pulsar_mjd',
                             precision=9)
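
A brief usage sketch, assuming PINT's 'pulsar_mjd' Time format has been registered (importing pint does this); the MJD values are illustrative:

t_utc = time_from_mjd_string("54321.123456789")
t_tdb = time_from_mjd_string("54321.123456789", scale='tdb')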
Example #8
 def rho_0(self, value):
     if value is not None:
         validate_scalar("rho_0", value, domain="positive")
         if self._mass is not None:
             logger.warn("Overriding value of mass with value derived from rho_0")
             self._mass = None
     self._rho_0 = value
Example #9
    def __init__(self, command, string):

        string = string[8:-1]

        strings = {}
        while True:
            try:
                p1 = string.index('"')
                p2 = string.index('"', p1 + 1)
                substring = string[p1 + 1:p2]
                key = hashlib.md5(substring.encode('ascii')).hexdigest()
                strings[key] = substring
                string = string[:p1] + key + string[p2 + 1:]
            except:
                break

        for pair in string.split(', '):
            key, value = pair.split('=')
            if value in strings:
                self.__dict__[key] = strings[value]
            else:
                self.__dict__[key] = simplify(value)

        if self.stat == "ERROR":
            raise MontageError("%s: %s" % (command, self.msg))
        elif self.stat == "WARNING":
            log.warn(self.msg)
Example #10
    def _vertical_profile(self, r, theta):

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warn("Ignoring disk, since rmax < rmin")
            return np.zeros(theta.shape)

        # Convert coordinates to cylindrical polars
        z = r * np.cos(theta)
        w = r * np.sin(theta)

        # Find disk scaleheight at each cylindrical radius
        h = self.h_0 * (w / self.r_0) ** self.beta

        # Find disk density at all positions
        rho = (self.r_0 / w) ** (self.beta - self.p) \
            * np.exp(-0.5 * (z / h) ** 2)

        # Geometrical factor
        rho *= (1. - np.sqrt(self.star.radius / w))

        rho *= self.rho_0

        # What about normalization

        return rho
Example #11
 def mass(self, value):
     if value is not None:
         validate_scalar("mass", value, domain="positive")
         if self._rho_0 is not None:
             logger.warn("Overriding value of rho_0 with value derived from mass")
             self._rho_0 = None
     self._mass = value
Example #12
    def from_fits(cls, hdu, unit=None):
        """Read ENERGIES fits extension (`~gammapy.spectrum.energy.Energy`).

        Parameters
        ----------
        hdu: `~astropy.io.fits.BinTableHDU`
            ``ENERGIES`` extensions.
        unit : `~astropy.units.UnitBase`, str, None
            Energy unit
        """

        header = hdu.header
        fitsunit = header.get('TUNIT1')

        if fitsunit is None:
            if unit is not None:
                log.warn("No unit found in the FITS header."
                         " Setting it to {0}".format(unit))
                fitsunit = unit
            else:
                raise ValueError("No unit found in the FITS header."
                                 " Please specifiy a unit")

        energy = cls(hdu.data['Energy'], fitsunit)

        return energy.to(unit)
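
A small usage sketch, assuming the FITS file contains an ENERGIES binary table extension; the filename is a placeholder:

from astropy.io import fits

hdu = fits.open('spectrum.fits')['ENERGIES']   # hypothetical file
energy = Energy.from_fits(hdu, unit='TeV')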
Example #13
def get_latest_file(template, raise_error=False, err_msg=''):
    """Find the filename that appears last in sorted order
    based on given template.

    Parameters
    ----------
    template : str
        Search template in the form of ``path/pattern``
        where pattern is acceptable by :py:mod:`fnmatch`.

    raise_error : bool, optional
        Raise an error when no files are found.
        Otherwise, only a warning is issued.

    err_msg : str
        Alternate message for when no files are found.
        If not given, a generic message is used.

    Returns
    -------
    filename : str
        Latest filename.

    Raises
    ------
    IOError
        No files found.

    """
    path, pattern = os.path.split(template)

    # Remote FTP directory
    if path.lower().startswith('ftp:'):
        from astropy.extern.six.moves.urllib.request import urlopen

        response = urlopen(path).read().decode('utf-8').splitlines()
        allfiles = list(set([x.split()[-1] for x in response]))  # Rid symlink

    # Local directory
    else:
        allfiles = os.listdir(path)

    matched_files = sorted(fnmatch.filter(allfiles, pattern))

    # Last file in sorted listing
    if matched_files:
        filename = os.path.join(path, matched_files[-1])

    # No files found
    else:
        if not err_msg:
            err_msg = 'No files found for {0}'.format(template)

        if raise_error:
            raise IOError(err_msg)
        else:
            log.warn(err_msg)
            filename = ''

    return filename
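
Two hedged usage sketches for the helper above; the search templates are placeholders:

# Latest local calibration file matching a pattern
fname = get_latest_file('/path/to/cal/*_idc.fits', raise_error=True)

# Latest file in a remote FTP listing
fname = get_latest_file('ftp://ftp.example.org/pub/files/*.txt')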
Example #14
    def midplane_cumulative_density(self, r):
        """
        Find the cumulative column density as a function of radius.

        The cumulative density is measured outwards from the origin, and in
        the midplane.

        Parameters
        ----------
        r : np.ndarray
            Array of values of the radius up to which to tabulate the
            cumulative density.

        Returns
        -------
        rho : np.ndarray
            Array of values of the cumulative density.
        """

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warn("Ignoring disk, since rmax < rmin")
            return np.zeros(r.shape)

        int1 = integrate_powerlaw(self.rmin, r.clip(self.rmin, self.rmax), self.p - self.beta)
        int1 *= self.r_0 ** (self.beta - self.p)

        return self.rho_0 * int1
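
For reference, ignoring the geometrical factor, the midplane (z = 0) density of the disk in Example #10 is rho_0 (r_0 / w)^(beta - p). So, assuming integrate_powerlaw(a, b, n) returns the integral of w^n from a to b, the quantity computed above is

    \Sigma(r) = \rho_0 \, r_0^{\beta - p} \int_{r_\mathrm{min}}^{\min(r,\ r_\mathrm{max})} w^{\,p - \beta} \, dw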
Example #15
    def set_lte(self, optical_properties, mean_opacities):

        # Specify that emissivities are LTE
        self.is_lte = True

        # Get temperatures from mean opacities
        temperature = mean_opacities.temperature
        specific_energy = mean_opacities.specific_energy

        # Set frequency scale
        planck_nu = planck_nu_range(temperature[0], temperature[-1])
        self.nu = nu_common(planck_nu, optical_properties.nu)

        if planck_nu.min() < optical_properties.nu.min():
            logger.warn("Planck function for lowest temperature not completely covered by opacity function")
            self.nu = self.nu[self.nu >= optical_properties.nu.min()]

        if planck_nu.max() > optical_properties.nu.max():
            logger.warn("Planck function for highest temperature not completely covered by opacity function")
            self.nu = self.nu[self.nu <= optical_properties.nu.max()]

        # Compute opacity to absorption
        kappa_nu = interp1d_fast_loglog(optical_properties.nu,
                                        optical_properties.kappa, self.nu)

        # Compute LTE emissivities
        self.var_name = 'specific_energy'
        self.var = specific_energy
        self.jnu = np.zeros((len(self.nu), len(temperature)))

        # Find LTE emissivities
        for it, T in enumerate(temperature):
            self.jnu[:, it] = kappa_nu * B_nu(self.nu, T)
Example #16
def read_lmv(filename):
    """
    Read an LMV cube file

    Specification is primarily in GILDAS image_def.f90
    """
    log.warn("CLASS LMV cube reading is tentatively supported.  "
             "Please post bug reports at the first sign of danger!")

    with open(filename,'rb') as lf:
        # lf for "LMV File"
        filetype = _read_string(lf, 12)
        #!---------------------------------------------------------------------
        #! @ private
        #!       SYCODE system code
        #!       '-'    IEEE
        #!       '.'    EEEI (IBM like)
        #!       '_'    VAX
        #!       IMCODE file code
        #!       '<'    IEEE  64 bits    (Little Endian, 99.9 % of recent computers)
        #!       '>'    EEEI  64 bits    (Big Endian, HPUX, IBM-RISC, and SPARC ...)
        #!---------------------------------------------------------------------
        imcode = filetype[6]
        if filetype[:6] != 'GILDAS' or filetype[7:] != 'IMAGE':
            raise TypeError("File is not a GILDAS Image file")

        if imcode in ('<','>'):
            if imcode =='>':
                log.warn("Swap the endianness first...")
            return read_lmv_type2(lf)
        else:
            return read_lmv_type1(lf)
Example #17
 def lvisc(self, value):
     if value is not None:
         validate_scalar('lvisc', value, domain='positive')
         if self._mdot is not None:
             logger.warn("Overriding value of mdot with value derived from lvisc")
             self._mdot = None
     self._lvisc = value
Example #18
 def _login(self, username=None, store_password=False):
     if username is None:
         if self.USERNAME == "":
             raise LoginError("If you do not pass a username to login(), you should configure a default one!")
         else:
             username = self.USERNAME
     # Get password from keyring or prompt
     password_from_keyring = keyring.get_password("astroquery:www.eso.org", username)
     if password_from_keyring is None:
         if system_tools.in_ipynb():
             log.warn("You may be using an ipython notebook:"
                      " the password form will appear in your terminal.")
         password = getpass.getpass("{0}, enter your ESO password:\n".format(username))
     else:
         password = password_from_keyring
     # Authenticate
     log.info("Authenticating {0} on www.eso.org...".format(username))
     # Do not cache pieces of the login process
     login_response = self._request("GET", "https://www.eso.org/sso/login", cache=False)
     login_result_response = self._activate_form(login_response,
                                                 form_index=-1,
                                                 inputs={'username': username,
                                                         'password': password})
     root = BeautifulSoup(login_result_response.content, 'html5lib')
     authenticated = not root.select('.error')
     if authenticated:
         log.info("Authentication successful!")
     else:
         log.exception("Authentication failed!")
     # When authenticated, save password in keyring if needed
     if authenticated and password_from_keyring is None and store_password:
         keyring.set_password("astroquery:www.eso.org", username, password)
     return authenticated
Example #19
    def from_fits(cls, hdu_list):
        """Create `EnergyDependentMultiGaussPSF` from HDU list.

        Parameters
        ----------
        hdu_list : `~astropy.io.fits.HDUList`
            HDU list with correct extensions.
        """
        extension = "POINT SPREAD FUNCTION"
        energy_lo = Quantity(hdu_list[extension].data["ENERG_LO"][0], "TeV")
        energy_hi = Quantity(hdu_list[extension].data["ENERG_HI"][0], "TeV")
        theta = Angle(hdu_list[extension].data["THETA_LO"][0], "degree")

        # Get sigmas
        shape = (len(theta), len(energy_hi))
        sigmas = []
        for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
            sigmas.append(hdu_list[extension].data[key].reshape(shape))

        # Get amplitudes
        norms = []
        for key in ["SCALE", "AMPL_2", "AMPL_3"]:
            norms.append(hdu_list[extension].data[key].reshape(shape))
        try:
            energy_thresh_lo = Quantity(hdu_list[extension].header["LO_THRES"], "TeV")
            energy_thresh_hi = Quantity(hdu_list[extension].header["HI_THRES"], "TeV")
            return cls(energy_lo, energy_hi, theta, sigmas, norms, energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warn("No safe energy thresholds found. Setting to default")
            return cls(energy_lo, energy_hi, theta, sigmas, norms)
Example #20
    def read_parfile(self, filename):
        """Read values from the specified parfile into the model parameters."""
        pfile = open(filename, 'r')
        for l in [pl.strip() for pl in pfile.readlines()]:
            # Skip blank lines
            if not l:
                continue
            # Skip commented lines
            if l.startswith('#') or l[:2]=="C ":
                continue
            parsed = False
            for par in self.params:
                if getattr(self, par).from_parfile_line(l):
                    parsed = True
            if not parsed:
                try:
                    prefix,f,v = utils.split_prefixed_name(l.split()[0])
                    if prefix not in ignore_prefix:
                        log.warn("Unrecognized parfile line '%s'" % l)
                except:
                    if l.split()[0] not in ignore_params:
                        log.warn("Unrecognized parfile line '%s'" % l)

        # The "setup" functions contain tests for required parameters or
        # combinations of parameters, etc, that can only be done
        # after the entire parfile is read
        self.setup()
Example #21
 def __setitem__(self, item, value):
     if isinstance(value, AMRGridView):
         if self.levels == [] and value.levels != []:
             logger.warn("No geometry in target grid - copying from original grid")
             for level in value.levels:
                 level_ref = self.add_level()
                 for grid in level.grids:
                     grid_ref = level_ref.add_grid()
                     grid_ref.nx = grid.nx
                     grid_ref.ny = grid.ny
                     grid_ref.nz = grid.nz
                     grid_ref.xmin, grid_ref.xmax = grid.xmin, grid.xmax
                     grid_ref.ymin, grid_ref.ymax = grid.ymin, grid.ymax
                     grid_ref.zmin, grid_ref.zmax = grid.zmin, grid.zmax
                     grid_ref.quantities = {}
         for ilevel, level_ref in enumerate(self.levels):
             level = value.levels[ilevel]
             for igrid, grid_ref in enumerate(level_ref.grids):
                 grid = level.grids[igrid]
                 grid_ref.quantities[item] = deepcopy(grid.quantities[value.viewed_quantity])
     elif isinstance(value, h5py.ExternalLink):
         filename = value.filename
         base_path = os.path.dirname(value.path)
         array_name = os.path.basename(value.path)
         for ilevel, level_ref in enumerate(self.levels):
             level_path = 'level_%05i' % (ilevel + 1)
             for igrid, grid_ref in enumerate(level_ref.grids):
                  grid_path = 'grid_%05i' % (igrid + 1)
                 grid_ref.quantities[item] = h5py.ExternalLink(filename, os.path.join(base_path, level_path, grid_path, array_name))
     elif value == []:
         for level in self.levels:
             for grid in level.grids:
                 grid.quantities[item] = []
     else:
         raise ValueError('value should be an empty list or an AMRGridView instance')
Example #22
 def add_component(self, component, order=None, force=False):
     """
     This is a method to add a component to the timing model
     Parameter
     ---------
     component: component instance
         The component need to be added to the timing model
     order: int, optional
         The order of component. The order starts from zero.
     force: bool, optional
         If add a duplicated type of component
     """
     comp_type = self.get_component_type(component)
     if comp_type in self.component_types:
         comp_list = getattr(self, comp_type+'_list')
         # Check if the component has been added already.
         comp_classes = [x.__class__ for x in comp_list]
         if component.__class__ in comp_classes:
             log.warn("Component '%s' is already added." %
                      component.__class__.__name__)
             if not force:
                 log.warn("Component '%s' will not be added. To force add it, use"
                          " force option." % component.__class__.__name__)
                 return
         if order is None:
             comp_list.append(component)
         else:
             if order > len(comp_list):
                 order = len(comp_list)
             comp_list.insert(order, component)
     else:
         comp_list = [component,]
     self.setup_components(comp_list)
Example #23
 def d_delay_d_param_num(self, toas, param, step=1e-2):
     """ Return the derivative of phase with respect to the parameter.
     """
     # TODO : We need to know the range of parameter.
     par = getattr(self, param)
     ori_value = par.value
     if ori_value is None:
          # The parameter is not used by the timing model
         log.warn("Parameter '%s' is not used by timing model." % param)
         return np.zeros(len(toas)) * (u.second/par.units)
     unit = par.units
     if ori_value == 0:
         h = 1.0 * step
     else:
         h = ori_value * step
     parv = [par.value-h, par.value+h]
     delay = np.zeros((len(toas),2))
     for ii, val in enumerate(parv):
         par.value = val
         try:
             delay[:,ii] = self.delay(toas)
         except:
             par.value = ori_value
             raise
     d_delay = (-delay[:,0] + delay[:,1])/2.0/h
     par.value = ori_value
     return d_delay * (u.second/unit)
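
The derivative above is a two-point central difference in the parameter. A self-contained sketch of the same scheme applied to an ordinary function:

import numpy as np

def central_difference(f, x, step=1e-2):
    # f'(x) ~ (f(x + h) - f(x - h)) / (2 h), the scheme used in Example #23
    h = step if x == 0 else x * step
    return (f(x + h) - f(x - h)) / (2.0 * h)

approx = central_difference(np.sin, 0.3)
exact = np.cos(0.3)   # approx agrees with exact to roughly 1e-6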
Example #24
def _matplotlib_pil_bug_present():
    """
    Determine whether PIL images should be pre-flipped due to a bug in Matplotlib.

    Prior to Matplotlib 1.2.0, RGB images provided as PIL objects were
    oriented wrongly. This function tests whether the bug is present.
    """

    from matplotlib.image import pil_to_array

    try:
        from PIL import Image
    except:
        import Image

    from astropy import log

    array1 = np.array([[1, 2], [3, 4]], dtype=np.uint8)
    image = Image.fromarray(array1)
    array2 = pil_to_array(image)

    if np.all(array1 == array2):
        log.debug("PIL Image flipping bug not present in Matplotlib")
        return False
    elif np.all(array1 == array2[::-1, :]):
        log.debug("PIL Image flipping bug detected in Matplotlib")
        return True
    else:
        log.warn("Could not properly determine Matplotlib behavior for RGB images - image may be flipped incorrectly")
        return False
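
A hedged sketch of how a caller might use the check above before handing a PIL image to Matplotlib (the flip is only needed on the affected old Matplotlib versions; the image path is a placeholder):

from PIL import Image

img = Image.open('example.png')            # hypothetical image
if _matplotlib_pil_bug_present():
    img = img.transpose(Image.FLIP_TOP_BOTTOM)
# img can now be passed to imshow/figimage with the expected orientation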
Example #25
def check(header, convention=None, dimensions=[0, 1]):

    ix = dimensions[0] + 1
    iy = dimensions[1] + 1

    # If header does not contain CTYPE keywords, assume that the WCS is
    # missing or incomplete, and replace it with a 1-to-1 pixel mapping
    if 'CTYPE%i' % ix not in header or 'CTYPE%i' % iy not in header:
        log.warn("No WCS information found in header - using pixel coordinates")
        header.update('CTYPE%i' % ix, 'PIXEL')
        header.update('CTYPE%i' % iy, 'PIXEL')
        header.update('CRVAL%i' % ix, 0.)
        header.update('CRVAL%i' % iy, 0.)
        header.update('CRPIX%i' % ix, 0.)
        header.update('CRPIX%i' % iy, 0.)
        header.update('CDELT%i' % ix, 1.)
        header.update('CDELT%i' % iy, 1.)

    if header['CTYPE%i' % ix][4:] == '-CAR' and header['CTYPE%i' % iy][4:] == '-CAR':

        if header['CTYPE%i' % ix][:4] == 'DEC-' or header['CTYPE%i' % ix][1:4] == 'LAT':
            ilon = iy
            ilat = ix
        elif header['CTYPE%i' % iy][:4] == 'DEC-' or header['CTYPE%i' % iy][1:4] == 'LAT':
            ilon = ix
            ilat = iy
        else:
            ilon = None
            ilat = None

        if ilat is not None and header['CRVAL%i' % ilat] != 0:

            if convention == 'calabretta':
                pass  # we don't need to do anything
            elif convention == 'wells':
                if 'CDELT%i' % ilat not in header:
                    raise Exception("Need CDELT%i to be present for wells convention" % ilat)
                crpix = header['CRPIX%i' % ilat]
                crval = header['CRVAL%i' % ilat]
                cdelt = header['CDELT%i' % ilat]
                crpix = crpix - crval / cdelt
                try:
                    header['CRPIX%i' % ilat] = crpix
                    header['CRVAL%i' % ilat] = 0.
                except:  # older versions of PyFITS
                    header.update('CRPIX%i' % ilat, crpix)
                    header.update('CRVAL%i' % ilat, 0.)

            else:
                raise Exception('''WARNING: projection is Plate Caree (-CAR) and
                CRVALy is not zero. This can be interpreted either according to
                Wells (1981) or Calabretta (2002). The former defines the
                projection as rectilinear regardless of the value of CRVALy,
                whereas the latter defines the projection as rectilinear only when
                CRVALy is zero. You will need to specify the convention to assume
                by setting either convention='wells' or convention='calabretta'
                when initializing the FITSFigure instance. ''')

    return header
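
A usage sketch, assuming a FITS header object compatible with the older header.update(key, value) calls above; the filename is a placeholder:

from astropy.io import fits

header = fits.getheader('image.fits')              # hypothetical file
header = check(header, convention='calabretta')    # or convention='wells'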
Example #26
 def pickle(self, filename=None):
     """Write the TOAs to a .pickle file with optional filename."""
     if filename is not None:
         pickle.dump(self, open(filename, "wb"))
     elif self.filename is not None:
         pickle.dump(self, gzip.open(self.filename+".pickle.gz", "wb"))
     else:
         log.warn("TOA pickle method needs a filename.")
Example #27
    def __init__(self, directory='.', add_col_names=[], check_subdir=False):
        """
        This is a class for reading a directory's the lofasm related file or
        directories and parsing the information to an astropy table. It provides
        the writing the information to a text file as well.

        Notes
        -----
        This class only applies to one directory.

        Parameter
        ----------
        dir : str, optional
            Filename for the directory.
        add_col_names: list optional
            The new added column names, other then the default ones.
        check_subdir: bool optional
            Check subdir information or not. It will create .info file in the
            end data subdirector.

        Note
        ----
        Use check_subdir option. A new directory's information can be set up totally.
        """
        # Get all the files for the directory.
        self.all_files = os.listdir(directory)
        self.directory = directory
        self.directory_abs_path = os.path.abspath(self.directory)
        self.directory_basename = os.path.basename(self.directory_abs_path.rstrip(os.sep))
        # different file category
        self.formats = {}
        # instantiate format classes
        for k, kv in zip(DataFormat._format_list.keys(), DataFormat._format_list.values()):
            self.formats[k] = kv()
        # set up the file category list depends on the formats
        self.files, self.num_data_files = self.check_file_format(self.all_files, self.directory)
        num_info_files = len(self.files['info'])
        if num_info_files < 1:
            self.info_file_name = '.info'
        else:
            if num_info_files > 1:
                log.warn("More then one .info file detected, " \
                         "use '%s' as information file." % self.files['info'][0])
            self.info_file_name = self.files['info'][0]
        if self.num_data_files > 0:
            self.is_data_dir = True
        else:
            self.is_data_dir = False
        self.built_in_collectors = {}
        self.new_files = []
        self.table_update = False
        # Those are the default column names
        self.add_col_names = add_col_names
        self.col_names = ['station', 'channel', 'hdr_type', 'start_time', \
                          'time_span', 'start_time_J2000'] + add_col_names
        self.setup_info_table()
        if check_subdir:
           self.process_data_dirs()
Example #28
File: toa.py  Project: nanograv/PINT
    def apply_clock_corrections(self, include_bipm=True,
                                bipm_version="BIPM2015",
                                include_gps=True):
        """Apply observatory clock corrections and TIME statments.

        Apply clock corrections to all the TOAs where corrections are
        available.  This routine actually changes the value of the TOA,
        although the correction is also listed as a new flag for the TOA
        called 'clkcorr' so that it can be reversed if necessary.  This
        routine also applies all 'TIME' commands and treats them exactly
        as if they were a part of the observatory clock corrections.

        Options to include GPS or BIPM clock corrections are set to True
        by default in order to give the most accurate clock corrections.

        A description of how PINT handles clock corrections and timescales is here:
        https://github.com/nanograv/PINT/wiki/Clock-Corrections-and-Timescales-in-PINT

        """

        # First make sure that we haven't already applied clock corrections
        flags = self.table['flags']
        if any(['clkcorr' in f for f in flags]):
            log.warn("Some TOAs have 'clkcorr' flag.  Not applying new clock corrections.")
            return
        # An array of all the time corrections, one for each TOA
        log.info("Applying clock corrections (include_GPS = {0}, include_BIPM = {1}.".format(include_gps,include_bipm))
        corr = numpy.zeros(self.ntoas) * u.s
        times = self.table['mjd']
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            site = get_observatory(obs, include_gps=include_gps,
                                   include_bipm=include_bipm,
                                   bipm_version=bipm_version)
            loind, hiind = self.table.groups.indices[ii:ii+2]
            # First apply any TIME statements
            for jj in range(loind, hiind):
                if 'to' in flags[jj]:
                    # TIME commands are in sec
                    # SUGGESTION(@paulray): These time correction units should
                    # be applied in the parser, not here. In the table the time
                    # correction should have units.
                    corr[jj] = flags[jj]['to'] * u.s
                    times[jj] += time.TimeDelta(corr[jj])

            gcorr = site.clock_corrections(time.Time(grp['mjd']))
            for jj, cc in enumerate(gcorr):
                grp['mjd'][jj] += time.TimeDelta(cc)
            corr[loind:hiind] += gcorr
            # Now update the flags with the clock correction used
            for jj in range(loind, hiind):
                if corr[jj]:
                    flags[jj]['clkcorr'] = corr[jj]
        # Update clock correction info
        self.clock_corr_info.update({'include_bipm':include_bipm,
                                     'bipm_version':bipm_version,
                                     'include_gps':include_gps})
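
A short usage sketch, assuming toas is a PINT TOAs object loaded elsewhere (for example with pint.toa.get_TOAs); the arguments shown are simply the defaults:

toas.apply_clock_corrections(include_gps=True,
                             include_bipm=True,
                             bipm_version="BIPM2015")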
Example #29
    def density(self, grid, ignore_cavity=False):
        '''
        Return the density grid

        Parameters
        ----------
        grid : :class:`~hyperion.grid.SphericalPolarGrid` instance.
            The spherical polar grid object containing information about the
            position of the grid cells.

        Returns
        -------
        rho : np.ndarray
            A 3-dimensional array containing the density of the envelope
            inside each cell. The shape of this array is the same as
            ``grid.shape``.
        '''

        if not isinstance(grid, SphericalPolarGrid):
            raise TypeError("grid should be a SphericalPolarGrid instance")

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warn("Ignoring Ulrich envelope, since rmax < rmin")
            return np.zeros(grid.shape)

        # Find mu = cos(theta)
        mu = np.cos(grid.gt)

        # Find mu_0, the cosine of the angle of a streamline of infalling
        # particles at r=infinity.
        mu0 = solve_mu0(grid.gr / self.rc, mu)

        # Find Ulrich envelope density
        rho = self.rho_0 * (grid.gr / self.rc) ** -1.5 \
                * (1 + mu / mu0) ** -0.5 \
                * (mu / mu0 + 2. * mu0 ** 2 * self.rc / grid.gr) ** -1.

        mid1 = (np.abs(mu) < 1.e-10) & (grid.gr < self.rc)
        rho[mid1] = self.rho_0 / np.sqrt(grid.gr[mid1] / self.rc) \
                  / (1. - grid.gr[mid1] / self.rc) / 2.

        mid2 = (np.abs(mu) < 1.e-10) & (grid.gr > self.rc)
        rho[mid2] = self.rho_0 / np.sqrt(2. * grid.gr[mid2] / self.rc - 1) \
                  / (grid.gr[mid2] / self.rc - 1.)

        if np.any((np.abs(mu) < 1.e-10) & (grid.gr == self.rc)):
            raise Exception("Grid point too close to Ulrich singularity")

        rho[grid.gr < self.rmin] = 0.
        rho[grid.gr > self.rmax] = 0.

        if not ignore_cavity and self.cavity is not None:
            mask = self.cavity.mask(grid)
            rho[~mask] = 0.

        return rho
Example #30
    def _login(self, username=None, store_password=False,
               reenter_password=False):
        """
        Login to the ESO User Portal.

        Parameters
        ----------
        username : str, optional
            Username to the ESO Public Portal. If not given, it should be
            specified in the config file.
        store_password : bool, optional
            Stores the password securely in your keyring. Default is False.
        reenter_password : bool, optional
            Asks for the password even if it is already stored in the
            keyring. This is the way to overwrite an already stored password
            on the keyring. Default is False.
        """
        if username is None:
            if self.USERNAME == "":
                raise LoginError("If you do not pass a username to login(), "
                                 "you should configure a default one!")
            else:
                username = self.USERNAME

        # Get password from keyring or prompt
        if reenter_password is False:
            password_from_keyring = keyring.get_password(
                "astroquery:www.eso.org", username)
        else:
            password_from_keyring = None

        if password_from_keyring is None:
            if system_tools.in_ipynb():
                log.warn("You may be using an ipython notebook:"
                         " the password form will appear in your terminal.")
            password = getpass.getpass("{0}, enter your ESO password:\n"
                                       .format(username))
        else:
            password = password_from_keyring
        # Authenticate
        log.info("Authenticating {0} on www.eso.org...".format(username))
        # Do not cache pieces of the login process
        login_response = self._request("GET", "https://www.eso.org/sso/login",
                                       cache=False)
        login_result_response = self._activate_form(
            login_response, form_index=-1, inputs={'username': username,
                                                   'password': password})
        root = BeautifulSoup(login_result_response.content, 'html5lib')
        authenticated = not root.select('.error')
        if authenticated:
            log.info("Authentication successful!")
        else:
            log.exception("Authentication failed!")
        # When authenticated, save password in keyring if needed
        if authenticated and password_from_keyring is None and store_password:
            keyring.set_password("astroquery:www.eso.org", username, password)
        return authenticated
Example #31
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        #for link in root.findAll('a'):
        #    if 'script.sh' in link.text:
        #        download_script_url = urljoin(self.dataarchive_url,
        #                                      link['href'])
        #if 'download_script_url' not in locals():
        #    raise RemoteServiceError("No download links were found.")

        #download_script = self._request('GET', download_script_url,
        #                                cache=False)
        #download_script_target_urls = []
        #for line in download_script.text.split('\n'):
        #    if line and line.split() and line.split()[0] == 'wget':
        #        download_script_target_urls.append(line.split()[1].strip('"'))

        #if len(download_script_target_urls) == 0:
        #    raise RemoteServiceError("There was an error parsing the download "
        #                             "script; it is empty.  "
        #                             "You can access the download script "
        #                             "directly from this URL: "
        #                             "{0}".format(download_script_url))

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid':[], 'URL':[], 'size':[]}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if len(tds) > 1 and 'uid' in tds[0].text and (cl and
                                                          'Level' in tr['class'][0]):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("A heading was found when parsing the download page but "
                                 "it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size,unit = re.search('(-|[0-9\.]*)([A-Za-z]*)', tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png' in tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB','MB')
                            else u.Unit('kB') if 'kb' in unit.lower()
                            else 1)
                    try:
                        columns['size'].append(float(size)*u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1*u.byte)
                    log.log(level=5, msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}".format(size, uid,
                                                              columns['URL'][-1]))
                else:
                    log.warn("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size,unit = re.search('([0-9\.]*)([A-Za-z]*)', tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB','MB')
                        else u.Unit('kB') if 'kb' in unit.lower()
                        else 1)
                columns['size'].append(float(size)*u.Unit(unit))
                log.log(level=5, msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(size, uid,
                                                          columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError("No valid UIDs were found in the staged "
                                     "data table.  Please include {0} "
                                     "in a bug report."
                                     .format(self._staging_log['data_list_url']))

        #if len(download_script_target_urls) != len(columns['URL']):
        #    log.warn("There was an error parsing the data staging page.  "
        #             "The results from the page and the download script "
        #             "differ.  You can access the download script directly "
        #             "from this URL: {0}".format(download_script_url))
        #else:
        #    bad_urls = []
        #    for (rurl,url) in (zip(columns['URL'],
        #                           download_script_target_urls)):
        #        if rurl == 'None_Found':
        #            url_uid = os.path.split(url)[-1]
        #            ind = np.where(np.array(columns['uid']) == url_uid)[0][0]
        #            columns['URL'][ind] = url
        #        elif rurl != url:
        #            bad_urls.append((rurl, url))
        #    if bad_urls:
        #        log.warn("There were mismatches between the parsed URLs "
        #                 "from the staging page ({0}) and the download "
        #                 "script ({1})."
        #                 .format(self._staging_log['data_list_url'],
        #                         download_script_url))

        tbl = Table([Column(name=k, data=v) for k,v in iteritems(columns)])


        return tbl
Example #32
    def __init__(
        self,
        collider_densities=None,
        density=None,
        total_density=None,
        temperature=None,
        species='co',
        column=None,
        column_per_bin=None,
        tbackground=2.7315,
        deltav=1.0,
        abundance=None,
        datapath=None,
        escapeProbGeom='lvg',
        outfile='radex.out',
        logfile='radex.log',
        debug=False,
        mu=2.8,
        source_area=None,
    ):
        """
        Direct wrapper of the radex FORTRAN code

        Parameters
        ----------
        collider_densities: dict
            Dictionary giving the volume densities of the collider(s) in units
            of cm^-3.  Valid entries are h2,oh2,ph2,e,He,H,H+.  The keys are
            case-insensitive.
        density: float
        total_density: float
            (optional) Alternative to ``collider_densities``: can specify a
            single number indicating the total density of H2.  This should
            not be used when electrons or H atoms are the intended collider.
            These keywords are synonymous and therefore only one can be used.
        temperature: float
            Local gas temperature in K
        species: str
            A string specifying a valid chemical species.  This is used to look
            up the specified molecule
        column: float
        column_per_bin : float
            The column density of the molecule of interest per bin, where
            a bin is (deltav km/s * 1 pc). These keywords are synonymous and
            therefore only one can be specified.
        abundance: float
            The molecule's abundance relative to the total collider density in
            each velocity bin, i.e. column = abundance * density * length * dv.
            If both abundance and column are specified, abundance is ignored.
        tbackground: float
            Background radiation temperature (e.g., CMB)
        deltav: float
            The FWHM line width (really, the single-zone velocity width to
            scale the column density by: this is most sensibly interpreted as a
            velocity gradient (dv/length))
        datapath: str
            Path to the molecular data files.  If it is not specified, defaults
            to the current directory, OR the shell variable RADEX_DATAPATH if
            it is specified.
        outfile: str
            Output file name
        logfile: str
            Log file name
        escapeProbGeom: 'lvg','sphere','slab'
            Which escape probability method to use
        mu: float
            Mean mass per particle in AMU.  Set to 2.8 for H2+Helium mix
        source_area: float / unit
            The emitting area of the source on the sky in steradians
        """

        from pyradex.radex import radex
        self.radex = radex

        self.mu = mu

        if os.getenv('RADEX_DATAPATH') and datapath is None:
            datapath = os.getenv('RADEX_DATAPATH')

        if datapath is not None:
            self.datapath = datapath
            if self.datapath != os.path.expanduser(datapath):
                raise ValueError("Data path %s was not successfully stored;"
                                 " instead %s was." %
                                 (datapath, self.datapath))
        self.species = species
        if self.molpath == b'':
            raise ValueError("Must set a species name.")
        if not os.path.exists(self.molpath):
            raise ValueError(
                "Must specify a valid path to a molecular data file "
                "else RADEX will crash."
                "  Current path is {0}".format(self.molpath))

        if sum(x is not None
               for x in (collider_densities, density, total_density)) > 1:
            raise ValueError("Can only specify one of density, total_density,"
                             " and collider_densities")

        if sum(x is not None for x in (column, column_per_bin)) > 1:
            raise ValueError("Can only specify one of column, column_per_bin.")

        n_specifications = sum(x is not None
                               for x in (column, column_per_bin,
                                         collider_densities, density,
                                         total_density, abundance))
        if (n_specifications > 2):
            raise ValueError(
                "Can only specify two of column, density, and abundance.")
        if (n_specifications < 2):
            raise ValueError(
                "Must specify two of column, density, and abundance.")

        self._locked_parameter = 'density'
        self._is_locked = True

        # This MUST happen before density is set, otherwise OPR will be
        # incorrectly set.
        self.radex.cphys.tkin = unitless(temperature)

        # density warnings will occur if a generic (number-based) density is
        # used.  It can be suppressed more directly by using a dictionary-style
        # density
        self._suppress_density_warning = False

        if collider_densities:
            self.density = collider_densities
            self._suppress_density_warning = True
            self._is_locked = False
            if total_density:
                log.warn(
                    "`total_density` was specified, but `collider_densities` "
                    "was used instead.  Set `collider_densities=None` if you "
                    "want to use `total_density`.")
        elif total_density:
            self.density = total_density
            self._suppress_density_warning = True
            self._is_locked = False
        elif density:
            self.density = density
            self._suppress_density_warning = True
            self._is_locked = False
        else:
            self._locked_parameter = 'column'
            self._is_locked = True

        self.outfile = outfile
        self.logfile = logfile
        self.escapeProbGeom = escapeProbGeom

        self.deltav = deltav
        self._set_parameters()

        if column_per_bin is not None:
            self.column_per_bin = column_per_bin
        elif column is not None:
            self.column_per_bin = column
        else:
            self._locked_parameter = 'density'

        self._is_locked = False

        if abundance:
            self.abundance = abundance

        self._validate_colliders()

        # This has to happen here, because the colliders are read in at
        # this point and rates interpolated
        self.temperature = temperature
        self.tbg = tbackground

        self.debug = debug

        self.source_area = source_area

        self._suppress_density_warning = False
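
A hedged usage sketch for the wrapper above; the physical values are illustrative and assume the CO data file can be located via the RADEX_DATAPATH environment variable:

import pyradex

R = pyradex.Radex(species='co',
                  collider_densities={'h2': 1e4},   # cm^-3
                  column=1e14,                      # cm^-2 per velocity bin
                  temperature=20,                   # K
                  deltav=1.0)                       # km/s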
Example #33
    def stage_data(self, uids):
        """
        Stage ALMA data

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """

        """
        With log.set_level(10)
        INFO: Staging files... [astroquery.alma.core]
        DEBUG: First request URL: https://almascience.eso.org/rh/submission [astroquery.alma.core]
        DEBUG: First request payload: {'dataset': [u'ALMA+uid___A002_X3b3400_X90f']} [astroquery.alma.core]
        DEBUG: First response URL: https://almascience.eso.org/rh/checkAuthenticationStatus/3f98de33-197e-4692-9afa-496842032ea9/submission [astroquery.alma.core]
        DEBUG: Request ID: 3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        DEBUG: Submission URL: https://almascience.eso.org/rh/submission/3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        .DEBUG: Data list URL: https://almascience.eso.org/rh/requests/anonymous/786823226 [astroquery.alma.core]
        """

        if isinstance(uids, six.string_types + (np.bytes_,)):
            uids = [uids]
        if not isinstance(uids, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        log.info("Staging files...")

        self._get_dataarchive_url()

        url = urljoin(self.dataarchive_url, 'rh/submission')
        log.debug("First request URL: {0}".format(url))
        # 'ALMA+uid___A002_X391d0b_X7b'
        payload = {'dataset': ['ALMA+' + clean_uid(uid) for uid in uids]}
        log.debug("First request payload: {0}".format(payload))

        self._staging_log = {'first_post_url': url}

        # Request staging for the UIDs
        # This component cannot be cached, since the returned data can change
        # if new data are uploaded
        response = self._request('POST', url, data=payload,
                                 timeout=self.TIMEOUT, cache=False)
        self._staging_log['initial_response'] = response
        log.debug("First response URL: {0}".format(response.url))
        if response.status_code == 405:
            raise HTTPError("Received an error 405: this may indicate you "
                            "have already staged the data.  Try downloading "
                            "the file URLs directly with download_files.")
        response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            time.sleep(1)
            # CANNOT cache this stage: it is not a real data page!  results in
            # infinite loops
            response = self._request('POST', url, data=payload,
                                     timeout=self.TIMEOUT, cache=False)
            self._staging_log['initial_response'] = response
            if 'j_spring_cas_security_check' in response.url:
                log.warn("Staging request was not successful.  Try again?")
            response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            raise RemoteServiceError("Could not access data.  This error "
                                     "can arise if the data are private and "
                                     "you do not have access rights or are "
                                     "not logged in.")

        request_id = response.url.split("/")[-2]
        self._staging_log['request_id'] = request_id
        log.debug("Request ID: {0}".format(request_id))

        # Submit a request for the specific request ID identified above
        submission_url = urljoin(self.dataarchive_url,
                                 os.path.join('rh/submission', request_id))
        log.debug("Submission URL: {0}".format(submission_url))
        self._staging_log['submission_url'] = submission_url
        staging_submission = self._request('GET', submission_url, cache=True)
        self._staging_log['staging_submission'] = staging_submission
        staging_submission.raise_for_status()

        data_page_url = staging_submission.url
        self._staging_log['data_page_url'] = data_page_url
        dpid = data_page_url.split("/")[-1]
        self._staging_log['staging_page_id'] = dpid

        # CANNOT cache this step: please_wait will happen infinitely
        data_page = self._request('GET', data_page_url, cache=False)
        self._staging_log['data_page'] = data_page
        data_page.raise_for_status()

        has_completed = False
        while not has_completed:
            time.sleep(1)
            summary = self._request('GET', os.path.join(data_page_url,
                                                        'summary'),
                                    cache=False)
            summary.raise_for_status()
            print(".", end='')
            sys.stdout.flush()
            has_completed = summary.json()['complete']

        self._staging_log['summary'] = summary
        summary.raise_for_status()
        self._staging_log['json_data'] = json_data = summary.json()

        username = self._username if hasattr(self, '_username') else 'anonymous'

        # templates:
        # https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
        # 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
        # uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar

        url_decomposed = urlparse(data_page_url)
        base_url = ('{uri.scheme}://{uri.netloc}/'
                    'dataPortal/requests/{username}/'
                    '{staging_page_id}/ALMA'.format(uri=url_decomposed,
                                                    staging_page_id=dpid,
                                                    username=username,
                                                    ))
        tbl = self._json_summary_to_table(json_data, base_url=base_url)
        self._staging_log['result'] = tbl

        return tbl
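
The staging flow above polls the per-request 'summary' endpoint once per second until its JSON reports completion. Below is a minimal sketch of that polling pattern written with plain requests; summary_url, interval and max_tries are illustrative names, not part of the original code.

import time
import requests

def poll_until_complete(summary_url, interval=1.0, max_tries=600):
    """Poll a JSON status endpoint until it reports {'complete': true}."""
    for _ in range(max_tries):
        resp = requests.get(summary_url)
        resp.raise_for_status()
        payload = resp.json()
        if payload.get('complete'):
            return payload
        time.sleep(interval)
    raise TimeoutError("staging did not complete within the allotted tries")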
Ejemplo n.º 34
0
    def compute(self,
                optical_properties,
                n_temp=1200,
                temp_min=0.1,
                temp_max=100000.):
        """
        Compute various mean opacities:

            * Planck mean opacity
            * Reciprocal Planck mean opacity
            * Rosseland mean opacity
        """

        # Set temperatures to compute the mean opacities for
        temperatures = np.logspace(np.log10(temp_min), np.log10(temp_max),
                                   n_temp)

        # To avoid issues that may be confusing to users if they ask for
        # temperatures at exactly the min/max, we reset the temperature min/max
        # manually (otherwise the np.log10 and subsequent 10** cause a loss of
        # precision)
        temperatures[0] = temp_min
        temperatures[-1] = temp_max

        # Find common frequency scale
        planck_nu = planck_nu_range(temp_min, temp_max)
        nu = nu_common(planck_nu, optical_properties.nu)

        if planck_nu.min() < optical_properties.nu.min():
            logger.warn(
                "Planck function for lowest temperature not completely covered by opacity function"
            )
            nu = nu[nu >= optical_properties.nu.min()]

        if planck_nu.max() > optical_properties.nu.max():
            logger.warn(
                "Planck function for highest temperature not completely covered by opacity function"
            )
            nu = nu[nu <= optical_properties.nu.max()]

        # Interpolate opacity to new frequency grid
        chi_nu = interp1d_fast_loglog(optical_properties.nu,
                                      optical_properties.chi, nu)
        kappa_nu = interp1d_fast_loglog(optical_properties.nu,
                                        optical_properties.kappa, nu)

        # Set mean opacity variable
        self.var_name = 'specific_energy'

        # Initialize mean opacity arrays
        self.chi_planck = np.zeros(n_temp)
        self.kappa_planck = np.zeros(n_temp)
        self.chi_inv_planck = np.zeros(n_temp)
        self.kappa_inv_planck = np.zeros(n_temp)
        self.chi_rosseland = np.zeros(n_temp)
        self.kappa_rosseland = np.zeros(n_temp)

        # Loop through the emissivities and compute mean opacities
        for it, T in enumerate(temperatures):

            # Compute Planck function and derivative with respect to temperature
            b_nu = B_nu(nu, T)
            db_nu_dt = dB_nu_dT(nu, T)

            # Compute planck mean opacity
            self.chi_planck[it] = (integrate_loglog(nu, b_nu * chi_nu) /
                                   integrate_loglog(nu, b_nu))

            # Compute planck mean absorptive opacity
            self.kappa_planck[it] = (integrate_loglog(nu, b_nu * kappa_nu) /
                                     integrate_loglog(nu, b_nu))

            # Compute reciprocal planck mean opacity
            self.chi_inv_planck[it] = (integrate_loglog(nu, b_nu) /
                                       integrate_loglog(nu, b_nu / chi_nu))

            # Compute reciprocal planck mean absorptive opacity
            self.kappa_inv_planck[it] = (integrate_loglog(nu, b_nu) /
                                         integrate_loglog(nu, b_nu / kappa_nu))

            # Compute rosseland mean opacity
            self.chi_rosseland[it] = (integrate_loglog(nu, db_nu_dt) /
                                      integrate_loglog(nu, db_nu_dt / chi_nu))

            # Compute rosseland mean absorptive opacity
            self.kappa_rosseland[it] = (
                integrate_loglog(nu, db_nu_dt) /
                integrate_loglog(nu, db_nu_dt / kappa_nu))

        self.temperature = temperatures
        self.specific_energy = 4. * sigma * temperatures**4. * self.kappa_planck
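
For reference, the Planck mean computed in the loop above is the B_nu-weighted average of chi_nu. A minimal standalone sketch follows, using np.trapz in place of the package's integrate_loglog helper; the Planck function is written out in cgs units here and is an assumption, not the package's implementation.

import numpy as np

h, k, c = 6.626e-27, 1.381e-16, 2.998e10   # Planck constant, Boltzmann constant, c (cgs)

def planck_B_nu(nu, T):
    """Planck function B_nu(T) in cgs units."""
    return 2.0 * h * nu**3 / c**2 / np.expm1(h * nu / (k * T))

def planck_mean_opacity(nu, chi_nu, T):
    """chi_P(T) = integral(B_nu * chi_nu dnu) / integral(B_nu dnu), trapezoidal version."""
    b = planck_B_nu(nu, T)
    return np.trapz(b * chi_nu, nu) / np.trapz(b, nu)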
Ejemplo n.º 35
0
    def fitter(self,
               xax,
               data,
               err=None,
               quiet=True,
               veryverbose=False,
               debug=False,
               parinfo=None,
               **kwargs):
        """
        Run the fitter using mpfit.
        
        kwargs will be passed to _make_parinfo and mpfit.

        Parameters
        ----------
        xax : SpectroscopicAxis 
            The X-axis of the spectrum
        data : ndarray
            The data to fit
        err : ndarray (optional)
            The error on the data.  If unspecified, will be uniform unity
        parinfo : ParinfoList
            The guesses, parameter limits, etc.  See
            `pyspeckit.spectrum.parinfo` for details
        quiet : bool
            pass to mpfit.  If False, will print out the parameter values for
            each iteration of the fitter
        veryverbose : bool
            print out a variety of mpfit output parameters
        debug : bool
            raise an exception (rather than a warning) if chi^2 is nan
        """

        if parinfo is None:
            parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
        else:
            if debug: log.debug("Using user-specified parinfo dict")
            # clean out disallowed kwargs (don't want to pass them to mpfit)
            #throwaway, kwargs = self._make_parinfo(debug=debug, **kwargs)

        self.xax = xax  # the 'stored' xax is just a link to the original
        if hasattr(xax, 'as_unit') and self.fitunits is not None:
            # some models will depend on the input units.  For these, pass in an X-axis in those units
            # (gaussian, voigt, lorentz profiles should not depend on units.  Ammonia, formaldehyde,
            # H-alpha, etc. should)
            xax = copy.copy(xax)
            # xax.convert_to_unit(self.fitunits, quiet=quiet)
            xax = xax.as_unit(self.fitunits, quiet=quiet, **kwargs)
        elif self.fitunits is not None:
            raise TypeError("X axis does not have a convert method")

        if np.any(np.isnan(data)) or np.any(np.isinf(data)):
            err[np.isnan(data) + np.isinf(data)] = np.inf
            data[np.isnan(data) + np.isinf(data)] = 0

        if np.any(np.isnan(err)):
            raise ValueError(
                "One or more of the error values is NaN."
                "  This is not allowed.  Errors can be infinite "
                "(which is equivalent to giving zero weight to "
                "a data point), but otherwise they must be positive "
                "floats.")
        elif np.any(err < 0):
            raise ValueError("At least one error value is negative, which is "
                             "not allowed as negative errors are not "
                             "meaningful in the optimization process.")

        if debug:
            for p in parinfo:
                log.debug(p)
            log.debug("\n".join([
                "%s %i: tied: %s value: %s" %
                (p['parname'], p['n'], p['tied'], p['value']) for p in parinfo
            ]))

        mp = mpfit(self.mpfitfun(xax, data, err),
                   parinfo=parinfo,
                   quiet=quiet,
                   **kwargs)
        mpp = mp.params
        if mp.perror is not None: mpperr = mp.perror
        else: mpperr = mpp * 0
        chi2 = mp.fnorm

        if mp.status == 0:
            if "parameters are not within PARINFO limits" in mp.errmsg:
                log.warn(parinfo)
            raise mpfitException(mp.errmsg)

        for i, (p, e) in enumerate(zip(mpp, mpperr)):
            self.parinfo[i]['value'] = p
            self.parinfo[i]['error'] = e

        if veryverbose:
            log.info("Fit status: {0}".format(mp.status))
            log.info("Fit error message: {0}".format(mp.errmsg))
            log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
            for i, p in enumerate(mpp):
                log.info("{0}: {1} +/- {2}".format(self.parinfo[i]['parname'],
                                                   p, mpperr[i]))
            log.info("Chi2: {0} Reduced Chi2: {1}  DOF:{2}".format(
                mp.fnorm, mp.fnorm / (len(data) - len(mpp)),
                len(data) - len(mpp)))

        self.mp = mp
        self.mpp = self.parinfo.values
        self.mpperr = self.parinfo.errors
        self.mppnames = self.parinfo.names
        self.model = self.n_modelfunc(self.parinfo,
                                      **self.modelfunc_kwargs)(xax)
        if debug:
            log.debug("Modelpars: {0}".format(self.mpp))
        if np.isnan(chi2):
            if debug:
                raise ValueError("Error: chi^2 is nan")
            else:
                log.warn("Warning: chi^2 is nan")
        return mpp, self.model, mpperr, chi2
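
mpfit minimizes the residuals returned by the objective built in self.mpfitfun(xax, data, err). A minimal sketch of the closure shape mpfit expects is shown below; model_func is a hypothetical model evaluator standing in for the fitter's own model, so this is not the library's actual implementation.

import numpy as np

def make_mpfit_objective(x, data, err, model_func):
    """Return an mpfit-style objective: f(p, fjac=None) -> [status, weighted residuals]."""
    err = np.ones_like(data) if err is None else err
    def f(p, fjac=None):
        resid = (data - model_func(x, *p)) / err
        return [0, resid]   # status 0 tells mpfit to keep iterating
    return f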
Ejemplo n.º 36
0
    def fit(self, selected, iters=1):
        """
        Run a fit using the specified fitter
        """
        # Select all the TOAs if none are explicitly set
        if not any(selected):
            selected = ~selected
        """JUMP check, TODO: put in fitter?"""
        if "PhaseJump" in self.prefit_model.components:
            # if attempted fit (selected)
            # A) contains only jumps, don't do the fit and return an error
            # B) excludes a jump, turn that jump off
            # C) partially contains a jump, redefine that jump only with the overlap
            fit_jumps = []
            for param in self.prefit_model.params:
                if getattr(self.prefit_model,
                           param).frozen == False and param.startswith("JUMP"):
                    fit_jumps.append(int(param[4:]))

            numjumps = self.prefit_model.components[
                "PhaseJump"].get_number_of_jumps()
            if numjumps == 0:
                log.warn(
                    "There are no jumps (maskParameter objects) in PhaseJump. Please delete the PhaseJump object and try again. "
                )
                return None
            # boolean array to determine if all selected toas are jumped
            jumps = [
                True if "jump" in dict.keys() and dict["jump"] in fit_jumps
                else False for dict in self.all_toas.table["flags"][selected]
            ]
            # check if par file jumps in PhaseJump object
            if not any(jumps):
                # for every jump, set appropriate flag for TOAs it jumps
                for jump_par in self.prefit_model.components[
                        "PhaseJump"].get_jump_param_objects():
                    # find TOAs jump applies to
                    mask = jump_par.select_toa_mask(self.all_toas)
                    # apply to dictionaries for future use
                    for dict in self.all_toas.table["flags"][mask]:
                        dict["jump"] = jump_par.index
                jumps = [
                    True if "jump" in dict.keys() and dict["jump"] in fit_jumps
                    else False
                    for dict in self.all_toas.table["flags"][selected]
                ]
            if all(jumps):
                log.warn(
                    "toas being fit must not all be jumped. Remove or uncheck at least one jump in the selected toas before fitting."
                )
                return None
            # numerical array of selected jump flags
            sel_jump_nums = [
                dict["jump"] if "jump" in dict.keys() else np.nan
                for dict in self.all_toas.table["flags"][selected]
            ]
            # numerical array of all jump flags
            full_jump_nums = [
                dict["jump"] if "jump" in dict.keys() else np.nan
                for dict in self.all_toas.table["flags"]
            ]
            for num in range(1, numjumps + 1):
                num = int(num)
                if num not in sel_jump_nums:
                    getattr(self.prefit_model, "JUMP" + str(num)).frozen = True
                    continue
                jump_select = [num == jump_num for jump_num in full_jump_nums]
                overlap = [a and b for a, b in zip(jump_select, selected)]
                # remove the jump flags for that num
                for dict in self.all_toas.table["flags"]:
                    if "jump" in dict.keys() and dict["jump"] == num:
                        del dict["jump"]
                # re-add the jump using overlap as 'selected'
                for dict in self.all_toas.table["flags"][overlap]:
                    dict["jump"] = num

        if self.fitted:
            self.prefit_model = self.postfit_model
            self.prefit_resids = self.postfit_resids

        if self.fitter == Fitters.POWELL:
            fitter = pint.fitter.PowellFitter(self.selected_toas,
                                              self.prefit_model)
        elif self.fitter == Fitters.WLS:
            fitter = pint.fitter.WLSFitter(self.selected_toas,
                                           self.prefit_model)
        elif self.fitter == Fitters.GLS:
            fitter = pint.fitter.GLSFitter(self.selected_toas,
                                           self.prefit_model)
        chi2 = self.prefit_resids.chi2
        wrms = self.prefit_resids.rms_weighted()
        print("Pre-Fit Chi2:\t\t%.8g us^2" % chi2)
        print("Pre-Fit Weighted RMS:\t%.8g us" % wrms.to(u.us).value)

        fitter.fit_toas(maxiter=1)
        self.postfit_model = fitter.model
        self.postfit_resids = Residuals(self.all_toas, self.postfit_model)
        self.fitted = True
        self.write_fit_summary()

        # TODO: delta_pulse_numbers need some work. They serve both for PHASE and -padd functions from the TOAs
        # as well as for phase jumps added manually in the GUI. They really should not be zeroed out here because
        # that will wipe out preexisting values
        self.all_toas.table["delta_pulse_numbers"] = np.zeros(
            self.all_toas.ntoas)
        self.selected_toas.table["delta_pulse_number"] = np.zeros(
            self.selected_toas.ntoas)

        # plot the prefit without jumps
        pm_no_jumps = copy.deepcopy(self.postfit_model)
        for param in pm_no_jumps.params:
            if param.startswith("JUMP"):
                getattr(pm_no_jumps, param).value = 0.0
                getattr(pm_no_jumps, param).frozen = True
        self.prefit_resids_no_jumps = Residuals(self.selected_toas,
                                                pm_no_jumps)

        f = copy.deepcopy(fitter)
        no_jumps = [
            False if "jump" in dict.keys() else True
            for dict in f.toas.table["flags"]
        ]
        f.toas.select(no_jumps)

        selectedMJDs = self.selected_toas.get_mjds()
        if all(no_jumps):
            q = list(self.all_toas.get_mjds())
            index = q.index([
                i for i in self.all_toas.get_mjds() if i > selectedMJDs.min()
            ][0])
            rs_mean = (Residuals(
                self.all_toas,
                f.model).phase_resids[index:index + len(selectedMJDs)].mean())
        else:
            rs_mean = self.prefit_resids_no_jumps.phase_resids[no_jumps].mean()

        # determines how far on either side fake toas go
        # TODO: hard limit on how far fake toas can go --> can get clkcorr
        # errors if go before GBT existed, etc.
        minMJD, maxMJD = selectedMJDs.min(), selectedMJDs.max()
        spanMJDs = maxMJD - minMJD
        if spanMJDs < 30 * u.d:
            redge = ledge = 4
            npoints = 400
        elif spanMJDs < 90 * u.d:
            redge = ledge = 2
            npoints = 300
        elif spanMJDs < 200 * u.d:
            redge = ledge = 1
            npoints = 300
        elif spanMJDs < 400 * u.d:
            redge = ledge = 0.5
            npoints = 200
        else:
            redge = ledge = 1.0
            npoints = 250
        # Check to see if too recent
        nowish = (Time.now().mjd - 40) * u.d
        if maxMJD + spanMJDs * redge > nowish:
            redge = (nowish - maxMJD) / spanMJDs
            if redge < 0.0:
                redge = 0.0
        f_toas, rs, mrands = random_models(
            f,
            rs_mean=rs_mean,
            redge_multiplier=redge,
            ledge_multiplier=ledge,
            npoints=npoints,
            iter=10,
        )
        self.random_resids = rs
        self.fake_toas = f_toas
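
The jump bookkeeping above hinges on per-TOA flag dictionaries. Here is a toy sketch of the mask it builds, i.e. "is this TOA covered by one of the JUMP parameters being fit"; the flag values below are made up for illustration.

import numpy as np

flags = [{'jump': 1}, {}, {'jump': 2}, {'jump': 1}]   # per-TOA flag dicts (toy data)
fit_jumps = [1]                                       # indices of unfrozen JUMP parameters

jumped = np.array(["jump" in f and f["jump"] in fit_jumps for f in flags])
print(jumped)   # -> [ True False False  True]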
Ejemplo n.º 37
0
def main(rawdir, silent=False, verbose=True):
    '''
    Main function for get_OH_centers

    Parameters
    ----------

    rawdir : str
      Full path to the raw data directory. Must end with a '/'

    silent : boolean
      Turns off stdout messages. Default: False

    verbose : boolean
      Turns on additional stdout messages. Default: True

    Returns
    -------

    Notes
    -----
    Created by Chun Ly, 12 June 2018

    Modified by Chun Ly, 13 June 2018
     - Fix rev_lines and rev_int (wrong indexing)
     - Switch to micron units; Do subplots_adjust for white space
     - Plot aesthetics: label plots, change page size, legend
     - Include parameters/assumptions in title
     - Plot aesthetics: Vertical lines for all possible OH skylines
     - Plot aesthetics: Label OH skylines
     - Plot aesthetics: Limit vertical lines for OH skylines
     - Plot aesthetics: group and label OH skylines to avoid overlap
     - Group lines (<5 Ang) before annotation
     - Opaque white background behind lines
     - Plot aesthetics: Draw OH lines to top of subplots
     - Plot aesthetics: Remove legend; adjust white space
     - WARN if more than six lines
     - Attempt to constrain fit using bounds, but that did not work
     - Fix case if curve_fit solutions are outside of spectral range
    Modified by Chun Ly, 21 June 2018
     - Write npz file containing use lines that are grouped together
     - mylogger call for writing files
     - Switching back to uncompressed npz - Got interpret file pickle error (IOError)
    Modified by Chun Ly, 25 June 2018
     - Bug fix: Crash on call to group_OH_lines. Require in_zoom to not be empty array
    Modified by Chun Ly, 25 June 2018
     - Bug fix: Handle odd fit results (check wavelength within 5 Ang of guess)
     - Bug fix: Add try/except for curve_fit RuntimeError
     - Bug fix: tab fix
     - Bug fix: Add try/except for curve_fit RuntimeError for gauss_multi
     - Bug fix: list to np.array
    '''

    # + on 09/01/2018
    logfile = rawdir + 'get_OH_centers.log'
    mylogger = glog.log0(logfile)._get_logger()

    if silent == False: mylogger.info('### Begin main : ' + systime())

    if exists(OH_file):
        if silent == False: mylogger.info('Reading : ' + OH_file)
        OH_data = np.loadtxt(OH_file)
        OH_lines = OH_data[:, 0]
        OH_int = OH_data[:, 1]

    infile = rawdir + 'hdr_info.QA.tbl'
    tab0 = asc.read(infile, format='fixed_width_two_line')
    i_obj = [xx for xx in range(len(tab0)) if tab0['QA'][xx] == 'obj'][0]
    o_tab0 = tab0[i_obj]
    gratwave = o_tab0['gratwave']

    slitwidth = float(o_tab0['slit'].split('arcsec')[0])  # np.float alias was removed from numpy
    if '111/mm' in o_tab0['grating']:
        if o_tab0['filter2'] == 'X_G0518':
            R_spec = 6600.0 / (slitwidth / 0.3)
            x_diff = 0.06
        if o_tab0['filter2'] == 'J_G0517':
            R_spec = 7200.0 / (slitwidth / 0.3)
            x_diff = 0.07

    x_min = (gratwave - x_diff) * 1.E4
    x_max = (gratwave + x_diff) * 1.E4
    npix = np.round((x_max - x_min) / 0.15)

    x0 = x_min + 0.15 * np.arange(npix)

    OH_spec_mod = np.zeros(len(x0))
    in_rge = np.where((OH_lines >= x0[0]) & (OH_lines <= x0[-1]))[0]
    for ii in range(len(in_rge)):
        idx = in_rge[ii]
        temp = OH_int[idx] * gaussian_R(x0, OH_lines[idx], R_spec)
        OH_spec_mod += temp

    y_max = max(OH_spec_mod) * 1.25

    i_lines = np.where(OH_spec_mod >= np.max(OH_spec_mod) * 0.01)[0]

    lines_set = list(group(i_lines))

    OH_spec_mod_resid = OH_spec_mod.copy()

    rev_lines = []  #np.zeros(len(lines_set))
    rev_int = []  #np.zeros(len(lines_set))

    nrows = 3
    fig, ax = plt.subplots(nrows=nrows)

    use_lines = []  # + on 21/06/2018

    xlim_arr = []
    dx = (x0[-1] - x0[0]) / 3.0
    for aa in range(nrows):
        ax[aa].plot(x0 / 1e4,
                    OH_spec_mod,
                    color='black',
                    zorder=3,
                    label="Rousselot (2000)")
        xlim = np.array([x_min + dx * aa, x_min + dx * (aa + 1)])
        xlim_arr.append(xlim)

        ax[aa].set_xlim(xlim / 1e4)
        ax[aa].set_ylim([-10, y_max])

        # Draw vertical lines for all possible OH skylines
        for val in OH_lines[in_rge]:
            ax[aa].axvline(x=val / 1e4,
                           color='black',
                           linewidth=0.25,
                           linestyle=':',
                           zorder=2)

    for ii in range(len(lines_set)):
        x_avg = int(np.average(lines_set[ii]))  # np.int alias was removed from numpy
        tl_min, tl_max = x0[lines_set[ii][0]], x0[lines_set[ii][1]]
        in_zoom = np.where((OH_lines >= tl_min) & (OH_lines <= tl_max))[0]

        if len(in_zoom) > 0:
            group_lines, group_int = group_OH_lines(OH_lines[in_zoom],
                                                    OH_int[in_zoom])

            # print ii, group_lines

            sig = group_lines / R_spec / (2 * np.sqrt(2 * np.log(2)))
            peak0 = OH_spec_mod[np.int_(
                (group_lines - x_min) / (x0[1] - x0[0]))]

            if len(group_lines) == 1:
                p0 = [0.0, peak0, group_lines[0], sig[0]]
                #plt.axvline(group_lines[0]/1e4, color='blue')
            else:
                if len(group_lines) > n_multi:
                    log.warn('More than 8 lines found, N=' +
                             str(len(group_lines)) + '!!!')
                t_peak0 = peak0.tolist()
                t_lines = group_lines.tolist()
                t_sig = sig.tolist()

                t_peak0 += np.zeros(n_multi - len(group_lines)).tolist()
                t_lines += np.zeros(n_multi - len(group_lines)).tolist()
                t_sig += np.zeros(n_multi - len(group_lines)).tolist()

                p0 = t_peak0
                p0 += t_lines
                p0 += t_sig

                p0 = np.array(p0)

            zoom = np.arange(lines_set[ii][0], lines_set[ii][1])
            # print p0
            #bounds = ((-0.001, 0.0, lam_cen-0.5, 0.1),
            #          (0.001, 1.25*p0[1], lam_cen+0.5, 1.5*p0[3]))

            if len(group_lines) == 1:
                try:
                    popt, pcov = curve_fit(gauss1d,
                                           x0[zoom],
                                           OH_spec_mod[zoom],
                                           p0=p0)
                except RuntimeError:
                    mylogger.warn('Did not converge!')
                    mylogger.warn('Using initial guess : %.3f' %
                                  group_lines[0])
                    popt = list(p0)

                if np.absolute(popt[2] - p0[2]) >= 5:
                    mylogger.warn('Reliable fit not determined!')
                    mylogger.warn('Using initial guess : %.3f' %
                                  group_lines[0])
                    popt = list(p0)

                t_mod = gauss1d(x0, *popt)

                use_lines.append([popt[2]])  # + on 21/06/2018

                rev_lines.append(popt[2])
                rev_int.append(popt[1])

                i_ax = [
                    xx for xx in range(nrows) if
                    (popt[2] >= xlim_arr[xx][0] and popt[2] <= xlim_arr[xx][1])
                ][0]
                ax[i_ax].annotate('%.2f' % popt[2],
                                  [popt[2] / 1e4, y_max * 0.99],
                                  ha='center',
                                  va='top',
                                  rotation=90,
                                  fontsize=4,
                                  bbox=bbox_props)
            else:
                #low_bound = tuple([0] * n_multi) + tuple(p0[n_multi:2*n_multi]-0.5) + \
                #            tuple([0] * n_multi)
                #up_bound  = tuple(p0[0:n_multi]*1.25+0.1) + tuple(p0[n_multi:2*n_multi]+0.5) + \
                #            tuple(p0[2*n_multi:]+1)
                try:
                    popt, pcov = curve_fit(gauss_multi,
                                           x0[zoom],
                                           OH_spec_mod[zoom],
                                           p0=p0)
                except RuntimeError:
                    mylogger.warn('Did not converge!')
                    mylogger.warn('Using initial guesses')
                    popt = np.array(p0)

                t_mod = gauss_multi(x0, *popt)

                t_loc = popt[n_multi:2 * n_multi]  # Line wavelengths (Ang)
                t_str = popt[0:n_multi]  # Line peak strength

                nonzero = np.where(t_loc != 0)[0]
                use_lines.append(t_loc[nonzero].tolist())  # + on 21/06/2018

                wave0 = t_loc[nonzero]
                wave0.sort()

                # Check that lines are within range
                in_rge = np.where((wave0 >= x_min) & (wave0 <= x_max))[0]
                wave0 = wave0[in_rge]
                t_str = t_str[nonzero[in_rge]]

                rev_lines += wave0.tolist()
                rev_int += t_str.tolist()

                wave0 = np.array(wave0)
                # Label lines
                skip = np.zeros(len(wave0))
                for ww in range(len(wave0)):
                    if skip[ww] == 0:
                        w_diff = wave0[ww:] - wave0[ww]
                        t_close = np.where(w_diff <= 5)[0]
                        close = np.arange(ww, len(wave0))[t_close]
                        str_comb = "\n".join(
                            ['%.2f' % val for val in wave0[close]])
                        w_cen = np.average(wave0[close])
                        #0.5*(wave0[0]+wave0[-1]) #np.average(wave0)

                        i_ax = [
                            xx for xx in range(nrows)
                            if (w_cen >= xlim_arr[xx][0]
                                and w_cen <= xlim_arr[xx][1])
                        ][0]
                        ax[i_ax].annotate(str_comb,
                                          [w_cen / 1e4, y_max * 0.99],
                                          ha='center',
                                          va='top',
                                          rotation=90,
                                          fontsize=4,
                                          bbox=bbox_props)
                        skip[close] = 1
                    #endif
                #endfor

        # print '## t_mod : ', np.min(t_mod), np.max(t_mod)

        OH_spec_mod_resid -= t_mod

    for aa in range(nrows):
        ax[aa].plot(x0 / 1e4,
                    OH_spec_mod_resid,
                    linestyle='dashed',
                    color='blue',
                    zorder=3,
                    label='Residual')

        for t_line in rev_lines:
            ax[aa].axvline(x=t_line / 1e4,
                           color='red',
                           linestyle='--',
                           linewidth=1.0,
                           zorder=1)

    l_tab = Table([rev_lines, rev_int])
    out_file = rawdir + 'rousselot2000_convl.dat'
    mylogger.info('Writing : ' + out_file)
    asc.write(l_tab, out_file, format='no_header')

    ann_txt = r'%.2f $\mu$m; %s; ' % (gratwave, o_tab0['filter2'])
    ann_txt += '%s; %.3f" slit; R = %i' % (o_tab0['grating'], slitwidth,
                                           R_spec)
    ax[0].set_title(ann_txt)
    #leg = ax[0].legend(loc='upper right', fancybox=True) #, frameon=False)
    #leg.get_frame().set_alpha(0.75)

    ax[2].set_xlabel(r'Wavelength [$\mu$m]')
    plt.subplots_adjust(left=0.08,
                        right=0.95,
                        bottom=0.06,
                        top=0.96,
                        hspace=0.12)

    fig.set_size_inches(6, 8)
    out_pdf = out_file.replace('.dat', '.pdf')
    mylogger.info('Writing : ' + out_pdf)
    fig.savefig(out_pdf)

    # Write npz file containing final grouping | + on 21/06/2018
    savez_file = out_file.replace('.dat', '.npz')
    mylogger.info('Writing : ' + savez_file)
    np.savez(savez_file, use_lines=use_lines)

    if silent == False: mylogger.info('### End main : ' + systime())
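
The model OH spectrum above is built by broadening every catalogue line with gaussian_R at resolving power R_spec, using sigma = lambda / R / (2 sqrt(2 ln 2)) as in the fit setup. A sketch of such a profile, under the assumption (not verified against the original helper) that gaussian_R returns a unit-amplitude Gaussian:

import numpy as np

def gaussian_R_sketch(x, lam_cen, R):
    """Unit-amplitude Gaussian centred on lam_cen with FWHM = lam_cen / R."""
    sig = lam_cen / R / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    return np.exp(-0.5 * ((x - lam_cen) / sig) ** 2)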
Ejemplo n.º 38
0
def QA_combine(path0, targets0, out_pdf='', silent=False, verbose=True):
    '''
    Display sky-subtracted and shifted combined images for telluric and
    science data

    Parameters
    ----------
    path0 : str
      Full path to where output PDF and FITS file are located. Must end
      with a '/'

    targets0: list or numpy array
      A list or array of source names available through path0

    out_pdf : str
      Filename for output PDF. Do NOT include full path.
      Default: 'QA_combine.pdf'

    silent : boolean
      Turns off stdout messages. Default: False

    verbose : boolean
      Turns on additional stdout messages. Default: True

    Returns
    -------
    multi-page PDF plot, 'QA_combine.pdf'

    Notes
    -----
    Created by Chun Ly, 31 May 2017
    Modified by Chun Ly, 1 June 2017
     - Switch over to pyplot.imshow() since aplpy does not allow for enough customization
    '''

    if silent == False: log.info('### Begin QA_combine : ' + systime())

    out_pdf = path0 + 'QA_combine.pdf' if out_pdf == '' else path0 + out_pdf
    pp = PdfPages(out_pdf)

    for target in targets0:
        t_path = path0 + target + '/'

        dir_list, list_path = dir_check.main(t_path,
                                             silent=silent,
                                             verbose=verbose)

        for dpath in list_path:
            tel_file = glob.glob(dpath + 'tell_comb.fits')
            obj_file = glob.glob(dpath + 'obj_comb.fits')

            if len(tel_file) == 0 and len(obj_file) == 0:
                log.warn('## No tell_comb.fits and obj_comb.fits found in: ')
                log.warn('## ' + dpath)

            if len(tel_file) == 1 and len(obj_file) == 1:
                fig, (ax1, ax2) = plt.subplots(1, 2)  # Mod on 01/06/2017

            # Mod on 01/06/2017
            if len(tel_file) != 0:
                t_im, t_hdr = fits.getdata(tel_file[0], header=True)

                lam_max = t_hdr['CRVAL2'] + t_hdr['CD2_2'] * t_hdr['NAXIS2']
                extent = [0, t_hdr['NAXIS1'], t_hdr['CRVAL2'], lam_max]

                z1, z2 = zscale.get_limits(t_im)
                norm = ImageNormalize(vmin=z2, vmax=z1)
                ax1.imshow(t_im,
                           cmap='Greys',
                           origin='lower',
                           norm=norm,
                           extent=extent)
                yticks = np.array(ax1.get_yticks())
                ax1.set_yticklabels([val / 1e4 for val in yticks])

                ax1.get_yaxis().set_tick_params(which='major',
                                                direction='in',
                                                right=True,
                                                length=5,
                                                width=1)
                ax1.get_yaxis().set_tick_params(which='minor',
                                                direction='in',
                                                right=True,
                                                length=2.5)
                ax1.get_xaxis().set_tick_params(which='major',
                                                direction='in',
                                                top=True,
                                                length=5,
                                                width=1)
                ax1.get_xaxis().set_tick_params(which='minor',
                                                direction='in',
                                                top=True,
                                                length=2.5)
                ax1.minorticks_on()

                ax1.set_xlabel('X [pixels]', fontsize=14)
                ax1.set_ylabel(r'Wavelength ($\mu$m)', fontsize=14)

                ax1.annotate(tel_file[0], [0.025, 0.975],
                             xycoords='axes fraction',
                             ha='left',
                             va='top',
                             bbox=bbox_props)

            # Mod on 01/06/2017
            if len(obj_file) != 0:
                o_im, o_hdr = fits.getdata(obj_file[0], header=True)

                lam_max = o_hdr['CRVAL2'] + o_hdr['CD2_2'] * o_hdr['NAXIS2']
                extent = [0, o_hdr['NAXIS1'], o_hdr['CRVAL2'], lam_max]

                z1, z2 = zscale.get_limits(o_im)
                norm = ImageNormalize(vmin=z2, vmax=z1)
                ax2.imshow(o_im,
                           cmap='Greys',
                           origin='lower',
                           norm=norm,
                           extent=extent)
                yticks = np.array(ax2.get_yticks())
                ax2.set_yticklabels([val / 1e4 for val in yticks])

                ax2.get_yaxis().set_tick_params(which='major',
                                                direction='in',
                                                right=True,
                                                length=5,
                                                width=1)
                ax2.get_yaxis().set_tick_params(which='minor',
                                                direction='in',
                                                right=True,
                                                length=2.5)
                ax2.get_xaxis().set_tick_params(which='major',
                                                direction='in',
                                                top=True,
                                                length=5,
                                                width=1)
                ax2.get_xaxis().set_tick_params(which='minor',
                                                direction='in',
                                                top=True,
                                                length=2.5)
                ax2.minorticks_on()

                ax2.set_xlabel('X [pixels]', fontsize=14)
                ax2.set_ylabel('')
                ax2.set_yticklabels([])

                ax2.annotate(obj_file[0], [0.025, 0.975],
                             xycoords='axes fraction',
                             ha='left',
                             va='top',
                             bbox=bbox_props)

            if len(tel_file) == 1 and len(obj_file) == 1:  # Mod on 01/06/2017
                subplots_adjust(left=0.06,
                                bottom=0.06,
                                top=0.995,
                                right=0.99,
                                hspace=0.00,
                                wspace=0.00)
                fig.set_size_inches(11, 7.3)
                # fig.tight_layout()
                fig.savefig(pp, format='pdf')  #, bbox_inches='tight')

    if silent == False: log.info('## Writing : ' + out_pdf)
    pp.close()
    if silent == False: log.info('### End QA_combine : ' + systime())
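
QA_combine writes one page per dataset into a single multi-page PDF through PdfPages. A minimal standalone sketch of that pattern (the image content is random data, for illustration only):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

with PdfPages('QA_combine_demo.pdf') as pp:
    for page in range(3):
        fig, ax = plt.subplots()
        ax.imshow(np.random.rand(32, 32), cmap='Greys', origin='lower')
        ax.set_title('page %d' % page)
        fig.savefig(pp, format='pdf')
        plt.close(fig)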
Ejemplo n.º 39
0
    def compute_TDBs(self):
        """Compute and add TDB and TDB long double columns to the TOA table.

        This routine creates new columns 'tdb' and 'tdbld' in a TOA table
        for TDB times, using the Observatory locations and IERS A Earth
        rotation corrections for UT1.
        """
        from astropy.utils.iers import IERS_A, IERS_A_URL
        from astropy.utils.data import download_file, clear_download_cache
        global iers_a_file, iers_a
        # If previous columns exist, delete them
        if 'tdb' in self.table.colnames:
            log.info('tdb column already exists. Deleting...')
            self.table.remove_column('tdb')
        if 'tdbld' in self.table.colnames:
            log.info('tdbld column already exists. Deleting...')
            self.table.remove_column('tdbld')

        # First make sure that we have already applied clock corrections
        ccs = False
        for tfs in self.table['flags']:
            if 'clkcorr' in tfs: ccs = True
        if ccs is False:
            log.warn(
                "No TOAs have clock corrections.  Use .apply_clock_corrections() first."
            )
        # These will be the new table columns
        col_tdb = numpy.zeros_like(self.table['mjd'])
        col_tdbld = numpy.zeros(self.ntoas, dtype=numpy.longdouble)
        # Read the IERS for ut1_utc corrections, if needed
        iers_a_file = download_file(IERS_A_URL, cache=True)
        # Check to see if the cached file is older than any of the TOAs
        iers_file_time = time.Time(os.path.getctime(iers_a_file),
                                   format="unix")
        if (iers_file_time.mjd < self.last_MJD.mjd):
            clear_download_cache(iers_a_file)
            try:
                log.warn("Cached IERS A file is out-of-date.  Re-downloading.")
                iers_a_file = download_file(IERS_A_URL, cache=True)
            except:
                pass
        iers_a = IERS_A.open(iers_a_file)
        # Now step through in observatory groups to compute TDBs
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii + 2]
            # Make sure the string precisions are all set to 9 for all TOAs
            for t in grp['mjd']:
                t.precision = 9
            if key['obs'] in ["Barycenter", "Geocenter", "Spacecraft"]:
                # For these special cases, convert the times to TDB.
                # For Barycenter this will be
                # a null conversion, but for Geocenter the scale will likely
                # be TT (if they came from a spacecraft like Fermi, RXTE or NICER)
                tdbs = [t.tdb for t in grp['mjd']]
            elif key['obs'] in observatories:
                # For a normal observatory, convert to Time in UTC
                # with location specified as observatory,
                # and then convert to TDB
                utcs = time.Time([t.isot for t in grp['mjd']],
                                 format='isot',
                                 scale='utc',
                                 precision=9,
                                 location=observatories[obs].loc)
                utcs.delta_ut1_utc = utcs.get_delta_ut1_utc(iers_a)
                # Also save delta_ut1_utc for these TOAs for later use
                for toa, dut1 in zip(grp['mjd'], utcs.delta_ut1_utc):
                    toa.delta_ut1_utc = dut1
                # The actual conversion from UTC to TDB is done by astropy.Time
                # as described here <http://docs.astropy.org/en/stable/time/>,
                # with the real work done by the IAU SOFA library
                tdbs = utcs.tdb
            else:
                log.error("Unknown observatory ({0})".format(key['obs']))

            col_tdb[loind:hiind] = numpy.asarray([t for t in tdbs])
            col_tdbld[loind:hiind] = numpy.asarray(
                [utils.time_to_longdouble(t) for t in tdbs])
        # Now add the new columns to the table
        col_tdb = table.Column(name='tdb', data=col_tdb)
        col_tdbld = table.Column(name='tdbld', data=col_tdbld)
        self.table.add_columns([col_tdb, col_tdbld])
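
The heavy lifting in compute_TDBs is astropy's time machinery: a UTC Time with an observatory location converts to TDB via its .tdb attribute. A minimal sketch with an explicit EarthLocation follows; the coordinates are illustrative (roughly those of the GBT) and are not taken from the original code.

from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.time import Time

loc = EarthLocation(lat=38.4331 * u.deg, lon=-79.8398 * u.deg, height=807.0 * u.m)
t_utc = Time('2015-06-30T12:00:00', scale='utc', location=loc, precision=9)
print(t_utc.tdb.isot)   # the same instant expressed on the TDB scale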
Ejemplo n.º 40
0
                try:
                    bsens_cube = SpectralCube.read(bsens, format='fits' if 'fits' in bsens else 'casa_image')
                except Exception as ex:
                    log.error(f"Failed to open 'bsens' image {bsens}")
                    raise ex

                if not os.path.exists(cleanest):
                    # hackaround for mismatched UID names, which shouldn't happen but did
                    ind = cleanest.find('uid')
                    cleanest_glob = cleanest[:ind] + "*" + cleanest[ind+21:]
                    cleanest_fl = glob.glob(cleanest_glob)
                    if len(cleanest_fl) > 0:
                        cleanest = cleanest_fl[0]
                        if len(cleanest_fl) > 1:
                            log.warn("WARNING: found multiple 'cleanest' matches {0}".format(cleanest_fl))

                    log.warn(f"Replaced 'cleanest' with {cleanest} to match {bsens}")

                    allow_reproj = True
                else:
                    allow_reproj = False

                if not os.path.exists(cleanest):
                    # hackaround for mismatched number of selfcal iterations
                    cfns = glob.glob(f"{basepath}/{field}/B{band}/cleanest/*_{config}_robust0_*finaliter*.{suffix}")
                    if len(cfns) == 1:
                        log.info(f"Replaced original cleanest {cleanest} with {cfns[0]}")
                        allow_reproj = False
                        cleanest = cfns[0]
                    elif len(cfns) == 0:
Ejemplo n.º 41
0
def write_table_fits(input, output, overwrite=False):
    """
    Write a Table object to a FITS file

    Parameters
    ----------
    input : Table
        The table to write out.
    output : str
        The filename to write the table to.
    overwrite : bool
        Whether to overwrite any existing file without warning.
    """

    # Check if output file already exists
    if isinstance(output, basestring) and os.path.exists(output):
        if overwrite:
            os.remove(output)
        else:
            raise IOError("File exists: {0}".format(output))

    # Create a new HDU object
    if input.masked:
        table_hdu = BinTableHDU(np.array(input.filled()))
        for col in table_hdu.columns:
            # The astype is necessary because if the string column is less
            # than one character, the fill value will be N/A by default which
            # is too long, and so no values will get masked.
            fill_value = input[col.name].get_fill_value()
            col.null = fill_value.astype(input[col.name].dtype)
    else:
        table_hdu = BinTableHDU(np.array(input))

    # Set units for output HDU
    for col in table_hdu.columns:
        if input[col.name].units is not None:
            col.unit = input[col.name].units.to_string(format='fits')

    for key, value in input.meta.items():

        if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:

            log.warn("Meta-data keyword {0} will be ignored since it "
                     "conflicts with a FITS reserved keyword".format(key))

        if isinstance(value, list):
            for item in value:
                try:
                    table_hdu.header.append((key, item))
                except ValueError:
                    log.warn("Attribute `{0}` of type {1} cannot be written "
                             "to FITS files - skipping".format(
                                 key, type(value)))
        else:
            try:
                table_hdu.header[key] = value
            except ValueError:
                log.warn("Attribute `{0}` of type {1} cannot be written to "
                         "FITS files - skipping".format(key, type(value)))

    # Write out file
    table_hdu.writeto(output)
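
In current astropy, the same task is normally handled by the Table I/O registry rather than by assembling a BinTableHDU by hand. A minimal sketch (column names, units and metadata are illustrative):

import astropy.units as u
from astropy.table import Table

t = Table({'wave': [1.1, 1.2, 1.3], 'flux': [3.0, 2.5, 2.7]})
t['wave'].unit = u.micron
t.meta['OBJECT'] = 'demo'
t.write('demo_table.fits', overwrite=True)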
Ejemplo n.º 42
0
    def stage_data(self, uids):
        """
        Stage ALMA data

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'
        cache : True
            This is *forced* true, because the ALMA servers don't support repeats
            of the same request.
            Whether to cache the staging process.  This should generally be
            left as False when used interactively.

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """

        """
        With log.set_level(10)
        INFO: Staging files... [astroquery.alma.core]
        DEBUG: First request URL: https://almascience.eso.org/rh/submission [astroquery.alma.core]
        DEBUG: First request payload: {'dataset': [u'ALMA+uid___A002_X3b3400_X90f']} [astroquery.alma.core]
        DEBUG: First response URL: https://almascience.eso.org/rh/checkAuthenticationStatus/3f98de33-197e-4692-9afa-496842032ea9/submission [astroquery.alma.core]
        DEBUG: Request ID: 3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        DEBUG: Submission URL: https://almascience.eso.org/rh/submission/3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        .DEBUG: Data list URL: https://almascience.eso.org/rh/requests/anonymous/786823226 [astroquery.alma.core]
        """

        if isinstance(uids, six.string_types):
            uids = [uids]
        if not isinstance(uids, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        log.info("Staging files...")

        self._get_dataarchive_url()

        url = urljoin(self.dataarchive_url, 'rh/submission')
        log.debug("First request URL: {0}".format(url))
        #'ALMA+uid___A002_X391d0b_X7b'
        #payload = [('dataset','ALMA+'+clean_uid(uid)) for uid in uids]
        payload = {'dataset':['ALMA+'+clean_uid(uid) for uid in uids]}
        log.debug("First request payload: {0}".format(payload))

        self._staging_log = {'first_post_url':url}

        # Request staging for the UIDs
        # This component cannot be cached, since the returned data can change
        # if new data are uploaded
        response = self._request('POST', url, data=payload,
                                 timeout=self.TIMEOUT, cache=False)
        self._staging_log['initial_response'] = response
        log.debug("First response URL: {0}".format(response.url))
        response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            time.sleep(1)
            # CANNOT cache this stage: it is not a real data page!  Results in
            # infinite loops
            response = self._request('POST', url, data=payload,
                                     timeout=self.TIMEOUT, cache=False)
            self._staging_log['initial_response'] = response
            if 'j_spring_cas_security_check' in response.url:
                log.warn("Staging request was not successful.  Try again?")
            response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            raise RemoteServiceError("Could not access data.  This error "
                                     "can arise if the data are private and "
                                     "you do not have access rights or are "
                                     "not logged in.")

        request_id = response.url.split("/")[-2]
        assert len(request_id) == 36
        self._staging_log['request_id'] = request_id
        log.debug("Request ID: {0}".format(request_id))

        # Submit a request for the specific request ID identified above
        submission_url = urljoin(self.dataarchive_url,
                                 os.path.join('rh/submission', request_id))
        log.debug("Submission URL: {0}".format(submission_url))
        self._staging_log['submission_url'] = submission_url
        has_completed = False
        staging_submission = self._request('GET', submission_url, cache=True)
        self._staging_log['staging_submission'] = staging_submission
        staging_submission.raise_for_status()

        data_page_url = staging_submission.url
        dpid = data_page_url.split("/")[-1]
        assert len(dpid) == 9
        self._staging_log['staging_page_id'] = dpid

        while not has_completed:
            time.sleep(1)
            # CANNOT cache this step: please_wait will happen infinitely
            data_page = self._request('GET', data_page_url, cache=False)
            if 'Please wait' not in data_page.text:
                has_completed = True
            print(".",end='')
        self._staging_log['data_page'] = data_page
        data_page.raise_for_status()
        staging_root = BeautifulSoup(data_page.content)
        downloadFileURL = staging_root.find('form').attrs['action']
        data_list_url = os.path.split(downloadFileURL)[0]

        # Old version, unreliable: data_list_url = staging_submission.url
        log.debug("Data list URL: {0}".format(data_list_url))
        self._staging_log['data_list_url'] = data_list_url

        time.sleep(1)
        data_list_page = self._request('GET', data_list_url, cache=True)
        self._staging_log['data_list_page'] = data_list_page
        data_list_page.raise_for_status()

        if 'Error' in data_list_page.text:
            # parse the page so the server-side error message can be extracted
            root = BeautifulSoup(data_list_page.content)
            errormessage = root.find('div', id='errorContent').string.strip()
            raise RemoteServiceError(errormessage)

        tbl = self._parse_staging_request_page(data_list_page)

        return tbl
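
A hedged usage sketch for the method above, assuming the astroquery version this excerpt comes from (stage_data has since been reworked in newer astroquery releases); the UID is the one quoted in the docstring.

from astroquery.alma import Alma

alma = Alma()
staged = alma.stage_data('uid://A002/X391d0b/X7b')
print(staged.colnames)   # UID, file URL and file size columns, per the docstring
# The returned URLs can then be fetched with download_files, as suggested by the
# HTTP 405 error message earlier in this document.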
Ejemplo n.º 43
0
def lightcurve_through_image(lightcurve,
                             exposure,
                             frame=np.array([30, 30]),
                             final_resolution=None,
                             duet=None,
                             gal_type=None,
                             gal_params=None,
                             debug=False,
                             debugfilename='lightcurve',
                             silent=False):
    """Transform a theoretical light curve into a flux measurement.

    1. Take the values of a light curve, optionally rebin it to a new time
    resolution.
    2. Then, create an image with a point source corresponding to each flux
    measurement, and calculate the flux from the image with ``daophot``.
    3. Return the ``realistic`` light curve.

    Parameters
    ----------
    lightcurve : ``astropy.table.Table``
        The lightcurve has to contain the columns 'time', 'fluence_D1', and
        'fluence_D2'. Photon fluxes are in counts/s.
    exposure : ``astropy.units.Quantity``
        Exposure time used for the light curve

    Other parameters
    ----------------
    frame : [N, M]
        Number of pixel along x and y axis
    final_resolution : ``astropy.units.Quantity``, default None
        Rebin the light curve to this time resolution before creating the light
         curve. Must be > exposure
    duet : ``astroduet.config.Telescope``
        If None, a default one is created
    gal_type : string
        Default galaxy string ("spiral"/"elliptical") or "custom" w/ Sersic parameters
        in gal_params
    gal_params : dict
        Dictionary of parameters for Sersic model (see sim_galaxy)
    debug : bool
        If True, save the light curves to file
    debugfilename : str
        File to save the light curves to
    silent : bool
        Suppress progress bars

    Returns
    -------
    lightcurve : ``astropy.table.Table``
        A light curve, rebinned to ``final_resolution``, and with four new
        columns: 'fluence_D1_fit', 'fluence_D1_fiterr', 'fluence_D2_fit',
        and 'fluence_D2_fiterr', containing the flux measurements from the
        intermediate images and their errorbars.
    """
    from astropy.table import Table
    lightcurve = copy.deepcopy(lightcurve)
    if silent:
        tqdm = lambda x: x
    else:
        tqdm = imported_tqdm

    with suppress_stdout():
        if duet is None:
            duet = Telescope()

    with suppress_stdout():
        [bgd_band1, bgd_band2] = background_pixel_rate(duet,
                                                       low_zodi=True,
                                                       diag=True)
    # Directory for debugging purposes
    if debugfilename != 'lightcurve':
        debugdir = os.path.join('debug_imgs', debugfilename)
    else:
        rand = np.random.randint(0, 99999999)
        debugdir = f'debug_imgs_{rand}'

    if debug:
        mkdir_p(debugdir)

    good = (lightcurve['fluence_D1'] > 0) & (lightcurve['fluence_D2'] > 0)
    if not np.any(good):
        log.warn("Light curve has no points with fluence > 0")
        return
    lightcurve = lightcurve[good]

    lightcurve = \
        construct_images_from_lightcurve(
            lightcurve, exposure, duet=duet, gal_type=gal_type,
            gal_params=gal_params, frame=frame, debug=debug,
            debugfilename=os.path.join(debugdir, debugfilename+'.hdf5'),
            low_zodi=True, silent=silent)

    total_image_rate1 = np.sum(lightcurve['imgs_D1'], axis=0)
    total_image_rate2 = np.sum(lightcurve['imgs_D2'], axis=0)

    total_image_rate = total_image_rate1 + total_image_rate2

    total_image_rate_bkgsub1 = np.sum(lightcurve['imgs_D1_bkgsub'], axis=0)
    total_image_rate_bkgsub2 = np.sum(lightcurve['imgs_D2_bkgsub'], axis=0)

    total_image_rate_bkgsub = \
        total_image_rate_bkgsub1 + total_image_rate_bkgsub2

    total_images_rate_list = [total_image_rate1, total_image_rate2]

    psf_fwhm_pix = duet.psf_fwhm / duet.pixel

    log.info('Constructing reference images')
    # Make reference images (5 exposures)
    nexp = 5
    ref_image1 = construct_image(frame,
                                 exposure,
                                 duet=duet,
                                 band=duet.bandpass1,
                                 gal_type=gal_type,
                                 gal_params=gal_params,
                                 sky_rate=bgd_band1,
                                 n_exp=nexp)
    ref_image_rate1 = ref_image1 / (exposure * nexp)
    ref_bkg1, ref_bkg_rms_median1 = estimate_background(ref_image_rate1,
                                                        method='1D',
                                                        sigma=2)
    ref_rate_bkgsub1 = ref_image_rate1 - ref_bkg1

    ref_image2 = construct_image(frame,
                                 exposure,
                                 duet=duet,
                                 band=duet.bandpass2,
                                 gal_type=gal_type,
                                 gal_params=gal_params,
                                 sky_rate=bgd_band2,
                                 n_exp=nexp)
    ref_image_rate2 = ref_image2 / (exposure * nexp)
    ref_bkg2, ref_bkg_rms_median2 = estimate_background(ref_image_rate2,
                                                        method='1D',
                                                        sigma=2)
    ref_rate_bkgsub2 = ref_image_rate2 - ref_bkg2

    # psf_array = duet.psf_model(x_size=5,y_size=5).array

    total_ref_img_rate = ref_image_rate1 + ref_image_rate2
    total_ref_img_rate_bkgsub = \
        ref_rate_bkgsub1 + ref_rate_bkgsub2

    log.info('Finding source in integrated diff image')
    diff_total_image = \
        calculate_diff_image(total_image_rate, total_image_rate_bkgsub,
                             total_ref_img_rate,
                             total_ref_img_rate_bkgsub,
                             duet)

    with suppress_stdout():
        star_tbl, bkg_image, threshold = find(diff_total_image,
                                              psf_fwhm_pix.value,
                                              method='daophot',
                                              background='1D',
                                              frame='diff')
    if len(star_tbl) < 1:
        log.warn("No good detections in this field")
        return

    if debug:
        outfile = os.path.join(debugdir, f'images_total.p')
        with open(outfile, 'wb') as fobj:
            pickle.dump(
                {
                    'imgD1': total_images_rate_list[0],
                    'imgD2': total_images_rate_list[1]
                }, fobj)
        outfile = os.path.join(debugdir, f'images_ref.p')
        with open(outfile, 'wb') as fobj:
            pickle.dump({
                'imgD1': ref_image_rate1,
                'imgD2': ref_image_rate2
            }, fobj)

        outfile = os.path.join(debugdir, f'images_diff.p')
        with open(outfile, 'wb') as fobj:
            pickle.dump({'imgD1': diff_total_image}, fobj)

    star_tbl.sort('flux')
    star_tbl = star_tbl[-1:]['x', 'y']

    # decide light curve bins before image generation, for speed.
    lightcurve['nbin'] = 1
    if final_resolution is not None:
        lightcurve = rebin_lightcurve(lightcurve,
                                      exposure,
                                      final_resolution,
                                      debug=debug)

    for duet_no in [1, 2]:
        for suffix in ['', 'err']:
            colname = f'fluence_D{duet_no}_fit{suffix}'
            lightcurve[colname] = 0.
            lightcurve[colname].unit = u.ph / (u.cm**2 * u.s)
            colname = f'ABmag_D{duet_no}_fit{suffix}'
            lightcurve[colname] = 0.

    lightcurve['snr_D1'] = 0.
    lightcurve['snr_D2'] = 0.
    # Generate light curve
    log.info('Measuring fluxes and creating light curve')
    for i, row in enumerate(tqdm(lightcurve)):
        time = row['time']

        image_rate1 = lightcurve['imgs_D1'][i]
        image_rate_bkgsub1 = lightcurve['imgs_D1_bkgsub'][i]

        diff_image1 = calculate_diff_image(image_rate1,
                                           image_rate_bkgsub1,
                                           ref_image_rate1,
                                           ref_rate_bkgsub1,
                                           duet=duet)

        with suppress_stdout():
            result1, _ = run_daophot(diff_image1,
                                     threshold,
                                     star_tbl,
                                     niters=1,
                                     snr_lim=0.,
                                     duet=duet)

        fl1_fit = result1['flux_fit'][0] * image_rate1.unit
        fl1_fite = result1['flux_unc'][0] * image_rate1.unit
        lightcurve['fluence_D1_fit'][i] = duet.rate_to_fluence(fl1_fit)
        lightcurve['fluence_D1_fiterr'][i] = duet.rate_to_fluence(fl1_fite)
        if (fl1_fit > 0) & (fl1_fite > 0):
            lightcurve['snr_D1'][i] = fl1_fit / fl1_fite
            lightcurve['ABmag_D1_fit'][i] = \
                duet_fluence_to_abmag(lightcurve['fluence_D1_fit'][i], 1,
                                      duet=duet).value
            # Magnitude error from the fractional flux error: 2.5 log10(1 + 1/SNR)
            ABerr = 2.5 * np.log10(1 + 1 / lightcurve['snr_D1'][i])
            lightcurve['ABmag_D1_fiterr'][i] = ABerr

        image_rate2 = lightcurve['imgs_D2'][i]
        image_rate_bkgsub2 = lightcurve['imgs_D2_bkgsub'][i]

        diff_image2 = calculate_diff_image(image_rate2,
                                           image_rate_bkgsub2,
                                           ref_image_rate2,
                                           ref_rate_bkgsub2,
                                           duet=duet)

        with suppress_stdout():
            result2, _ = run_daophot(diff_image2,
                                     threshold,
                                     star_tbl,
                                     niters=1,
                                     snr_lim=0.,
                                     duet=duet)
        fl2_fit = result2['flux_fit'][0] * image_rate2.unit
        fl2_fite = result2['flux_unc'][0] * image_rate2.unit

        lightcurve['fluence_D2_fit'][i] = duet.rate_to_fluence(fl2_fit)
        lightcurve['fluence_D2_fiterr'][i] = duet.rate_to_fluence(fl2_fite)
        if (fl2_fit > 0) & (fl2_fite > 0):
            lightcurve['snr_D2'][i] = fl2_fit / fl2_fite
            lightcurve['ABmag_D2_fit'][i] = \
                duet_fluence_to_abmag(lightcurve['fluence_D2_fit'][i], 2,
                                      duet=duet).value
            # Magnitude error from the fractional flux error: 2.5 log10(1 + 1/SNR)
            ABerr = 2.5 * np.log10(1 + 1 / lightcurve['snr_D2'][i])
            lightcurve['ABmag_D2_fiterr'][i] = ABerr

    return lightcurve
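
The measurement loop above turns each fitted count rate into a fluence and an AB magnitude, with the magnitude uncertainty computed from the signal-to-noise ratio as 2.5 log10(1 + 1/SNR). A minimal sketch of just that error propagation, using plain NumPy and made-up flux values (no DUET instrument object is assumed):

import numpy as np

def abmag_error_from_snr(snr):
    """AB magnitude uncertainty for a fractional flux error of 1/snr."""
    return 2.5 * np.log10(1.0 + 1.0 / snr)

# Hypothetical fitted fluxes and uncertainties (arbitrary rate units)
flux_fit = np.array([12.0, 5.5, 0.8])
flux_unc = np.array([1.2, 1.1, 0.9])

snr = flux_fit / flux_unc
print(abmag_error_from_snr(snr))  # approaches 1.0857 / snr for large snr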
Example no. 44
def units(self, value):
    log.warn("'units' is deprecated; please use 'unit'")
    self._unit = value
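
Example no. 44 above (and its getter counterpart in Example no. 46 below) implements a deprecated-alias property: reading or assigning 'units' emits a warning and forwards to the underlying '_unit' attribute that the preferred 'unit' property manages. A self-contained sketch of the same pattern, with an illustrative class name and warnings.warn standing in for the project's logger:

import warnings

class Column:
    """Illustrative container with a deprecated ``units`` alias for ``unit``."""

    def __init__(self, unit=None):
        self._unit = unit

    @property
    def unit(self):
        return self._unit

    @unit.setter
    def unit(self, value):
        self._unit = value

    @property
    def units(self):
        warnings.warn("'units' is deprecated; please use 'unit'", DeprecationWarning)
        return self._unit

    @units.setter
    def units(self, value):
        warnings.warn("'units' is deprecated; please use 'unit'", DeprecationWarning)
        self._unit = value

c = Column(unit='Jy')
c.units = 'mJy'      # triggers a DeprecationWarning
print(c.unit)        # 'mJy'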
Example no. 45
                          'c18o_h2co_ku_rgb_logred.png',
                          'w',
                          'y',
                          "14.5 GHz Continuum",
                          "H$_2$CO",
                          "C$^{18}$O",
                      ), ('hc3n_ch3oh_ocs_rgb.fits',
                          'hc3n_ch3oh_ocs_rgb_auto.png', 'y', 'b', 'HC$_3$N',
                          'CH$_3$OH', 'OCS')):
        name = rgb_cube_fits[:-9]
        # stupid hack, should just rewrite from scratch but it's easier to edit inplace...
        if suffix != 'auto':
            name += suffix
            rgb_cube_png = rgb_cube_png.replace('auto', suffix)
            if not os.path.exists(rgb_cube_png):
                log.warn("Skipping {0}: does not exist".format(rgb_cube_png))
                continue

        pl.rcParams['font.size'] = 18
        fig1 = pl.figure(1)
        fig1.clf()
        F = aplpy.FITSFigure(rgb_cube_png, figure=fig1)
        F.show_rgb(rgb_cube_png)
        #F.recenter(290.93315, 14.509584, radius=0.0075)
        F.add_scalebar(
            (0.5 * u.pc / (5400 * u.pc)).to(u.deg, u.dimensionless_angles()))
        F.scalebar.set_label('0.5 pc')
        F.scalebar.set_color('w')
        #F.set_tick_xspacing(0.0005)
        F.add_label(0.05,
                    0.95,
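
The excerpt above is cut off mid-call, but its scalebar setup already shows the useful conversion: a physical length at a known distance becomes an angular size through astropy's dimensionless_angles equivalency. A standalone sketch with the same numbers (0.5 pc at an assumed distance of 5400 pc):

import astropy.units as u

distance = 5400 * u.pc
length = 0.5 * u.pc

# Small-angle approximation: theta [rad] = length / distance
scalebar = (length / distance).to(u.deg, u.dimensionless_angles())
print(scalebar.to(u.arcsec))  # ~19.1 arcsec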
Example no. 46
def units(self):
    log.warn("'units' is deprecated; please use 'unit'")
    return self._unit
Example no. 47
    def add_jump(self, selected):
        """
        jump the toas selected or unjump them if already jumped

        :param selected: boolean array to apply to toas, True = selected toa
        """
        # TODO: split into two functions
        if "PhaseJump" not in self.prefit_model.components:
            # if no PhaseJump component, add one
            log.info("PhaseJump component added")
            a = pint.models.jump.PhaseJump()
            a.setup()
            self.prefit_model.add_component(a)
            self.prefit_model.remove_param("JUMP1")
            param = pint.models.parameter.maskParameter(
                name="JUMP",
                index=1,
                key="-gui_jump",
                key_value=1,
                value=0.0,
                units="second",
            )
            self.prefit_model.add_param_from_top(param, "PhaseJump")
            getattr(self.prefit_model, param.name).frozen = False
            self.prefit_model.components[
                "PhaseJump"]._parent = self.prefit_model
            if self.fitted:
                self.postfit_model.add_component(a)
            for dict1, dict2 in zip(
                    self.all_toas.table["flags"][selected],
                    self.selected_toas.table["flags"],
            ):
                dict1["gui_jump"] = 1
                dict1["jump"] = 1
                dict2["gui_jump"] = 1
                dict2["jump"] = 1
            return param.name
        # if gets here, has at least one jump param already
        # if it doesn't overlap or cancel, add the param
        jump_nums = [
            int(dict["jump"]) if "jump" in dict.keys() else np.nan
            for dict in self.all_toas.table["flags"]
        ]
        numjumps = self.prefit_model.components[
            "PhaseJump"].get_number_of_jumps()
        if numjumps == 0:
            log.warn(
                "There are no jumps (maskParameter objects) in PhaseJump. Please delete the PhaseJump object and try again. "
            )
            return None
        # if only par file jumps in PhaseJump object
        if np.isnan(np.nanmax(jump_nums)):
            # for every jump, set appropriate flag for TOAs it jumps
            for jump_par in self.prefit_model.components[
                    "PhaseJump"].get_jump_param_objects():
                # find TOAs jump applies to
                mask = jump_par.select_toa_mask(self.all_toas)
                # apply to dictionaries for future use
                for dict in self.all_toas.table["flags"][mask]:
                    dict["jump"] = jump_par.index
            jump_nums = [
                int(dict["jump"]) if "jump" in dict.keys() else np.nan
                for dict in self.all_toas.table["flags"]
            ]
        for num in range(1, numjumps + 1):
            num = int(num)
            jump_select = [num == jump_num for jump_num in jump_nums]
            if np.array_equal(jump_select, selected):
                # if current jump exactly matches selected, remove it
                self.prefit_model.remove_param("JUMP" + str(num))
                if self.fitted:
                    self.postfit_model.remove_param("JUMP" + str(num))
                for dict1, dict2 in zip(
                        self.all_toas.table["flags"][selected],
                        self.selected_toas.table["flags"],
                ):
                    if "jump" in dict1.keys() and dict1["jump"] == num:
                        del dict1["jump"]
                        if "gui_jump" in dict1.keys():
                            del dict1["gui_jump"]
                    if "jump" in dict2.keys() and dict2["jump"] == num:
                        del dict2["jump"]
                        if "gui_jump" in dict2.keys():
                            del dict2["gui_jump"]
                nums_subset = range(num + 1, numjumps + 1)
                for n in nums_subset:
                    # iterate through jump params and rename them so that they are always in numerical order starting with JUMP1
                    n = int(n)
                    param = getattr(self.prefit_model.components["PhaseJump"],
                                    "JUMP" + str(n))
                    for dict in self.all_toas.table["flags"]:
                        if "jump" in dict.keys() and dict["jump"] == n:
                            dict["jump"] = n - 1
                            if "gui_jump" in dict.keys():
                                dict["gui_jump"] = n - 1
                                param.key_value = n - 1
                    newpar = param.new_param(index=(n - 1), copy_all=True)
                    self.prefit_model.add_param_from_top(newpar, "PhaseJump")
                    self.prefit_model.remove_param(param.name)
                    if self.fitted:
                        self.postfit_model.add_param_from_top(
                            newpar, "PhaseJump")
                        self.postfit_model.remove_param(param.name)
                if "JUMP1" not in self.prefit_model.params:
                    # remove PhaseJump component if no jump params
                    comp_list = getattr(self.prefit_model,
                                        "PhaseComponent_list")
                    for item in comp_list:
                        if isinstance(item, pint.models.jump.PhaseJump):
                            self.prefit_model.remove_component(item)
                            if self.fitted:
                                self.postfit_model.remove_component(item)
                else:
                    self.prefit_model.components["PhaseJump"].setup()
                    if self.fitted:
                        self.postfit_model.components["PhaseJump"].setup()
                log.info("Removed param JUMP" + str(num))
                return jump_select
            elif True in [a and b for a, b in zip(jump_select, selected)]:
                # if current jump overlaps selected, raise an error and end
                log.warn(
                    "The selected toa(s) overlap an existing jump. Remove all interfering jumps before attempting to jump these toas."
                )
                return None
        # if here, then doesn't overlap or match anything
        for dict1, dict2 in zip(self.all_toas.table["flags"][selected],
                                self.selected_toas.table["flags"]):
            dict1["jump"] = numjumps + 1
            dict1["gui_jump"] = numjumps + 1
            dict2["jump"] = numjumps + 1
            dict2["gui_jump"] = numjumps + 1
        param = pint.models.parameter.maskParameter(
            name="JUMP",
            index=numjumps + 1,
            key="-gui_jump",
            key_value=numjumps + 1,
            value=0.0,
            units="second",
            aliases=["JUMP"],
        )
        self.prefit_model.add_param_from_top(param, "PhaseJump")
        getattr(self.prefit_model, param.name).frozen = False
        self.prefit_model.components["PhaseJump"].setup()
        if (self.fitted and not self.prefit_model.components["PhaseJump"]
                == self.postfit_model.components["PhaseJump"]):
            self.postfit_model.add_param_from_top(param, "PhaseJump")
            getattr(self.postfit_model, param.name).frozen = False
            self.postfit_model.components["PhaseJump"].setup()
        return param.name
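
Much of the bookkeeping in add_jump exists to keep the JUMP parameters numbered consecutively from 1 after one of them is removed, both in the timing model and in the per-TOA flag dictionaries. A minimal sketch of that renumbering step on plain flag dictionaries (no PINT objects are assumed; the function name is illustrative):

def remove_jump(flags, num):
    """Delete jump ``num`` from a list of per-TOA flag dicts and shift
    higher-numbered jumps down so the numbering stays 1..N without gaps."""
    for f in flags:
        if f.get("jump") == num:
            f.pop("jump", None)
            f.pop("gui_jump", None)
        elif f.get("jump", 0) > num:
            f["jump"] -= 1
            if "gui_jump" in f:
                f["gui_jump"] -= 1

flags = [{"jump": 1}, {"jump": 3}, {}, {"jump": 2, "gui_jump": 2}]
remove_jump(flags, 2)
print(flags)  # [{'jump': 1}, {'jump': 2}, {}, {}]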
Example no. 48
def fit_a_spectrum(sp,
                   radexfit=False,
                   write=True,
                   vlimits=(-105, 125),
                   pars=pars):
    sp.plotter.autorefresh = False
    sp.plotter(figure=1)
    ncomp = pars[sp.specname]['ncomp']
    if ncomp == 0:
        log.info(
            "Skipping {0} - no velocity components detected.".format(sp.specname))
        return
    returns = [ncomp]
    velos = pars[sp.specname]['velo']
    spname = sp.specname.replace(" ", "_")

    width_min = 1

    if 'width_max' in pars[sp.specname]:
        width_max = pars[sp.specname]['width_max']
    elif 'Map' in sp.specname or 'box' in sp.specname:
        width_max = 40
    else:
        width_max = 15

    sp.specfit.Registry.add_fitter('h2co_simple',
                                   simple_fitter2,
                                   6,
                                   multisingle='multi')
    guesses_simple = [
        x for ii in range(ncomp)
        for x in (sp.data.max(), velos[ii], 5, 0.5, 1.0, sp.data.max())
    ]

    if not (min(velos) > vlimits[0] and max(velos) < vlimits[1]):
        log.warn("A velocity guess {0} is outside limits {1}.".format(
            velos, vlimits))
        vlimits = (min(velos) - 25, max(velos) + 25)
        log.warn("Changing limits to {0}".format(vlimits))

    sp.specfit(
        fittype='h2co_simple',
        multifit=True,
        guesses=guesses_simple,
        limited=[(True, True)] * 6,
        limits=[(0, 20), vlimits, (width_min, width_max), (0, 1), (0.3, 1.1),
                (0, 1e5)],
    )
    sp.baseline(excludefit=True,
                subtract=True,
                highlight_fitregion=True,
                order=1)

    sp.plotter(clear=True)
    sp.specfit(
        fittype='h2co_simple',
        multifit=True,
        guesses=guesses_simple,
        limited=[(True, True)] * 6,
        limits=[(0, 20), vlimits, (width_min, width_max), (0, 1), (0.3, 1.1),
                (0, 1e5)],
    )

    returns.append(copy.copy(sp.specfit.parinfo))

    err = sp.error.mean()

    sp.plotter()
    sp.specfit.plot_fit(show_components=True)
    sp.specfit.annotate(fontsize=font_sizes[ncomp])
    sp.specfit.plotresiduals(axis=sp.plotter.axis,
                             yoffset=-err * 5,
                             clear=False,
                             color='#444444',
                             label=False)
    sp.plotter.axis.set_ylim(sp.plotter.ymin - err * 5, sp.plotter.ymax)
    sp.plotter.savefig(
        os.path.join(figurepath,
                     "simple/{0}_fit_4_lines_simple.pdf".format(spname)))
    if write:
        sp.write(mpath("spectra/{0}_spectrum.fits".format(spname)))

    # This will mess things up for the radexfit (maybe in a good way) but *cannot*
    # be done after the radexfit
    # Set the spectrum to be the fit residuals.  The linear baseline has
    # already been subtracted from both the data and the residuals
    linear_baseline = sp.baseline.basespec
    sp.baseline.unsubtract()
    fitted_residuals = sp.baseline.spectofit = sp.specfit.residuals
    sp.baseline.includemask[:] = True  # Select ALL residuals
    sp.baseline.fit(spline=True, order=3, spline_sampling=50)
    spline_baseline = sp.baseline.basespec
    sp.data -= spline_baseline + linear_baseline
    sp.baseline.subtracted = True
    sp.error[:] = sp.stats((218.5e9, 218.65e9))['std']
    sp.specfit(
        fittype='h2co_simple',
        multifit=True,
        guesses=guesses_simple,
        limited=[(True, True)] * 6,
        limits=[(0, 1e5), vlimits, (width_min, width_max), (0, 1), (0.3, 1.1),
                (0, 1e5)],
    )
    sp.plotter()
    sp.plotter.axis.plot(sp.xarr,
                         spline_baseline - err * 5,
                         color='orange',
                         alpha=0.75,
                         zorder=-1,
                         linewidth=2)
    sp.specfit.plot_fit(show_components=True)
    sp.specfit.annotate(fontsize=font_sizes[ncomp])
    sp.plotter.axis.plot(sp.xarr,
                         fitted_residuals - err * 5,
                         color="#444444",
                         linewidth=0.5,
                         drawstyle='steps-mid')
    #sp.specfit.plotresiduals(axis=sp.plotter.axis, yoffset=-err*5, clear=False,
    #                         color='#444444', label=False)
    sp.plotter.axis.set_ylim(sp.plotter.ymin - err * 5, sp.plotter.ymax)
    sp.plotter.savefig(
        os.path.join(
            figurepath,
            "simple/{0}_fit_4_lines_simple_splinebaselined.pdf".format(
                spname)))

    returns.append(copy.copy(sp.specfit.parinfo))

    if write:
        sp.write(mpath("spectra/{0}_spectrum_basesplined.fits".format(spname)))

    if radexfit:
        guesses = [
            x for ii in range(ncomp) for x in
            (100, 14, 4.5, sp.specfit.parinfo['VELOCITY{0}'.format(ii)].value,
             (sp.specfit.parinfo['WIDTH{0}'.format(ii)].value if
              (sp.specfit.parinfo['WIDTH{0}'.format(ii)].value < width_max
               and sp.specfit.parinfo['WIDTH{0}'.format(ii)].value > width_min
               ) else 5))
        ]

        sp.specfit.Registry.add_fitter('h2co_mm_radex',
                                       h2co_radex_fitter,
                                       5,
                                       multisingle='multi')
        sp.specfit(
            fittype='h2co_mm_radex',
            multifit=True,
            guesses=guesses,
            limits=[(10, 300), (11, 15), (3, 5.5), (-105, 125),
                    (width_min, width_max)] * ncomp,
            limited=[(True, True)] * 5 * ncomp,
            fixed=[False, False, False, True, True] * ncomp,
            quiet=False,
        )
        sp.plotter.savefig(
            os.path.join(figurepath,
                         "radex/{0}_fit_h2co_mm_radex.pdf".format(spname)))

        returns.append(copy.copy(sp.specfit.parinfo))

    return returns
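
The guesses_simple construction above flattens one tuple of starting parameters per velocity component into the single flat list the fitter expects (amplitude, velocity, width, plus three model-specific values per component). A small standalone illustration of that pattern with hypothetical inputs:

data_max = 2.3            # hypothetical peak intensity
velos = [12.0, 55.0]      # hypothetical component velocities (km/s)
ncomp = len(velos)

guesses_simple = [
    x for ii in range(ncomp)
    for x in (data_max, velos[ii], 5, 0.5, 1.0, data_max)
]
print(guesses_simple)
# [2.3, 12.0, 5, 0.5, 1.0, 2.3, 2.3, 55.0, 5, 0.5, 1.0, 2.3]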
Example no. 49
    def spectrum(self, value):

        if value is not None:

            if hasattr(self, '_temperature') and self._temperature is not None:
                raise Exception(
                    "A temperature has already been set, so cannot set a spectrum"
                )

            if isinstance(value, Table):

                if 'nu' not in value.columns:
                    raise TypeError("spectrum Table does not contain a"
                                    " 'nu' column")

                if 'fnu' not in value.columns:
                    raise TypeError("spectrum Table does not contain an"
                                    " 'fnu' column")

                nu, fnu = value['nu'], value['fnu']

            elif type(value) in (tuple, list):

                if len(value) == 2:
                    nu, fnu = value
                else:
                    raise TypeError("spectrum tuple or list should contain"
                                    " two elements")

                if type(nu) in [list, tuple]:
                    nu = np.array(nu, dtype=float)
                else:
                    nu = nu.astype(float)

                if type(fnu) in [list, tuple]:
                    fnu = np.array(fnu, dtype=float)
                else:
                    fnu = fnu.astype(float)

                if not is_numpy_array(nu) or nu.ndim != 1:
                    raise TypeError("nu should be a 1-D sequence")

                if not is_numpy_array(fnu) or fnu.ndim != 1:
                    raise TypeError("fnu should be a 1-D sequence")

                if nu.shape != fnu.shape:
                    raise TypeError("nu and fnu should have the same shape")

            else:

                raise TypeError('spectrum should be specified either as an '
                                'astropy.table.Table instance, or a tuple '
                                'of two 1-D Numpy arrays (nu, fnu) with the '
                                'same length')

            # Check if frequency array has duplicate values
            if len(np.unique(nu)) != len(nu):
                raise ValueError("nu sequence contains duplicate values")

            # Check for any negative values

            if np.any(nu <= 0.):
                raise ValueError("nu should be strictly positive")

            if np.any(fnu < 0.):
                raise ValueError("fnu should be positive")

            # Check for any NaN or Inf values

            if np.any(np.isnan(nu) | np.isinf(nu)):
                raise ValueError("nu contains NaN/Inf values")

            if np.any(np.isnan(fnu) | np.isinf(fnu)):
                raise ValueError("fnu contains NaN/Inf values")

            # Check if spectrum needs sorting
            if not monotonically_increasing(nu):
                logger.warn(
                    "Spectrum is being re-sorted in order of increasing frequency"
                )
                order = np.argsort(nu)
                nu = nu[order]
                fnu = fnu[order]

            self._spectrum = {'nu': nu, 'fnu': fnu}

        else:

            self._spectrum = value
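
The setter above accepts either a table or a (nu, fnu) pair and enforces a series of sanity checks before storing the spectrum. A condensed sketch of the same validation on bare NumPy arrays (Table handling omitted; the function name is illustrative):

import numpy as np

def validate_spectrum(nu, fnu):
    """Validate a (nu, fnu) spectrum and return it sorted by increasing frequency."""
    nu = np.asarray(nu, dtype=float)
    fnu = np.asarray(fnu, dtype=float)
    if nu.ndim != 1 or fnu.ndim != 1 or nu.shape != fnu.shape:
        raise TypeError("nu and fnu should be 1-D sequences with the same shape")
    if len(np.unique(nu)) != len(nu):
        raise ValueError("nu sequence contains duplicate values")
    if np.any(nu <= 0.):
        raise ValueError("nu should be strictly positive")
    if np.any(fnu < 0.):
        raise ValueError("fnu should be positive")
    if np.any(~np.isfinite(nu)) or np.any(~np.isfinite(fnu)):
        raise ValueError("nu/fnu contain NaN or Inf values")
    order = np.argsort(nu)  # re-sort in order of increasing frequency
    return {'nu': nu[order], 'fnu': fnu[order]}

spec = validate_spectrum([3e14, 1e14, 2e14], [1.0, 0.5, 0.8])
print(spec['nu'])  # [1.e+14 2.e+14 3.e+14]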
Example no. 50
    def density(self, grid):
        '''
        Return the density grid

        Parameters
        ----------
        grid : :class:`~hyperion.grid.SphericalPolarGrid` or :class:`~hyperion.grid.CylindricalPolarGrid` instance.
            The spherical or cylindrical polar grid object containing
            information about the position of the grid cells.

        Returns
        -------
        rho : np.ndarray
            A 3-dimensional array containing the density of the disk inside
            each cell. The shape of this array is the same as
            ``grid.shape``.
        '''

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warn("Ignoring disk, since rmax < rmin")
            return np.zeros(grid.shape)

        if self.mass == 0:
            return np.zeros(grid.shape)

        # Find disk scaleheight at each cylindrical radius
        h = self.h_0 * (grid.gw / self.r_0)**self.beta

        # Find disk density at all positions
        rho = (self.r_0 / grid.gw) ** (self.beta - self.p) \
            * np.exp(-0.5 * (grid.gz / h) ** 2)

        # Truncate below rmin and above rmax
        if self.cylindrical_inner_rim:
            rho[grid.gw < self.rmin] = 0.
        else:
            rho[grid.gr < self.rmin] = 0.

        if self.cylindrical_outer_rim:
            rho[grid.gw > self.rmax] = 0.
        else:
            rho[grid.gr > self.rmax] = 0.

        # Find density factor
        rho *= self.rho_0

        if np.sum(rho * grid.volumes) == 0. and self.mass > 0:
            raise Exception(
                "Discretized disk mass is zero, suggesting that the grid is too coarse"
            )

        norm = self.mass / np.sum(rho * grid.volumes)

        logger.info(
            "Disk density is being re-scaled by a factor of %.2f to give the correct mass."
            % norm)

        if norm > 1.1 or norm < 1. / 1.1:
            logger.warn(
                "Re-scaling factor is significantly different from 1, which indicates that the grid may be too coarse to properly resolve the disk."
            )

        # Normalize to total disk mass
        rho = rho * norm

        return rho
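
The density method evaluates a flared-disk profile, rho = rho_0 * (r_0 / w)**(beta - p) * exp(-0.5 * (z / h)**2) with scaleheight h = h_0 * (w / r_0)**beta, truncates it at the inner and outer rims, and rescales the discretized values so they sum to the requested mass. A minimal NumPy sketch of that evaluation on a toy cylindrical grid (all parameter values are illustrative, and a simple ring-volume estimate replaces the grid object's cell volumes):

import numpy as np

# Illustrative disk parameters (arbitrary units)
r_0, h_0 = 1.0, 0.1
beta, p = 1.1, -1.0
mass = 1.0
rmin, rmax = 0.1, 10.0

# Toy cylindrical grid: cylindrical radius w and height z at cell centres
w = np.linspace(0.05, 12.0, 200)
z = np.linspace(-2.0, 2.0, 101)
W, Z = np.meshgrid(w, z, indexing='ij')

h = h_0 * (W / r_0) ** beta                        # scaleheight at each radius
rho = (r_0 / W) ** (beta - p) * np.exp(-0.5 * (Z / h) ** 2)
rho[(W < rmin) | (W > rmax)] = 0.                  # truncate at the rims

# Approximate cell volumes for 2*pi*w*dw*dz rings, then rescale to the mass
dw, dz = w[1] - w[0], z[1] - z[0]
volumes = 2 * np.pi * W * dw * dz
rho *= mass / np.sum(rho * volumes)
print(np.sum(rho * volumes))  # ~1.0, the requested disk mass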
Example no. 51
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text and
                    (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("Heading was found when parsing the download "
                                 "page but it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png' in
                              tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB')
                            else u.Unit('kB') if 'kb' in unit.lower()
                            else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5, msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}"
                            .format(size, uid, columns['URL'][-1]))
                else:
                    log.warn("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB')
                        else u.Unit('kB') if 'kb' in unit.lower()
                        else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5, msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(size, uid,
                                                          columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError(
                "No valid UIDs were found in the staged data table. "
                "Please include {0} in a bug report."
                .format(self._staging_log['data_list_url']))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
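
The table parser above repeatedly splits a human-readable size such as '123.4MB' into a number and a unit with a regular expression, builds an astropy Quantity from it, and falls back to -1 byte when the number is missing (header rows). A standalone sketch of just that conversion (the helper name is illustrative; the regex and unit handling follow the snippet):

import re
import astropy.units as u

def parse_size(text):
    """Parse strings like '123.4MB', '1.2GB' or '-' into an astropy Quantity."""
    size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)', text).groups()
    unit = (u.Unit(unit) if unit in ('GB', 'MB')
            else u.Unit('kB') if 'kb' in unit.lower()
            else u.byte)
    try:
        return (float(size) * unit).to(u.Gbyte)
    except ValueError:
        # size was '-' or empty, i.e. a header or placeholder row
        return -1 * u.byte

print(parse_size('123.4MB'))  # ~0.123 Gbyte
print(parse_size('-'))        # -1.0 byte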
Example no. 52
    def __init__(self, model):

        SphericalDust.__init__(self)

        wav = np.loadtxt('%s.alb' % model, usecols=[0])
        self.optical_properties.albedo = np.loadtxt('%s.alb' % model,
                                                    usecols=[1])
        kappa = np.loadtxt('%s.k_abs' % model, usecols=[1])
        self.optical_properties.chi = kappa / (1 -
                                               self.optical_properties.albedo)

        # Check for NaN values
        for quantity in ['chi', 'albedo']:

            values = self.optical_properties.__dict__[quantity]

            if np.any(np.isnan(values)):
                logger.warn(
                    "NaN values found inside MieX %s file - interpolating" %
                    quantity)
                invalid = np.isnan(values)
                values[invalid] = interp1d_fast_loglog(wav[~invalid],
                                                       values[~invalid],
                                                       wav[invalid])
                if np.any(np.isnan(values)):
                    raise Exception(
                        "Did not manage to fix NaN values in MieX %s" %
                        quantity)

        self.optical_properties.nu = c / wav * 1.e4

        n_wav = len(wav)
        n_mu = (len(open('%s.f11' % model).readlines()) // n_wav) - 1

        mu = np.zeros(n_mu)

        # Read mu
        f11 = open('%s.f11' % model)
        f11.readline()
        f11.readline()
        for i in range(n_mu):
            mu[i] = np.cos(np.radians(float(f11.readline().split()[0])))
        f11.close()
        self.optical_properties.mu = mu[::-1]

        # Read in matrix elements

        self.optical_properties.initialize_scattering_matrix()

        f11 = open('%s.f11' % model)
        f12 = open('%s.f12' % model)
        f33 = open('%s.f33' % model)
        f34 = open('%s.f34' % model)

        f11.readline()
        f12.readline()
        f33.readline()
        f34.readline()

        for j in range(n_wav):

            if float(f11.readline()) != wav[j]:
                raise Exception("Incorrect wavelength in f11")
            if float(f12.readline()) != wav[j]:
                raise Exception("Incorrect wavelength in f12")
            if float(f33.readline()) != wav[j]:
                raise Exception("Incorrect wavelength in f33")
            if float(f34.readline()) != wav[j]:
                raise Exception("Incorrect wavelength in f34")

            for i in range(n_mu):

                self.optical_properties.P1[j, n_mu - i - 1] = float(
                    f11.readline().split()[1])
                self.optical_properties.P2[j, n_mu - i - 1] = float(
                    f12.readline().split()[1])
                self.optical_properties.P3[j, n_mu - i - 1] = float(
                    f33.readline().split()[1])
                self.optical_properties.P4[j, n_mu - i - 1] = float(
                    f34.readline().split()[1])

        for i in range(n_mu):

            for quantity in ['P1', 'P2', 'P3', 'P4']:

                values = self.optical_properties.__dict__[quantity]

                if np.any(np.isnan(values[:, i])):
                    logger.warn(
                        "NaN values found inside MieX %s file - interpolating"
                        % quantity)
                    invalid = np.isnan(values[:, i])
                    values[:, i][invalid] = interp1d_fast_loglog(
                        wav[~invalid], values[:, i][~invalid], wav[invalid])
                    if np.any(np.isnan(values[:, i])):
                        raise Exception(
                            "Did not manage to fix NaN values in MieX %s" %
                            quantity)
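
When the optical-property tables contain NaNs, the reader above patches them by interpolating the neighbouring valid points in log-log space. A standalone sketch of that repair step, with np.interp standing in for hyperion's interp1d_fast_loglog helper and made-up values:

import numpy as np

def fill_nans_loglog(wav, values):
    """Replace NaN entries by log-log interpolation over wavelength."""
    values = np.asarray(values, dtype=float).copy()
    invalid = np.isnan(values)
    if invalid.any():
        values[invalid] = 10 ** np.interp(np.log10(wav[invalid]),
                                          np.log10(wav[~invalid]),
                                          np.log10(values[~invalid]))
    return values

wav = np.array([0.1, 1.0, 10.0, 100.0])        # microns, illustrative
albedo = np.array([0.6, np.nan, 0.3, 0.1])     # one bad value at 1 micron
print(fill_nans_loglog(wav, albedo))           # NaN replaced by ~0.42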
Example no. 53
def make_spw_cube(spw='spw{0}', spwnum=0, fntemplate='SgrB2',
                  overwrite_existing=False, bmaj_limits=None,
                  fnsuffix="", filesuffix='image.pbcor.fits',
                  cropends=False,
                  minimize=True,
                  add_beam_info=True):
    """
    Parameters
    ----------
    spw : str
        String template for the input/output name
    spwnum : int
        The spectral window number
    fntemplate : str
        Filename template (goes into the glob)
    overwrite_existing : bool
        Overwrite data in the output cube?
    cropends : bool or int
        Number of pixels to crop off the ends of an image
    minimize : bool
        Compute the spatial minimal subcube before building the cube?  Slices
        for all subsequent cubes will be computed from the first cube.
    """
    spw = spw.format(spwnum)

    big_filename = '{1}_{0}{2}_lines.fits'.format(spw, fntemplate, fnsuffix)

    # First set up an empty file
    if not os.path.exists(big_filename):
        header_fn = glob.glob('piece_of_{1}_cube{2}.{0}.channels0to*.{3}'.format(spw, fntemplate, fnsuffix, filesuffix))
        if len(header_fn) != 1:
            raise ValueError("Found too many or too few matches: {0}".format(header_fn))
        else:
            header_fn = header_fn[0]

        if minimize:
            cube0 = SpectralCube.read(header_fn)
            slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                    spatial_only=True)
            # use the calculated 3rd dimension, plus the difference of the
            # x and y slices
            #header['NAXIS2'] = slices[1].stop-slices[1].start
            #header['NAXIS1'] = slices[2].stop-slices[2].start
            header = cube0[slices].header
        else:
            header = fits.getheader(header_fn)

        # Make an arbitrary, small data before prepping the header
        data = np.zeros((100, 100), dtype=np.float32)
        hdu = fits.PrimaryHDU(data=data, header=header)
        cdelt_sign = np.sign(hdu.header['CDELT3'])
        # Set the appropriate output size (this can be extracted from the LISTOBS)
        header['NAXIS3'] = nchans_total[spwnum]
        if cdelt_sign == -1:
            ind0, ind1 = getinds(header_fn)
            header['CRPIX3'] = nchans_total[spwnum] - ind1 + 1

        shape = (header['NAXIS3'], header['NAXIS2'], header['NAXIS1'])

        # Write to disk
        header.tofile(big_filename)
        # Using the 'append' io method, update the *header*
        with open(big_filename, 'rb+') as fobj:
            # Seek past the length of the header, plus the length of the
            # data we want to write.
            # The -1 is to account for the final byte that we are about to
            # write:
            # 'seek' works on bytes, so divide #bits / (bytes/bit)
            fobj.seek(len(header.tostring()) + (shape[0] *
                                                shape[1] *
                                                shape[2] *
                                                int(np.abs(header['BITPIX'])/8)) -
                      1)
            fobj.write(b'\0')

        big_cube = SpectralCube.read(big_filename)
        header_cube = SpectralCube.read(header_fn)
        # in both cases, SpectralCube sorts the extrema
        assert big_cube.spectral_extrema[0] == header_cube.spectral_extrema[0]
        assert np.all(big_cube.wcs.wcs.cdelt == header_cube.wcs.wcs.cdelt)


    # Find the appropriate files (this is NOT a good way to do this!  Better to
    # provide a list.  But wildcards are quick & easy...)
    files = glob.glob("piece_of_{1}_cube{2}.{0}.chan*{3}".format(spw,fntemplate,fnsuffix,filesuffix))
    log.info("Files to be merged: ")
    log.info(str(files))

    # open the file in update mode (it should have the right dims now)
    hdul = fits.open(big_filename, mode='update')

    if add_beam_info:
        shape = hdul[0].data.shape[0]
        if len(hdul) > 1 and isinstance(hdul[1], fits.BinTableHDU):
            pass
        else:
            hdul.append(fits.BinTableHDU(np.recarray(shape,
                                                     names=['BMAJ','BMIN','BPA','CHAN','POL'],
                                                     formats=['f4','f4','f4','i4','i4'])))

    for fn in ProgressBar(files):
        log.info("{0} {1}".format(getinds(fn), fn))
        ind0,ind1 = getinds(fn)

        if 'slices' not in locals():
            if minimize:
                cube0 = SpectralCube.read(fn)
                slices = cube0.subcube_slices_from_mask(cube0.mask,
                                                        spatial_only=True)
            else:
                slices = (slice(None),)*3

        if cropends:
            # don't crop 1st or last pixel in full cube
            if ind0 > 0:
                ind0 = ind0 + cropends
                dataind0 = cropends
                extra = 0
            else:
                # because I forgot to reduce nchan, there is an "extra" pixel
                # when we start at zero (there should not be a corresponding one
                # when we end too late)
                dataind0 = 0
                extra = 1

            if ind1 < nchans_total[spwnum] - 1:
                ind1 = ind1 - cropends
                dataind1 = - cropends - extra
            else:
                dataind1 = None

        if 'cdelt_sign' not in locals():
            cdelt_sign = np.sign(fits.getheader(fn)['CDELT3'])
            log.warn("cdelt_sign was not defined: overwriting a"
                     " previously-existing file.  "
                     "This may not be what you want; the data could be going "
                     "opposite the parent cube.  Check that the original "
                     "header is OK. sign(CDELT) is now {0}, "
                     "while for the big header it is {1}"
                     .format(cdelt_sign,
                             np.sign(fits.getheader(big_filename)['CDELT3'])))
        if cdelt_sign == -1:
            ind1, ind0 = (nchans_total[spwnum] - ind0 - 1,
                          nchans_total[spwnum] - ind1 - 1)
        plane = hdul[0].data[ind0]
        if np.all(plane == 0) or overwrite_existing:
            log.info("Replacing indices {0}->{2} {1}"
                     .format(getinds(fn), fn, (ind0,ind1)))

            data = fits.getdata(fn)

            if bmaj_limits is not None:
                beamtable = fits.open(fn)[1]
                ok_beam = ((beamtable.data['BMAJ'] > bmaj_limits[0]) &
                           (beamtable.data['BMAJ'] < bmaj_limits[1]))
                data[~ok_beam] = np.nan
            if add_beam_info:
                beamtable = fits.open(fn)[1]
                hdul[1].data[ind0:ind1] = beamtable.data[dataind0:dataind1]


            hdul[0].data[ind0:ind1,:,:] = data[dataind0:dataind1, slices[1], slices[2]]
            hdul.flush()
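
The empty-file setup near the top of make_spw_cube is a standard trick for pre-allocating a FITS cube that is too large to build in memory: write only the header, then seek to the last byte of the data section and write a single byte so the file reaches its full size, after which it can be opened in update mode and filled plane by plane. A condensed, self-contained sketch of just that step (shape and filename are illustrative):

import numpy as np
from astropy.io import fits

shape = (45, 128, 128)              # (channels, y, x), illustrative
big_filename = 'empty_cube.fits'    # hypothetical output name

# Build a header from a tiny placeholder array, then fix up the axis sizes
header = fits.PrimaryHDU(data=np.zeros((1, 1, 1), dtype=np.float32)).header
header['NAXIS3'], header['NAXIS2'], header['NAXIS1'] = shape

# Write just the header, then extend the file to header + data size
header.tofile(big_filename, overwrite=True)
nbytes = shape[0] * shape[1] * shape[2] * int(np.abs(header['BITPIX']) / 8)
with open(big_filename, 'rb+') as fobj:
    fobj.seek(len(header.tostring()) + nbytes - 1)
    fobj.write(b'\0')

# The (all-zero) cube can now be opened in update mode and filled incrementally
hdul = fits.open(big_filename, mode='update')
print(hdul[0].data.shape)  # (45, 128, 128)
hdul.close()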
Example no. 54
    def density(self, collider_density):

        collider_ids = {
            'H2': 0,
            'PH2': 1,
            'OH2': 2,
            'E': 3,
            'H': 4,
            'HE': 5,
            'H+': 6
        }

        self._use_thermal_opr = False

        if isinstance(collider_density, (float, int, _quantity, np.ndarray)):
            if not self._suppress_density_warning:
                log.warn("Assuming the density is n(H_2).")
            collider_density = {'H2': collider_density}

        collider_densities = defaultdict(lambda: 0)
        for k in collider_density:
            collider_densities[k.upper()] = unitless(
                u.Quantity(collider_density[k], self._u_cc))
            if k.upper() not in self._all_valid_colliders:
                raise ValueError(
                    'Collider %s is not one of the valid colliders: %s' %
                    (k, self._all_valid_colliders))

        if (('OH2' in collider_densities and collider_densities['OH2'] != 0) or
            ('PH2' in collider_densities and collider_densities['PH2'] != 0)):

            # this is simply not true: NH3 has just ph2 as a collider
            #if not 'PH2' in collider_densities or not 'OH2' in collider_densities:
            #    raise ValueError("If o-H2 density is specified, p-H2 must also be.")
            # TODO: look up whether RADEX uses density[0] if density[1] and [2] are specified
            # (it looks like the answer is "no" based on a quick test)
            #self.radex.cphys.density[0] = 0 # collider_densities['OH2'] + collider_densities['PH2']
            # PARA is [1], ORTHO is [2]
            # See lines 91, 92 of io.f
            if 'PH2' in collider_densities:
                self.radex.cphys.density[1] = collider_densities['PH2']
            if 'OH2' in collider_densities:
                self.radex.cphys.density[2] = collider_densities['OH2']
            self._use_thermal_opr = False
        elif 'H2' in collider_densities:
            warnings.warn("Using a default ortho-to-para ratio (which "
                          "will only affect species for which independent "
                          "ortho & para collision rates are given)")
            self._use_thermal_opr = True
            #self.radex.cphys.density[0] = collider_densities['H2']

            T = unitless(self.temperature)
            if T > 0:
                # From Faure, private communication
                opr = min(3.0, 9.0 * np.exp(-170.6 / T))
            else:
                opr = 3.0
            fortho = opr / (1 + opr)
            log.debug("Set OPR to {0} and fortho to {1}".format(opr, fortho))
            self.radex.cphys.density[1] = collider_densities['H2'] * (1 -
                                                                      fortho)
            self.radex.cphys.density[2] = collider_densities['H2'] * (fortho)

        # RADEX relies on n(H2) = n(oH2) + n(pH2)
        # We have set n(oH2) and n(pH2) above
        vc = [x.lower() for x in self.valid_colliders]
        if 'h2' in vc:
            self.radex.cphys.density[0] = self.radex.cphys.density[1:3].sum()
            self.radex.cphys.density[1] = 0
            self.radex.cphys.density[2] = 0
        elif 'oh2' in vc or 'ph2' in vc:
            self.radex.cphys.density[0] = 0

        self.radex.cphys.density[3] = collider_densities['E']
        self.radex.cphys.density[4] = collider_densities['H']
        self.radex.cphys.density[5] = collider_densities['HE']
        self.radex.cphys.density[6] = collider_densities['H+']

        # skip H2 when computing by assuming OPR correctly distributes ortho & para
        # It's not obvious that RADEX does this correctly in readdata.f
        self.radex.cphys.totdens = self.radex.cphys.density.sum()

        # Unfortunately,
        # must re-read molecular file and re-interpolate to new density
        self._validate_colliders()
        self.radex.readdata()

        if not self._is_locked:
            self._is_locked = True
            assert self.locked_parameter in ('column', 'abundance', 'density')
            if self.locked_parameter == 'density':  # self is locked, still need to update
                if hasattr(self, '_previous_locked_parameter'):
                    self._lock_param(self._previous_locked_parameter)
                else:
                    self._lock_param('abundance')  # choose arbitrarily
            if self.locked_parameter == 'column':
                self.abundance = self.column_per_bin / (self.total_density *
                                                        self.length)
            elif self.locked_parameter == 'abundance':
                self.column_per_bin = self.total_density * self.length * self.abundance
            else:
                raise ValueError("Neither column nor abundance were updated")
            self._lock_param('density')
            self._is_locked = False

            invab = (self.total_density /
                     (self.column / self.length)).decompose().value
            if not np.allclose(invab, 1 / self.abundance):
                raise ValueError("Can not set density to %s" %
                                 collider_density)
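
When only a total H2 density is supplied, the setter above splits it into ortho and para components using a thermal ortho-to-para ratio, opr = min(3, 9 * exp(-170.6 / T)), with ortho fraction fortho = opr / (1 + opr). A standalone sketch of that split (temperature and density values are illustrative; the function name is not from the original class):

import numpy as np

def split_ortho_para(n_h2, temperature):
    """Split a total H2 density into (ortho, para) using a thermal OPR."""
    if temperature > 0:
        opr = min(3.0, 9.0 * np.exp(-170.6 / temperature))
    else:
        opr = 3.0
    fortho = opr / (1 + opr)
    return n_h2 * fortho, n_h2 * (1 - fortho)

n_oh2, n_ph2 = split_ortho_para(1e4, 20.0)   # cm^-3 and K, illustrative
print(n_oh2, n_ph2, n_oh2 / n_ph2)           # OPR is ~0.0018 at 20 K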