Example #1
def percentile_function(array):

    if np.all(np.isnan(array) | np.isinf(array)):
        log.warning("Image contains only NaN or Inf values")
        return lambda x: 0

    array = array.ravel()
    array = array[~np.isnan(array)]
    array = array[~np.isinf(array)]

    n_total = np.shape(array)[0]

    if n_total == 0:
        def return_zero(x):
            return 0
        return return_zero
    elif n_total == 1:
        def return_single(x):
            return array[0]
        return return_single

    array = np.sort(array)

    x = np.linspace(0., 100., num=n_total)

    spl = interp1d(x=x, y=array)

    if n_total > 10000:
        x = np.linspace(0., 100., num=10000)
        spl = interp1d(x=x, y=spl(x))

    array = None

    return spl
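
For context, a minimal usage sketch (assuming numpy and the percentile_function above, with its scipy/astropy dependencies, are importable): the returned callable maps a percentile in [0, 100] onto the corresponding value of the finite data.

import numpy as np

# Hypothetical data containing the NaN/Inf values that percentile_function filters out
data = np.array([[1.0, 2.0, np.nan], [4.0, np.inf, 6.0]])
percentile = percentile_function(data)
print(percentile(50.0))   # interpolated median of the finite values
print(percentile(100.0))  # largest finite value (6.0)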
Example #2
def test_log_to_list_level():

    with log.log_to_list(filter_level='ERROR') as log_list:
        log.error("Error message")
        log.warning("Warning message")

    assert len(log_list) == 1 and log_list[0].levelname == 'ERROR'
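
A quick illustration of the astropy logging API exercised by this test (a sketch; only an installed astropy is assumed):

from astropy import log

# Capture WARNING-and-above records emitted inside the block into a list
with log.log_to_list(filter_level='WARNING') as records:
    log.warning("something looks off")
    log.info("this record is filtered out")

print(len(records), records[0].levelname)  # 1 WARNING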
Example #3
def link_or_copy(group, name, link, copy, absolute_paths=False):
    '''
    Link or copy a dataset or group

    Parameters
    ----------
    group : h5py.Group
        The group to create the link, dataset, or group in
    name : str
        The name of the link, dataset, or group in the new file
    link : h5py.ExternalLink
        A link to the group or dataset to include
    copy : bool
        Whether to copy or link to the dataset
    absolute_paths : bool
        If copy=False, then if absolute_paths is True, absolute filenames
        are used in the link, otherwise the path relative to the file is used.
    '''
    if copy:
        f = h5py.File(link.filename, 'r')
        f.copy(link.path, group, name=name)
        f.close()
    else:
        if absolute_paths:
            group[name] = h5py.ExternalLink(os.path.abspath(link.filename), link.path)
        else:
            group[name] = h5py.ExternalLink(os.path.relpath(link.filename, os.path.dirname(group.file.filename)), link.path)
        try:
            group[name]
        except KeyError:  # indicates linking failed (h5py < 2.1.0)
            logger.warning("Linking failed, copying instead (indicates an outdated version of h5py)")
            del group[name]
            f = h5py.File(link.filename, 'r')
            f.copy(link.path, group, name=name)
            f.close()
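
A small usage sketch with hypothetical file and dataset names (data.h5 holding a dataset 'values', out.h5 receiving a link to it); h5py and the module-level logger used above are assumed available:

import h5py

# Create a source file with one dataset (hypothetical names)
with h5py.File('data.h5', 'w') as src:
    src.create_dataset('values', data=[1, 2, 3])

# Add an external link to it from a second file; copy=True would embed the data instead
with h5py.File('out.h5', 'w') as out:
    link_or_copy(out, 'values', h5py.ExternalLink('data.h5', '/values'), copy=False)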
Example #4
def _matplotlib_pil_bug_present():
    """
    Determine whether PIL images should be pre-flipped due to a bug in Matplotlib.

    Prior to Matplotlib 1.2.0, RGB images provided as PIL objects were
    oriented wrongly. This function tests whether the bug is present.
    """

    from matplotlib.image import pil_to_array

    try:
        from PIL import Image
    except ImportError:
        import Image

    from astropy import log

    array1 = np.array([[1,2],[3,4]], dtype=np.uint8)
    image = Image.fromarray(array1)
    array2 = pil_to_array(image)

    if np.all(array1 == array2):
        log.debug("PIL Image flipping bug not present in Matplotlib")
        return False
    elif np.all(array1 == array2[::-1,:]):
        log.debug("PIL Image flipping bug detected in Matplotlib")
        return True
    else:
        log.warning("Could not properly determine Matplotlib behavior for RGB images - image may be flipped incorrectly")
        return False
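
The helper takes no arguments, so checking for the bug is a one-liner (a sketch; numpy, Matplotlib, Pillow/PIL and astropy must be importable, as the function above expects):

if _matplotlib_pil_bug_present():
    print("Pre-flip PIL images before passing them to Matplotlib")
else:
    print("No flipping workaround needed")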
Example #5
    def from_fits(cls, hdu_list):
        """Create `EnergyDependentMultiGaussPSF` from HDU list.

        Parameters
        ----------
        hdu_list : `~astropy.io.fits.HDUList`
            HDU list with correct extensions.
        """
        valid_extnames = ['POINT SPREAD FUNCTION', 'PSF_2D']
        hdu = get_hdu_with_valid_name(hdu_list, valid_extnames)

        energy_lo = Quantity(hdu.data['ENERG_LO'][0], 'TeV')
        energy_hi = Quantity(hdu.data['ENERG_HI'][0], 'TeV')
        theta = Angle(hdu.data['THETA_LO'][0], 'deg')

        # Get sigmas
        shape = (len(theta), len(energy_hi))
        sigmas = []
        for key in ['SIGMA_1', 'SIGMA_2', 'SIGMA_3']:
            sigmas.append(hdu.data[key].reshape(shape))

        # Get amplitudes
        norms = []
        for key in ['SCALE', 'AMPL_2', 'AMPL_3']:
            norms.append(hdu.data[key].reshape(shape))
        try:
            energy_thresh_lo = Quantity(hdu.header['LO_THRES'], 'TeV')
            energy_thresh_hi = Quantity(hdu.header['HI_THRES'], 'TeV')
            return cls(energy_lo, energy_hi, theta, sigmas,
                       norms, energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warning('No safe energy thresholds found. Setting to default')
            return cls(energy_lo, energy_hi, theta, sigmas, norms)
Example #6
    def get_field_shifts(self, fieldid):
        """Returns the calibration shifts for a given field.

        Parameters
        ----------
        fieldid : str
            Field identifier, e.g. "0001_aug2003"

        Returns
        -------
        shifts : dictionary {'r':shift_r, 'i': shift_i, 'ha': shift_ha}
            Shifts to add to the magnitudes to calibrate a field.
        """
        idx_field = np.argwhere(IPHASQC.field('id') == fieldid)[0]

        shifts = {}
        for band in constants.BANDS:
            cond_run = (self.calib[band]['run']
                        == IPHASQC.field('run_' + band)[idx_field])
            if cond_run.sum() > 0:
                shifts[band] = self.calib[band]['shift'][cond_run][0]
            else:
                log.warning('No shift for %s' % fieldid)
                shifts[band] = 0.0

        log.debug("Shifts for {0}: {1}".format(fieldid, shifts))
        return shifts            
Example #7
    def _A(self):
        """Returns the matrix called "A" in [Glazebrook 1994, Section 3.3]
        """
        log.info('Glazebrook: creating a sparse {0}x{0} matrix (might take a while)'.format(self.n_nonanchors))
        A = sparse.lil_matrix((self.n_nonanchors,
                               self.n_nonanchors))

        nonanchorruns = self.runs[self.nonanchors]
        # Loop over all non-anchors that make up the matrix
        for i, run in enumerate(nonanchorruns):
            
            try:
                # On the diagonal, the matrix holds the negative sum of weights
                A[i, i] = -float(np.sum(self.overlaps[run]['weights']))

                # Off the diagonal, the matrix holds the weight where two runs overlap
                for run2, weight in zip(self.overlaps[run]['runs'],
                                        self.overlaps[run]['weights']):
                    idx_run2 = np.argwhere(run2 == nonanchorruns)
                    if len(idx_run2) > 0:
                        j = idx_run2[0]  # Index of the overlapping run
                        A[i, j] = weight
                        A[j, i] = weight  # Symmetric matrix
            except KeyError:
                log.warning('Glazebrook: no overlap data for run {0}'.format(run))
                A[i, i] = -1.0
        return A
Example #8
def test_log_to_list_origin2():

    with log.log_to_list(filter_origin='astropy.wcs') as log_list:
        log.error("Error message")
        log.warning("Warning message")

    assert len(log_list) == 0
Example #9
    def get_primaryID(self):
        """Identifies and ranks duplicate detections.

        Returns
        -------
        matchdata : tuple (sourceID, matchinfo)

        Notes
        -----
        The ranking criteria are
          1. source w/ best filter coverage wins (3 > 2 > 1); 
             if no winner, then
          2. source w/ smallest 'errBits' error flag wins; 
             if no winner, then
          3. source w/ best seeing wins (if seeing better by >20%);
             if no winner, then
          4. source closest to the optical axis wins.

        This function is optimised for speed at the expense of readability :-(
        """

        # Open the file with the sources crossmatched across fields
        try:
            crossmatch = fits.getdata(self.crossmatch_file, 1, memmap=True)
        except OSError as e:  # Anticipate a "No such file" error
            log.warning('Failed to open {0}: {1}'.format(self.crossmatch_file,
                                                         e))
            log.warning('Will try again in 5 seconds.')
            time.sleep(5)
            crossmatch = fits.getdata(self.crossmatch_file, 1)
Example #10
    def add_settled_disks(self, reference_disk, reference_size, eta=0.,
                          sizes=[], dust_files=[]):
        '''
        Automatically create disks with varying degrees of settling

        .. warning:: this function is still experimental, and will be documented once stable
        '''

        exists = False

        for disk in self.disks:
            if disk is reference_disk:
                logger.warning("Reference disk already exists, not re-adding")
                exists = True

        if not exists:
            logger.warning("Reference disk does not exist, adding")
            self.disks.append(reference_disk)

        for i, size in enumerate(sizes):
            disk = deepcopy(reference_disk)
            disk.star = self.star
            disk.h_0 *= (size / reference_size) ** (-eta)
            disk.dust = dust_files[i]
            self.disks.append(disk)
Example #11
    def from_table(cls, table):
        """Create `PSF3D` from `~astropy.table.Table`.

        Parameters
        ----------
        table : `~astropy.table.Table`
            Table with Table-PSF info.
        """
        theta_lo = table['THETA_LO'].squeeze()
        theta_hi = table['THETA_HI'].squeeze()
        offset = (theta_hi + theta_lo) / 2
        offset = Angle(offset, unit=table['THETA_LO'].unit)

        energy_lo = table['ENERG_LO'].squeeze()
        energy_hi = table['ENERG_HI'].squeeze()
        energy_lo = Energy(energy_lo, unit=table['ENERG_LO'].unit)
        energy_hi = Energy(energy_hi, unit=table['ENERG_HI'].unit)

        rad_lo = Quantity(table['RAD_LO'].squeeze(), table['RAD_LO'].unit)
        rad_hi = Quantity(table['RAD_HI'].squeeze(), table['RAD_HI'].unit)

        psf_value = Quantity(table['RPSF'].squeeze(), table['RPSF'].unit)

        try:
            energy_thresh_lo = Quantity(table.meta['LO_THRES'], 'TeV')
            energy_thresh_hi = Quantity(table.meta['HI_THRES'], 'TeV')
            return cls(energy_lo, energy_hi, offset, rad_lo, rad_hi, psf_value, energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warning('No safe energy thresholds found. Setting to default')
            return cls(energy_lo, energy_hi, offset, rad_lo, rad_hi, psf_value)
Example #12
    def units(self,unt):
        # Setup unit and num unit
        if isinstance(unt,(u.Unit,u.CompositeUnit)):
            self._units = unt.to_string()
            self._num_unit = unt
        elif isinstance(unt, str):
            if unt in pint_units.keys():
                self._units = unt
                self._num_unit = pint_units[unt]
            else:
                self._units = unt
                self._num_unit = u.Unit(self._units)
        elif unt is None:
            self._units = unt
            self._num_unit = unt

        else:
            raise ValueError('Units can only take string, astropy units or None')

        # Check if this is the first time set units
        if hasattr(self,'value'):
            wmsg = 'Parameter ' + self.name + ' units have been reset to ' + str(unt)
            log.warning(wmsg)
            try:
                if hasattr(self.value,'unit'):
                    temp = self.value.to(self.num_unit)
            except Exception:
                log.warning('The value unit is not compatible with'
                            ' parameter units right now.')
Example #13
    def from_fits(cls, hdu):
        """Create `EnergyDependentMultiGaussPSF` from HDU list.

        Parameters
        ----------
        hdu : `~astropy.io.fits.BinTableHDU`
            HDU
        """
        energy_lo = Quantity(hdu.data['ENERG_LO'][0], 'TeV')
        energy_hi = Quantity(hdu.data['ENERG_HI'][0], 'TeV')
        theta = Angle(hdu.data['THETA_LO'][0], 'deg')

        # Get sigmas
        shape = (len(theta), len(energy_hi))
        sigmas = []
        for key in ['SIGMA_1', 'SIGMA_2', 'SIGMA_3']:
            sigmas.append(hdu.data[key].reshape(shape))

        # Get amplitudes
        norms = []
        for key in ['SCALE', 'AMPL_2', 'AMPL_3']:
            norms.append(hdu.data[key].reshape(shape))
        try:
            energy_thresh_lo = Quantity(hdu.header['LO_THRES'], 'TeV')
            energy_thresh_hi = Quantity(hdu.header['HI_THRES'], 'TeV')
            return cls(energy_lo, energy_hi, theta, sigmas,
                       norms, energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warning('No safe energy thresholds found. Setting to default')
            return cls(energy_lo, energy_hi, theta, sigmas, norms)
Example #14
    def earth_location_itrf(self, time=None):
        '''Return Fermi spacecraft location in ITRF coordinates'''

        if self.tt2tdb_mode.lower().startswith('none'):
            log.warning('Using location=None for TT to TDB conversion')
            return None
        elif self.tt2tdb_mode.lower().startswith('geo'):
            log.warning('Using location geocenter for TT to TDB conversion')
            return EarthLocation.from_geocentric(0.0*u.m,0.0*u.m,0.0*u.m)
        elif self.tt2tdb_mode.lower().startswith('spacecraft'):
            # First, interpolate Earth-Centered Inertial (ECI) geocentric
            # location from orbit file.
            # These are inertial coordinates aligned with ICRS, called GCRS
            # <http://docs.astropy.org/en/stable/api/astropy.coordinates.GCRS.html>
            pos_gcrs =  GCRS(CartesianRepresentation(self.X(time.tt.mjd)*u.m,
                                                     self.Y(time.tt.mjd)*u.m,
                                                     self.Z(time.tt.mjd)*u.m),
                             obstime=time)

            # Now transform ECI (GCRS) to ECEF (ITRS)
            # By default, this uses the WGS84 ellipsoid
            pos_ITRS = pos_gcrs.transform_to(ITRS(obstime=time))

            # Return geocentric ITRS coordinates as an EarthLocation object
            return pos_ITRS.earth_location
        else:
            log.error('Unknown tt2tdb_mode %s, using None', self.tt2tdb_mode)
            return None
Example #15
    def __init__(self, name, FPorbname, tt2tdb_mode = 'pint'):


        if FPorbname.startswith('@'):
            # Read multiple orbit files names
            FPlist = []
            fnames = [ll.strip() for ll in open(FPorbname[1:]).readlines()]
            for fn in fnames:
                FPlist.append(load_FPorbit(fn))
            self.FPorb = vstack(FPlist)
            # Make sure full table is sorted
            self.FPorb.sort('MJD_TT')
        else:
            self.FPorb = load_FPorbit(FPorbname)
        # Now build the interpolator here:
        self.X = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['X'])
        self.Y = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Y'])
        self.Z = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Z'])
        self.Vx = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vx'])
        self.Vy = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vy'])
        self.Vz = InterpolatedUnivariateSpline(self.FPorb['MJD_TT'],self.FPorb['Vz'])
        super(NICERObs, self).__init__(name=name, tt2tdb_mode=tt2tdb_mode)
        # Print this warning once, mainly for @paulray
        if self.tt2tdb_mode.lower().startswith('pint'):
            log.debug('Using location=None for TT to TDB conversion (pint mode)')
        elif self.tt2tdb_mode.lower().startswith('geo'):
            log.warning('Using location geocenter for TT to TDB conversion')
Example #16
    def __init__(self, pos, vel):

        # make sure position and velocity input are 2D
        pos = atleast_2d(pos, insert_axis=1)
        vel = atleast_2d(vel, insert_axis=1)

        # make sure position and velocity have at least a dimensionless unit!
        if not hasattr(pos, "unit"):
            pos = pos * uno

        if not hasattr(vel, "unit"):
            vel = vel * uno

        if pos.unit == uno and vel.unit != uno:
            logger.warning(
                "Position unit is dimensionless but velocity unit is not. "
                "Are you sure that's what you want?"
            )
        elif vel.unit == uno and pos.unit != uno:
            logger.warning(
                "Velocity unit is dimensionless but position unit is not. "
                "Are you sure that's what you want?"
            )

        # make sure shape is the same
        for i in range(pos.ndim):
            if pos.shape[i] != vel.shape[i]:
                raise ValueError(
                    "Position and velocity must have the same shape " "{} vs {}".format(pos.shape, vel.shape)
                )

        self.pos = pos
        self.vel = vel
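
A hypothetical usage sketch: the constructor above appears to belong to a phase-space position class (here assumed to be importable as CartesianPhaseSpacePosition from gary.dynamics; adjust the import to wherever the class actually lives):

import astropy.units as u
import numpy as np
from gary.dynamics import CartesianPhaseSpacePosition  # assumed import path

pos = np.array([8.0, 0.0, 0.0]) * u.kpc
vel = np.array([0.0, 220.0, 0.0]) * u.km / u.s
w = CartesianPhaseSpacePosition(pos=pos, vel=vel)

# A dimensionless position combined with a velocity in km/s triggers
# the logger.warning branch in __init__ above
w2 = CartesianPhaseSpacePosition(pos=np.array([1.0, 2.0, 3.0]), vel=vel)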
Example #17
def read_lmv(filename):
    """
    Read an LMV cube file

    Specification is primarily in GILDAS image_def.f90
    """
    log.warning("CLASS LMV cube reading is tentatively supported.  "
             "Please post bug reports at the first sign of danger!")

    with open(filename,'rb') as lf:
        # lf for "LMV File"
        filetype = _read_string(lf, 12)
        #!---------------------------------------------------------------------
        #! @ private
        #!       SYCODE system code
        #!       '-'    IEEE
        #!       '.'    EEEI (IBM like)
        #!       '_'    VAX
        #!       IMCODE file code
        #!       '<'    IEEE  64 bits    (Little Endian, 99.9 % of recent computers)
        #!       '>'    EEEI  64 bits    (Big Endian, HPUX, IBM-RISC, and SPARC ...)
        #!---------------------------------------------------------------------
        imcode = filetype[6]
        if filetype[:6] != 'GILDAS' or filetype[7:] != 'IMAGE':
            raise TypeError("File is not a GILDAS Image file")

        if imcode in ('<','>'):
            if imcode =='>':
                log.warning("Swap the endianness first...")
            return read_lmv_type2(lf)
        else:
            return read_lmv_type1(lf)
Example #18
def _calc_CI(sampler,
             modelidx=0,
             confs=[3, 1],
             last_step=False,
             e_range=None,
             e_npoints=100,
             threads=None):
    """Calculate confidence interval.
    """
    from scipy import stats

    # If we are computing the samples for the confidence intervals, we need at
    # least one sample to constrain the highest confidence band
    # 1 sigma -> 6 samples
    # 2 sigma -> 43 samples
    # 3 sigma -> 740 samples
    # 4 sigma -> 31574 samples
    # 5 sigma -> 3488555 samples
    # We limit it to 1000 samples and warn that it might not be enough
    if e_range:
        maxconf = np.max(confs)
        minsamples = max(100, int(1 / stats.norm.cdf(-maxconf) + 1))
        if minsamples > 1000:
            log.warning(
                'In order to sample the confidence band for {0} sigma,'
                ' {1} new samples need to be computed, but we are limiting'
                ' it to 1000 samples, so the confidence band might not be'
                ' well constrained.'
                ' Consider reducing the maximum'
                ' confidence significance or using the samples stored in'
                ' the sampler by setting e_range'
                ' to None'.format(maxconf, minsamples))
            minsamples = 1000
    else:
        minsamples = None

    modelx, model = _read_or_calc_samples(sampler,
                                          modelidx,
                                          last_step=last_step,
                                          e_range=e_range,
                                          e_npoints=e_npoints,
                                          n_samples=minsamples,
                                          threads=threads)

    nwalkers = len(model) - 1
    CI = []
    for conf in confs:
        fmin = stats.norm.cdf(-conf)
        fmax = stats.norm.cdf(conf)
        ymin, ymax = [], []
        for fr, y in ((fmin, ymin), (fmax, ymax)):
            nf = int((fr * nwalkers))
            for i in six.moves.range(len(modelx)):
                ysort = np.sort(model[:, i])
                y.append(ysort[nf])

        # create an array from lists ymin and ymax preserving units
        CI.append((u.Quantity(ymin), u.Quantity(ymax)))

    return modelx, CI
Example #19
    def _vertical_profile(self, r, theta):

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(theta.shape)

        # Convert coordinates to cylindrical polars
        z = r * np.cos(theta)
        w = r * np.sin(theta)

        # Find disk scaleheight at each cylindrical radius
        h = self.h_0 * (w / self.r_0) ** self.beta

        # Find disk density at all positions
        rho = (self.r_0 / w) ** (self.beta - self.p) \
            * np.exp(-0.5 * (z / h) ** 2)

        # Geometrical factor
        rho *= (1. - np.sqrt(self.star.radius / w))

        rho *= self.rho_0

        # What about normalization

        return rho
Example #20
    def midplane_cumulative_density(self, r):
        '''
        Find the cumulative column density as a function of radius.

        The cumulative density is measured outwards from the origin, and in
        the midplane.

        Parameters
        ----------
        r : np.ndarray
            Array of values of the radius up to which to tabulate the
            cumulative density.

        Returns
        -------
        rho : np.ndarray
            Array of values of the cumulative density.
        '''

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(r.shape)

        int1 = integrate_powerlaw(self.rmin, r.clip(self.rmin, self.rmax), self.p - self.beta)
        int1 *= self.r_0 ** (self.beta - self.p)

        int2 = integrate_powerlaw(self.rmin, r.clip(self.rmin, self.rmax), -0.5 + self.p - self.beta)
        int2 *= self.star.radius ** 0.5 * self.r_0 ** (self.beta - self.p)

        return self.rho_0 * (int1 - int2)
Example #21
 def lvisc(self, value):
     if value is not None:
         validate_scalar('lvisc', value, domain='positive')
         if self._mdot is not None:
             logger.warning("Overriding value of mdot with value derived from lvisc")
             self._mdot = None
     self._lvisc = value
Example #22
 def __setitem__(self, item, value):
     if isinstance(value, AMRGridView):
         if self.levels == [] and value.levels != []:
             logger.warning("No geometry in target grid - copying from original grid")
             for level in value.levels:
                 level_ref = self.add_level()
                 for grid in level.grids:
                     grid_ref = level_ref.add_grid()
                     grid_ref.nx = grid.nx
                     grid_ref.ny = grid.ny
                     grid_ref.nz = grid.nz
                     grid_ref.xmin, grid_ref.xmax = grid.xmin, grid.xmax
                     grid_ref.ymin, grid_ref.ymax = grid.ymin, grid.ymax
                     grid_ref.zmin, grid_ref.zmax = grid.zmin, grid.zmax
                     grid_ref.quantities = {}
         for ilevel, level_ref in enumerate(self.levels):
             level = value.levels[ilevel]
             for igrid, grid_ref in enumerate(level_ref.grids):
                 grid = level.grids[igrid]
                 grid_ref.quantities[item] = deepcopy(grid.quantities[value.viewed_quantity])
     elif isinstance(value, h5py.ExternalLink):
         filename = value.filename
         base_path = os.path.dirname(value.path)
         array_name = os.path.basename(value.path)
         for ilevel, level_ref in enumerate(self.levels):
             level_path = 'level_%05i' % (ilevel + 1)
             for igrid, grid_ref in enumerate(level_ref.grids):
                 grid_path = 'grid_%05i' % (igrid + 1)
                 grid_ref.quantities[item] = h5py.ExternalLink(filename, os.path.join(base_path, level_path, grid_path, array_name))
     elif value == []:
         for level in self.levels:
             for grid in level.grids:
                 grid.quantities[item] = []
     else:
         raise ValueError('value should be an empty list or an AMRGridView instance')
Example #23
    def set_lte(self, optical_properties, mean_opacities):

        # Specify that emissivities are LTE
        self.is_lte = True

        # Get temperatures from mean opacities
        temperature = mean_opacities.temperature
        specific_energy = mean_opacities.specific_energy

        # Set frequency scale
        planck_nu = planck_nu_range(temperature[0], temperature[-1])
        self.nu = nu_common(planck_nu, optical_properties.nu)

        if planck_nu.min() < optical_properties.nu.min():
            logger.warning("Planck function for lowest temperature not completely covered by opacity function")
            self.nu = self.nu[self.nu >= optical_properties.nu.min()]

        if planck_nu.max() > optical_properties.nu.max():
            logger.warning("Planck function for highest temperature not completely covered by opacity function")
            self.nu = self.nu[self.nu <= optical_properties.nu.max()]

        # Compute opacity to absorption
        kappa_nu = interp1d_fast_loglog(optical_properties.nu,
                                        optical_properties.kappa, self.nu)

        # Compute LTE emissivities
        self.var_name = 'specific_energy'
        self.var = specific_energy
        self.jnu = np.zeros((len(self.nu), len(temperature)))

        # Find LTE emissivities
        for it, T in enumerate(temperature):
            self.jnu[:, it] = kappa_nu * B_nu(self.nu, T)
Example #24
 def rho_0(self, value):
     if value is not None:
         validate_scalar('rho_0', value, domain='positive')
         if self._mdot is not None:
             logger.warning("Overriding value of mdot with value derived from rho_0")
             self._mdot = None
     self._rho_0 = value
Example #25
 def mdot(self, value):
     if value is not None:
         validate_scalar('mdot', value, domain='positive')
         if self._rho_0 is not None:
             logger.warning("Overriding value of rho_0 with value derived from mdot")
             self._rho_0 = None
     self._mdot = value
Example #26
    def from_table(cls, table):
        """Create `PSFKing` from `~astropy.table.Table`.

        Parameters
        ----------
        table : `~astropy.table.Table`
            Table with King PSF info.
        """
        theta_lo = table['THETA_LO'].squeeze()
        theta_hi = table['THETA_HI'].squeeze()
        offset = (theta_hi + theta_lo) / 2
        offset = Angle(offset, unit=table['THETA_LO'].unit)

        energy_lo = table['ENERG_LO'].squeeze()
        energy_hi = table['ENERG_HI'].squeeze()
        energy_lo = Energy(energy_lo, unit=table['ENERG_LO'].unit)
        energy_hi = Energy(energy_hi, unit=table['ENERG_HI'].unit)

        gamma = Quantity(table['GAMMA'].squeeze(), table['GAMMA'].unit)
        sigma = Quantity(table['SIGMA'].squeeze(), table['SIGMA'].unit)

        try:
            energy_thresh_lo = Quantity(table.meta['LO_THRES'], 'TeV')
            energy_thresh_hi = Quantity(table.meta['HI_THRES'], 'TeV')
            return cls(energy_lo, energy_hi, offset, gamma, sigma, energy_thresh_lo, energy_thresh_hi)
        except KeyError:
            log.warning('No safe energy thresholds found. Setting to default')
            return cls(energy_lo, energy_hi, offset, gamma, sigma)
Example #27
    def from_fits(cls, hdu, unit=None):
        """Read ENERGIES fits extension (`~gammapy.utils.energy.Energy`).

        Parameters
        ----------
        hdu: `~astropy.io.fits.BinTableHDU`
            ``ENERGIES`` extension.
        unit : `~astropy.units.UnitBase`, str, None
            Energy unit
        """

        header = hdu.header
        fitsunit = header.get('TUNIT1')

        if fitsunit is None:
            if unit is not None:
                log.warning("No unit found in the FITS header."
                         " Setting it to {0}".format(unit))
                fitsunit = unit
            else:
                raise ValueError("No unit found in the FITS header."
                                 " Please specify a unit")

        energy = cls(hdu.data['Energy'], fitsunit)

        return energy.to(unit)
Example #28
def gaussian_profile(N, phase, fwhm):
    """
    gaussian_profile(N, phase, fwhm):
        Return a gaussian pulse profile with 'N' bins and
        an integrated 'flux' of 1 unit.
            'N' = the number of points in the profile
            'phase' = the pulse phase (0-1)
            'fwhm' = the gaussian pulse's full width at half-max
        Note:  The FWHM of a gaussian is approx 2.35482 sigma
    """
    sigma = fwhm / 2.35482
    mean = phase % 1.0
    phsval = np.arange(N, dtype='d') / float(N)
    if (mean < 0.5):
        phsval = np.where(np.greater(phsval, mean+0.5),
                           phsval-1.0, phsval)
    else:
        phsval = np.where(np.less(phsval, mean-0.5),
                           phsval+1.0, phsval)
    try:
        zs = (phsval-mean)/sigma
        okzinds = np.compress(np.fabs(zs)<20.0, np.arange(N))
        okzs = np.take(zs, okzinds)
        retval = np.zeros(N, 'd')
        np.put(retval, okzinds, np.exp(-0.5*(okzs)**2.0)/(sigma*np.sqrt(2*np.pi)))
        return retval
    except OverflowError:
        log.warning("Problem in gaussian prof:  mean = %f  sigma = %f" % \
              (mean, sigma))
        return np.zeros(N, 'd')
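
A quick sanity check of the normalisation described in the docstring (a sketch assuming numpy and the function above): since the profile is a sampled unit-area gaussian in pulse phase, its mean over the N bins is approximately one.

import numpy as np

prof = gaussian_profile(256, phase=0.25, fwhm=0.05)
print(prof.argmax() / 256.0)  # peak sits near phase 0.25
print(prof.mean())            # ~1.0, i.e. unit integrated flux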
Example #29
def check(header, convention=None, dimensions=[0, 1]):

    ix = dimensions[0] + 1
    iy = dimensions[1] + 1

    # If header does not contain CTYPE keywords, assume that the WCS is
    # missing or incomplete, and replace it with a 1-to-1 pixel mapping
    if 'CTYPE%i' % ix not in header or 'CTYPE%i' % iy not in header:
        log.warning("No WCS information found in header - using pixel coordinates")
        header['CTYPE%i' % ix] = 'PIXEL'
        header['CTYPE%i' % iy] = 'PIXEL'
        header['CRVAL%i' % ix] = 0.
        header['CRVAL%i' % iy] = 0.
        header['CRPIX%i' % ix] = 0.
        header['CRPIX%i' % iy] = 0.
        header['CDELT%i' % ix] = 1.
        header['CDELT%i' % iy] = 1.

    if header['CTYPE%i' % ix][4:] == '-CAR' and header['CTYPE%i' % iy][4:] == '-CAR':

        if header['CTYPE%i' % ix][:4] == 'DEC-' or header['CTYPE%i' % ix][1:4] == 'LAT':
            ilon = iy
            ilat = ix
        elif header['CTYPE%i' % iy][:4] == 'DEC-' or header['CTYPE%i' % iy][1:4] == 'LAT':
            ilon = ix
            ilat = iy
        else:
            ilon = None
            ilat = None

        if ilat is not None and header['CRVAL%i' % ilat] != 0:

            if convention == 'calabretta':
                pass  # we don't need to do anything
            elif convention == 'wells':
                if 'CDELT%i' % ilat not in header:
                    raise Exception("Need CDELT%i to be present for wells convention" % ilat)
                crpix = header['CRPIX%i' % ilat]
                crval = header['CRVAL%i' % ilat]
                cdelt = header['CDELT%i' % ilat]
                crpix = crpix - crval / cdelt
                try:
                    header['CRPIX%i' % ilat] = crpix
                    header['CRVAL%i' % ilat] = 0.
                except:  # older versions of PyFITS
                    header.update('CRPIX%i' % ilat, crpix)
                    header.update('CRVAL%i' % ilat, 0.)

            else:
                raise Exception('''WARNING: projection is Plate Carrée (-CAR) and
                CRVALy is not zero. This can be interpreted either according to
                Wells (1981) or Calabretta (2002). The former defines the
                projection as rectilinear regardless of the value of CRVALy,
                whereas the latter defines the projection as rectilinear only when
                CRVALy is zero. You will need to specify the convention to assume
                by setting either convention='wells' or convention='calabretta'
                when initializing the FITSFigure instance. ''')

    return header
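
A sketch of the fallback branch (assuming astropy.io.fits plus the check function above and the log object it uses): a header without CTYPE keywords is rewritten to a 1-to-1 pixel mapping.

from astropy.io import fits

hdr = fits.Header()
hdr = check(hdr)                     # warns that no WCS information was found
print(hdr['CTYPE1'], hdr['CTYPE2'])  # PIXEL PIXEL
print(hdr['CDELT1'], hdr['CRPIX1'])  # 1.0 0.0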
Example #30
    def align_circulation_with_z(self, circulation=None):
        """
        If the input orbit is a tube orbit, this function aligns the circulation
        axis with the z axis and returns a copy.

        Parameters
        ----------
        circulation : array_like (optional)
            Array of bits that specify the axis about which the orbit circulates. If
            not provided, will compute this using
            :meth:`~gary.dynamics.CartesianOrbit.circulation`. See that method for
            more information.

        Returns
        -------
        orb : :class:`~gary.dynamics.CartesianOrbit`
            A copy of the original orbit object with circulation aligned with the z axis.
        """

        if circulation is None:
            circulation = self.circulation()
        circulation = atleast_2d(circulation, insert_axis=1)

        if self.pos.ndim < 3:
            pos = self.pos[...,np.newaxis]
            vel = self.vel[...,np.newaxis]
        else:
            pos = self.pos
            vel = self.vel

        if circulation.shape[0] != self.ndim or circulation.shape[1] != pos.shape[2]:
            raise ValueError("Shape of 'circulation' array should match the shape"
                             " of the position/velocity (minus the time axis).")

        new_pos = pos.copy()
        new_vel = vel.copy()
        for n in range(pos.shape[2]):
            if circulation[2,n] == 1 or np.all(circulation[:,n] == 0):
                # already circulating about z or box orbit
                continue

            if sum(circulation[:,n]) > 1:
                logger.warning("Circulation about multiple axes - are you sure "
                               "the orbit has been integrated for long enough?")

            if circulation[0,n] == 1:
                circ = 0
            elif circulation[1,n] == 1:
                circ = 1
            else:
                raise RuntimeError("Should never get here...")

            new_pos[circ,:,n] = pos[2,:,n]
            new_pos[2,:,n] = pos[circ,:,n]

            new_vel[circ,:,n] = vel[2,:,n]
            new_vel[2,:,n] = vel[circ,:,n]

        return self.__class__(pos=new_pos, vel=new_vel, t=self.t, potential=self.potential)
Example #31
    def _vertical_profile(self, r, theta):

        self._check_all_set()

        if self.rmax <= self.rmin:
            logger.warning("Ignoring disk, since rmax < rmin")
            return np.zeros(theta.shape)

        # Convert coordinates to cylindrical polars
        z = r * np.cos(theta)
        w = r * np.sin(theta)

        # Find disk scaleheight at each cylindrical radius
        h = self.h_0 * (w / self.r_0)**self.beta

        # Find disk density at all positions
        rho = (self.r_0 / w) ** (self.beta - self.p) \
            * np.exp(-0.5 * (z / h) ** 2)

        rho *= self.rho_0

        # What about normalization

        return rho
Example #32
 def __init__(self):
     """ Show a warning message if the API key is not in the configuration file. """
     super(AstrometryNetClass, self).__init__()
     if not conf.api_key:
         log.warning(
             "Astrometry.net API key not found in configuration file")
         log.warning(
             "You need to manually edit the configuration file and add it")
         log.warning(
             "You may also register it for this session with AstrometryNet.key = 'XXXXXXXX'"
         )
     self._session_id = None
Example #33
def _load_kernel_link(ephem, link=None):
    if link == '':
        raise ValueError("Empty string is not a valid URL")
    try:
        # If we found it earlier just pull it from cache
        l = _ephemeris_hits[ephem]
        download_file(l, cache=True)
        coor.solar_system_ephemeris.set(l)
        return True
    except KeyError:
        pass
    except ValueError:
        log.warning("Previously found ephemeris '{}' at '{}' but "
                    "it is now missing from both the cache and the "
                    "internet location".format(ephem, l))
    if link is not None:
        search_list = [link] + ephemeris_mirrors
    else:
        search_list = ephemeris_mirrors
    for l in search_list:
        ephem_link = urljoin(l, "%s.bsp" % ephem)
        if ephem_link in _ephemeris_failures:
            continue
        try:
            coor.solar_system_ephemeris.set(ephem_link)
            _ephemeris_hits[ephem] = ephem_link
            return True
        except (ValueError, IOError) as e:
            log.warning("Did not find '{}' because: {}, will retry".format(
                ephem_link, e))
            # FIXME: detect which errors are worth retrying seconds later
            # with a longer timeout and only retry those
    log.info("Retrying network requests with a longer timeout")
    for l in search_list:
        ephem_link = urljoin(l, "%s.bsp" % ephem)
        if ephem_link in _ephemeris_failures:
            continue
        try:
            log.debug(
                'Re-trying to set astropy ephemeris to {0}'.format(ephem_link))
            download_file(ephem_link, timeout=300, cache=True)
            log.warning("Only able to download '{}' on a second try".format(
                ephem_link))
            coor.solar_system_ephemeris.set(ephem_link)
            _ephemeris_hits[ephem] = ephem_link
            return True
        except (ValueError, IOError) as e:
            # Retry failures are not surprising
            log.info("Retry did not find '{}' because: {}".format(
                ephem_link, e))
            _ephemeris_failures.add(ephem_link)
    return False
Example #34
    def analyzer_filenames_restored(*args, **kwargs):
        a = analyzer_initter(*args, **kwargs)

        for file_attr, key in zip(
                ['stats_file', 'data_file', 'equal_weighted_file'],
                ['stats.dat', '.txt', 'post_equal_weights.dat']):
            fname = a.__getattribute__(file_attr)
            if not os.path.exists(fname):
                dirname = os.path.dirname(fname)
                prefix = os.path.basename(fname).replace(key, '')
                try:
                    f_lst = os.listdir(os.path.dirname(fname))
                except (FileNotFoundError, OSError) as e:
                    return a # should be handled upstream
                truncated_paths = [os.path.join(dirname, f) for f in f_lst
                        if f.startswith(prefix+_multikeys_shortest[key])]
                if len(truncated_paths) > 1:
                    log.warning("Multiple candidates for file truncated"
                                " by MultiNest. Bad things may happen!")
                elif not len(truncated_paths):
                    return a
                a.__setattr__(file_attr, truncated_paths[0])

        return a
Example #35
    def add_by_bibcode(self, bibcode, interactive=False, **kwargs):
        if ads is None:
            log.error("This action requires the ADS key to be setup.")
            return

        q = ads.SearchQuery(q="identifier:{}".format(bibcode), fl=FIELDS)
        for article in q:
            # Print useful warnings
            if bibcode != article.bibcode:
                log.warning("Requested {} but ADS API returned {}".format(bibcode, article.bibcode))
            if 'NONARTICLE' in article.property:
                # Note: data products are sometimes tagged as NONARTICLE
                log.warning("{} is not an article.".format(article.bibcode))

            if article in self:
                log.warning("{} is already in the db.".format(article.bibcode))
            else:
                if interactive:
                    self.add_interactively(article)
                else:
                    self.add(article, **kwargs)
Example #36
def save_run(filename, sampler, compression=True, clobber=False):
    """
    Save the sampler chain, data table, parameter labels, metadata blobs, and
    run information to an HDF5 file.

    The data table and parameter labels stored in the sampler will also be
    saved to the hdf5 file.

    Parameters
    ----------
    filename : str
        Filename for hdf5 file. If the filename extension is not 'h5' or
        'hdf5', the suffix '_chain.h5' will be appended to the filename.

    sampler : `emcee.EnsembleSampler` instance
        Sampler instance for which chain and run information is saved.

    compression : bool, optional
        Whether gzip compression is applied to the dataset on write. Default is
        True.

    clobber : bool, optional
        Whether to overwrite the output filename if it exists.
    """

    if filename.split('.')[-1] not in ['h5', 'hdf5']:
        filename += '_chain.h5'

    if os.path.exists(filename) and not clobber:
        log.warning(
            'Not writing file because file exists and clobber is False')
        return

    f = h5py.File(filename, 'w')
    group = f.create_group('sampler')
    group.create_dataset('chain', data=sampler.chain, compression=compression)
    group.create_dataset('lnprobability',
                         data=sampler.lnprobability,
                         compression=compression)

    # blobs
    blob = sampler.blobs[-1][0]
    for idx, item in enumerate(blob):
        if isinstance(item, u.Quantity):
            # scalar or array quantity
            units = [item.unit.to_string()]
        elif isinstance(item, float):
            units = ['']
        elif isinstance(item, tuple) or isinstance(item, list):
            arearrs = np.all([isinstance(x, np.ndarray) for x in item])
            if arearrs:
                units = []
                for x in item:
                    if isinstance(x, u.Quantity):
                        units.append(x.unit.to_string())
                    else:
                        units.append('')
        else:
            log.warning(
                'blob number {0} has unknown format and cannot be saved '
                'in HDF5 file'.format(idx))
            continue

        # traverse blobs list. This will probably be slow and there should be a
        # better way
        blob = []
        for step in sampler.blobs:
            for walkerblob in step:
                blob.append(walkerblob[idx])
        blob = u.Quantity(blob).value

        blobdataset = group.create_dataset('blob{0}'.format(idx),
                                           data=blob,
                                           compression=compression)
        if len(units) > 1:
            for j, unit in enumerate(units):
                blobdataset.attrs['unit{0}'.format(j)] = unit
        else:
            blobdataset.attrs['unit'] = units[0]

    if hasattr(sampler, 'data'):
        data = group.create_dataset('data',
                                    data=Table(sampler.data).as_array(),
                                    compression=compression)

        for col in sampler.data.colnames:
            f['sampler/data'].attrs[col + 'unit'] = str(sampler.data[col].unit)

        for key in sampler.data.meta:
            val = sampler.data.meta[key]
            try:
                data.attrs[key] = val
            except TypeError:
                try:
                    data.attrs[key] = str(val)
                except:
                    warnings.warn(
                        "Attribute `{0}` of type {1} of the data table"
                        " of the sampler cannot be written to HDF5 files"
                        "- skipping".format(key, type(val)),
                        AstropyUserWarning)

    # add all run info to group attributes
    if hasattr(sampler, 'run_info'):
        for key in sampler.run_info.keys():
            val = sampler.run_info[key]
            try:
                group.attrs[key] = val
            except TypeError:
                group.attrs[key] = str(val)

    # add other sampler info to the attrs
    group.attrs['acceptance_fraction'] = np.mean(sampler.acceptance_fraction)

    # add labels as individual attrs (there might be a better way)
    for i, label in enumerate(sampler.labels):
        group.attrs['label{0}'.format(i)] = label

    f.close()
Example #37
def save_diagnostic_plots(outname,
                          sampler,
                          modelidxs=None,
                          pdf=False,
                          sed=True,
                          blob_labels=None,
                          last_step=False,
                          dpi=100):
    """
    Generate diagnostic plots.

    - A plot for each of the chain parameters showing walker progression, final
      sample distribution and several statistical measures of this
      distribution: ``outname_chain_parN.png`` (see `naima.plot_chain`).
    - A corner plot of sample density in the two dimensional parameter space of
      all parameter pairs of the run, with the Maximum Likelihood parameter
      vector indicated in blue: ``outname_corner.png`` (see `corner.corner`).
    - A plot for each of the models returned as blobs by the model function.
      The maximum likelihood model is shown, as well as the 1 and 3 sigma
      confidence level contours. The first model will be compared with
      observational data and residuals shown. ``outname_fit_modelN.png`` (see
      `naima.plot_fit` and `naima.plot_blob`).

    Parameters
    ----------
    outname : str
        Name to be used to save diagnostic plot files.

    sampler : `emcee.EnsembleSampler` instance
        Sampler instance from which chains, blobs and data are read.

    modelidxs : iterable of integers, optional
        Model numbers to be plotted. Default: All returned in sampler.blobs

    blob_labels : list of strings, optional
        Label for each of the outputs of the model. They will be used as title
        for the corresponding plot.

    pdf : bool, optional
        Whether to save plots to multipage pdf.
    """

    from .plot import plot_chain, plot_blob, plot_corner
    from matplotlib import pyplot as plt
    # This function should never be interactive
    old_interactive = plt.rcParams['interactive']
    plt.rcParams['interactive'] = False

    if pdf:
        plt.rc('pdf', fonttype=42)
        log.info('Saving diagnostic plots in file '
                 '{0}_plots.pdf'.format(outname))
        from matplotlib.backends.backend_pdf import PdfPages
        outpdf = PdfPages('{0}_plots.pdf'.format(outname))

    # Chains

    for par, label in six.moves.zip(six.moves.range(sampler.chain.shape[-1]),
                                    sampler.labels):
        try:
            log.info('Plotting chain of parameter {0}...'.format(label))
            f = plot_chain(sampler, par, last_step=last_step)
            if pdf:
                f.savefig(outpdf, format='pdf', dpi=dpi)
            else:
                if 'log(' in label or 'log10(' in label:
                    label = label.split('(')[-1].split(')')[0]
                f.savefig('{0}_chain_{1}.png'.format(outname, label), dpi=dpi)
            f.clf()
            plt.close(f)
        except Exception as e:
            log.warning('plot_chain failed for parameter'
                        ' {0} ({1}): {2}'.format(label, par, e))

    # Corner plot

    log.info('Plotting corner plot...')

    f = plot_corner(sampler)
    if f is not None:
        if pdf:
            f.savefig(outpdf, format='pdf', dpi=dpi)
        else:
            f.savefig('{0}_corner.png'.format(outname), dpi=dpi)
        f.clf()
        plt.close(f)

    # Fit

    if modelidxs is None:
        nmodels = len(sampler.blobs[-1][0])
        modelidxs = list(six.moves.range(nmodels))

    if isinstance(sed, bool):
        sed = [sed for idx in modelidxs]

    if blob_labels is None:
        blob_labels = ['Model output {0}'.format(idx) for idx in modelidxs]
    elif len(modelidxs) == 1 and isinstance(blob_labels, str):
        blob_labels = [blob_labels]
    elif len(blob_labels) < len(modelidxs):
        # Add labels
        n = len(blob_labels)
        blob_labels += [
            'Model output {0}'.format(idx) for idx in modelidxs[n:]
        ]

    for modelidx, plot_sed, label in six.moves.zip(modelidxs, sed,
                                                   blob_labels):

        try:
            log.info('Plotting {0}...'.format(label))
            f = plot_blob(sampler,
                          blobidx=modelidx,
                          label=label,
                          sed=plot_sed,
                          n_samples=100,
                          last_step=last_step)
            if pdf:
                f.savefig(outpdf, format='pdf', dpi=dpi)
            else:
                f.savefig('{0}_model{1}.png'.format(outname, modelidx),
                          dpi=dpi)
            f.clf()
            plt.close(f)
        except Exception as e:
            log.warning('plot_blob failed for {0}: {1}'.format(label, e))

    if pdf:
        outpdf.close()

    # set interactive back to original
    plt.rcParams['interactive'] = old_interactive
Example #38
def save_results_table(outname,
                       sampler,
                       format='ascii.ecsv',
                       convert_log=True,
                       last_step=False,
                       include_blobs=True):
    """
    Save an ASCII table with the results stored in the
    `~emcee.EnsembleSampler`.

    The table contains the median, 16th and 84th percentile confidence region
    (~1sigma) for each parameter.

    Parameters
    ----------
    outname : str
        Root name to be used to save the table. ``_results.dat`` will be
        appended for the output filename.

    sampler : `emcee.EnsembleSampler` instance
        Sampler instance from which chains, blobs and data are read.

    format : str, optional
        Format of the saved table. Must be a format string accepted by
        `astropy.table.Table.write`, see the `astropy unified file read/write
        interface documentation
        <https://astropy.readthedocs.org/en/latest/io/unified.html>`_. Only the
        ``ascii.ecsv`` and ``ascii.ipac`` formats are able to preserve all the
        information stored in the ``run_info`` dictionary of the sampler.
        Defaults to ``ascii.ecsv`` if available (only in astropy > v1.0), else
        ``ascii.ipac``.

    convert_log : bool, optional
        Whether to convert natural or base-10 logarithms into original values
        in addition to saving the logarithm value.

    last_step : bool, optional
        Whether to only use the positions in the final step of the run (True,
        default) or the whole chain (False).

    include_blobs : bool, optional
        Whether to save the distribution properties of the scalar blobs in the
        sampler. Default is True.

    Returns
    -------

    table : `~astropy.table.Table`
        Table with the results.
    """

    if not HAS_PYYAML and format == 'ascii.ecsv':
        format = 'ascii.ipac'
        log.warning("PyYAML package is required for ECSV format,"
                    " falling back to {0}...".format(format))
    elif format not in ['ascii.ecsv', 'ascii.ipac']:
        log.warning('The chosen table format does not have an astropy'
                    ' writer that supports metadata writing, no run info'
                    ' will be saved to the file!')

    file_extension = 'dat'
    if format == 'ascii.ecsv':
        file_extension = 'ecsv'

    log.info('Saving results table in {0}_results.{1}'.format(
        outname, file_extension))

    labels = sampler.labels

    if last_step:
        dists = sampler.chain[:, -1, :]
    else:
        dists = sampler.flatchain

    quant = [16, 50, 84]
    # Do we need more info on the distributions?
    t = Table(names=['label', 'median', 'unc_lo', 'unc_hi'],
              dtype=['S72', 'f8', 'f8', 'f8'])
    t['label'].description = 'Name of the parameter'
    t['median'].description = 'Median of the posterior distribution function'
    t['unc_lo'].description = (
        'Difference between the median and the'
        ' {0}th percentile of the pdf, ~1sigma lower uncertainty'.format(
            quant[0]))
    t['unc_hi'].description = (
        'Difference between the {0}th percentile'
        ' and the median of the pdf, ~1sigma upper uncertainty'.format(
            quant[2]))

    metadata = {}
    # Start with info from the distributions used for storing the results
    metadata['n_samples'] = dists.shape[0]
    # save ML parameter vector and best/median loglikelihood
    ML, MLp, MLerr, _ = find_ML(sampler, None)
    metadata['ML_pars'] = [float(p) for p in MLp]
    metadata['MaxLogLikelihood'] = float(ML)

    # compute and save BIC
    BIC = len(MLp) * np.log(len(sampler.data)) - 2 * ML
    metadata['BIC'] = BIC

    # And add all info stored in the sampler.run_info dict
    if hasattr(sampler, 'run_info'):
        metadata.update(sampler.run_info)

    for p, label in enumerate(labels):
        dist = dists[:, p]
        xquant = np.percentile(dist, quant)
        quantiles = dict(six.moves.zip(quant, xquant))
        med = quantiles[50]
        lo, hi = med - quantiles[16], quantiles[84] - med

        t.add_row((label, med, lo, hi))

        if convert_log and ('log10(' in label or 'log(' in label):
            nlabel = label.split('(')[-1].split(')')[0]
            ltype = label.split('(')[0]
            if ltype == 'log10':
                new_dist = 10**dist
            elif ltype == 'log':
                new_dist = np.exp(dist)

            quantiles = dict(
                six.moves.zip(quant, np.percentile(new_dist, quant)))
            med = quantiles[50]
            lo, hi = med - quantiles[16], quantiles[84] - med

            t.add_row((nlabel, med, lo, hi))

    if include_blobs:
        nblobs = len(sampler.blobs[-1][0])
        for idx in range(nblobs):
            blob0 = sampler.blobs[-1][0][idx]

            IS_SCALAR = False
            if isinstance(blob0, u.Quantity):
                if blob0.size == 1:
                    IS_SCALAR = True
                    unit = blob0.unit
            elif np.isscalar(blob0):
                IS_SCALAR = True
                unit = None

            if IS_SCALAR:
                if last_step:
                    blobl = [m[idx] for m in sampler.blobs[-1]]
                else:
                    blobl = []
                    for step in sampler.blobs:
                        for walkerblob in step:
                            blobl.append(walkerblob[idx])
                if unit:
                    dist = np.array([b.value for b in blobl])
                    metadata['blob{0}_unit'.format(idx)] = unit.to_string()
                else:
                    dist = np.array(blobl)

                quantiles = dict(
                    six.moves.zip(quant, np.percentile(dist, quant)))
                med = quantiles[50]
                lo, hi = med - quantiles[16], quantiles[84] - med

                t.add_row(('blob{0}'.format(idx), med, lo, hi))

    if format == 'ascii.ipac':
        # Only keywords are written to IPAC tables
        t.meta['keywords'] = {}
        for di in metadata.items():
            t.meta['keywords'][di[0]] = {'value': di[1]}
    else:
        if format == 'ascii.ecsv':
            # there can be no numpy arrays in the metadata (YAML doesn't like
            # them)
            for di in list(metadata.items()):
                if type(di[1]).__module__ == np.__name__:
                    try:
                        # convert arrays
                        metadata[di[0]] = [np.asscalar(a) for a in di[1]]
                    except TypeError:
                        # convert scalars
                        metadata[di[0]] = np.asscalar(di[1])
        # Save it directly in meta for readability in ECSV
        t.meta.update(metadata)

    t.write('{0}_results.{1}'.format(outname, file_extension), format=format)

    return t
Example #39
 def units(self):
     log.warning("'units' is deprecated; please use 'unit'")
     return self._unit
Example #40
def fix_header(input_fn, output_path=DESTINATION, add_prov_keywords=True):
    """Takes a VPHAS DR2 PSC catalogue tile and makes it compatible with
    the ESO PHASE 3 data products standard."""
    log.info('Opening {}'.format(input_fn))
    f = fits.open(input_fn)
    if len(f[1].data) == 0:
        log.warning('{} has no rows -- skipping'.format(input_fn))
        return ""
    # Extract glon and glat
    match = re.findall(r'vphas_l([0-9]+)_b([+-][0-9]+)\.fits', input_fn)
    l, b = int(match[0][0]), int(match[0][1])
    tilename = 'VPHASDR2_PSC_L{:.0f}_B{:+.0f}'.format(l, b)
    ra, dec = tile_centre(l, b)
    fpra, fpde = tile_footprint(l, b)

    # Set headers
    f[0].header.set('NAXIS', 0, 'No data in the primary extension, just keywords')
    f[0].header.set('ORIGIN', 'ESO-PARANAL', 'European Southern Observatory')
    f[0].header.set('DATE', datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
                    'UT date when this file was written')
    f[0].header.set('TELESCOP', 'ESO-VST', 'ESO Telescope designation')
    f[0].header.set('INSTRUME', 'OMEGACAM', 'Instrument name')
    f[0].header.set('OBJECT', tilename, 'Survey tile designation')
    f[0].header.set('RA', ra, 'Survey tile centre (J2000.0)')
    f[0].header.set('DEC', dec, 'Survey tile centre (J2000.0)')
    f[0].header.set('PROG_ID', 'MULTI', 'ESO programme identification')
    f[0].header.set('PROGID1', '177.D-3023(B)', 'ESO programme identification')
    f[0].header.set('PROGID2', '177.D-3023(C)', 'ESO programme identification')
    f[0].header.set('PROGID3', '177.D-3023(D)', 'ESO programme identification')
    f[0].header.set('PROGID4', '177.D-3023(E)', 'ESO programme identification')
    f[0].header.set('OBSTECH', 'IMAGE,OFFSET', 'Originating science file')
    f[0].header.set('PRODCATG', 'SCIENCE.CATALOGTILE', 'Data product category')
    f[0].header.set('REFERENC', '2014MNRAS.440.2036D', 'Survey paper reference')
    f[0].header.set('SKYSQDEG', 1.0, 'Sky coverage in units of square degrees')   
    # Set the footprint vectors
    for idx, ra in enumerate(fpra):
        f[0].header.set('FPRA{}'.format(idx+1), fpra[idx],
                        'Footprint (J2000.0)')
        f[0].header.set('FPDE{}'.format(idx+1), fpde[idx],
                        'Footprint (J2000.0)')
    # Remove all previous comments
    del f[0].header['COMMENT']
    # Set the provenance pointers
    if add_prov_keywords:
        prov = []
        for band in ['u', 'g', 'r2', 'ha', 'r', 'i']:
            for prefix in np.unique(f[1].data['detectionID_' + band].astype('|S14')):
                if prefix == '':
                    continue
                fn = 'o' + prefix.decode('ascii').replace('-', '_') + '.fits.fz'
                # As a safety measure, make sure the filename has the expected format
                if re.fullmatch(r'o[0-9]{8}_[0-9]*\.fits\.fz', fn) is not None:
                    prov.append(fn)
        for idx, fn in enumerate(sorted(prov)):
            f[0].header.set('PROV{}'.format(idx+1), '184/' + fn, 'Originating science file')

    # Now we set the keywords of the first extension that contains the data
    f[1].header.set('EXTNAME', 'PHASE3CATALOG', 'FITS Extension name')
    del f[1].header['DATE-HDU']
    del f[1].header['STILCLAS']
    del f[1].header['STILVERS']
    # The column metadata to register in the headers is stored in a JSON file
    coldef_fn = os.path.join(SURVEYTOOLS_DATA, "vphas-psc-columns.json")
    colmeta = json.load(open(coldef_fn))[0]
    for kw in f[1].header['TTYPE*']:
        colname = f[1].header[kw]
        if colname in colmeta:
            for field in ['tdisp', 'tucd', 'tunit', 'tcomm']:
                if colmeta[colname][field] != '':
                    f[1].header.set(kw.replace('TTYPE', field),
                                    colmeta[colname][field],
                                    '',
                                    after=kw.replace('TTYPE', 'TFORM'))
        # Indicate which columns deserve a database index
        if colname in INDEXES:
            f[1].header.set(kw.replace('TTYPE', 'TINDX'),
                            True,
                            after=kw.replace('TTYPE', 'tcomm'))

    #shift_u = -0.63
    #shift_g = -10.347
    #for idx in range(len(f[1].data)):
    #    f[1].data['u'][idx] += shift_u
    #    f[1].data['g'][idx] += shift_g
    #f[1].data['u'] += shift_u
    #f[1].data['g'] += shift_g

    output_fn = os.path.join(output_path, tilename + '.fits')
    log.info('Writing {}'.format(output_fn))
    f.writeto(output_fn, overwrite=True, checksum=True)
    return output_fn
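A hedged usage sketch for fix_header; the tile filename and output directory are placeholders that follow the VPHAS DR2 naming convention parsed above:

# Hypothetical input following the 'vphas_l<lon>_b<lat>.fits' pattern
input_fn = 'vphas_l212_b-05.fits'

output_fn = fix_header(input_fn, output_path='.', add_prov_keywords=True)
if output_fn:
    print('Wrote', output_fn)
else:
    print('Tile was empty and was skipped')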
Exemple #41
0
def load_FPorbit(orbit_filename):
    """Load data from an (RXTE or NICER) FPorbit file

    Reads a FPorbit FITS file

    Parameters
    ----------
    orbit_filename : str
        Name of file to load

    Returns
    -------
    FPorbit_table : astropy.table.Table
        Table containing MJD_TT, X, Y, Z, Vx, Vy, Vz columns

    """
    # Load orbit FITS file
    hdulist = pyfits.open(orbit_filename)
    # log.info('orb file HDU name is {0}'.format(hdulist[1].name))
    if hdulist[1].name not in ("ORBIT", "XTE_PE"):
        log.error(
            "NICER orb file first extension is {0}. It should be ORBIT".format(
                hdulist[1].name
            )
        )
    FPorbit_hdr = hdulist[1].header
    FPorbit_dat = hdulist[1].data

    log.info("Opened FPorbit FITS file {0}".format(orbit_filename))
    # TIMESYS should be 'TT'

    # TIMEREF should be 'LOCAL', since no delays are applied

    timesys = FPorbit_hdr["TIMESYS"]
    log.debug("FPorbit TIMESYS {0}".format(timesys))
    timeref = FPorbit_hdr["TIMEREF"]
    log.debug("FPorbit TIMEREF {0}".format(timeref))

    mjds_TT = read_fits_event_mjds(hdulist[1])
    mjds_TT = mjds_TT * u.d
    log.debug("FPorbit spacing is {0}".format((mjds_TT[1] - mjds_TT[0]).to(u.s)))
    X = FPorbit_dat.field("X") * u.m
    Y = FPorbit_dat.field("Y") * u.m
    Z = FPorbit_dat.field("Z") * u.m
    Vx = FPorbit_dat.field("Vx") * u.m / u.s
    Vy = FPorbit_dat.field("Vy") * u.m / u.s
    Vz = FPorbit_dat.field("Vz") * u.m / u.s
    log.info(
        "Building FPorbit table covering MJDs {0} to {1}".format(
            mjds_TT.min(), mjds_TT.max()
        )
    )
    FPorbit_table = Table(
        [mjds_TT, X, Y, Z, Vx, Vy, Vz],
        names=("MJD_TT", "X", "Y", "Z", "Vx", "Vy", "Vz"),
        meta={"name": "FPorbit"},
    )
    # Make sure table is sorted by time
    log.debug("Sorting FPorbit table")
    FPorbit_table.sort("MJD_TT")
    # Now delete any bad entries where the positions are 0.0
    idx = np.where(
        np.logical_and(FPorbit_table["X"] != 0.0, FPorbit_table["Y"] != 0.0)
    )[0]
    if len(idx) != len(FPorbit_table):
        log.warning(
            "Dropping {0} zero entries from FPorbit table".format(
                len(FPorbit_table) - len(idx)
            )
        )
        FPorbit_table = FPorbit_table[idx]
    return FPorbit_table
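A short usage sketch, assuming an FPorbit file produced by the NICER/RXTE pipelines (the filename is a placeholder):

orbit_table = load_FPorbit('FPorbit_Day0123')  # placeholder filename
print(orbit_table.colnames)   # ['MJD_TT', 'X', 'Y', 'Z', 'Vx', 'Vy', 'Vz']
print(orbit_table['MJD_TT'].min(), orbit_table['MJD_TT'].max())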
Exemple #42
0
 def add_url_to_check(self, url):
     """Adds a url to the crawling queue."""
     if url in self.visited:
         log.warning('already visited {0}'.format(url))
     else:
         self.urlqueue.append(url)
Exemple #43
0
def _read_data_fitszilla(lchdulist):
    """Open a fitszilla FITS file and read all relevant information."""
    is_new_fitszilla = np.any(['coord' in i.name.lower() for i in lchdulist])

    # ----------- Extract generic observation information ------------------
    headerdict = dict(lchdulist[0].header.items())
    source = lchdulist[0].header['SOURCE']
    site = lchdulist[0].header['ANTENNA'].lower()
    receiver = lchdulist[0].header['RECEIVER CODE']

    ra = lchdulist[0].header['RIGHTASCENSION'] * u.rad
    dec = lchdulist[0].header['DECLINATION'] * u.rad
    ra_offset = dec_offset = az_offset = el_offset = 0 * u.rad
    if 'RightAscension Offset' in lchdulist[0].header:
        ra_offset = \
            lchdulist[0].header['RightAscension Offset'] * u.rad
    if 'Declination Offset' in lchdulist[0].header:
        dec_offset = lchdulist[0].header['Declination Offset'] * u.rad
    if 'Azimuth Offset' in lchdulist[0].header:
        az_offset = lchdulist[0].header['Azimuth Offset'] * u.rad
    if 'Elevation Offset' in lchdulist[0].header:
        el_offset = lchdulist[0].header['Elevation Offset'] * u.rad

    # ----------- Read the list of channel ids ------------------
    section_table_data = lchdulist['SECTION TABLE'].data
    chan_ids = get_value_with_units(section_table_data, 'id')
    nbin_per_chan = get_value_with_units(section_table_data, 'bins')
    sample_rate = get_value_with_units(section_table_data, 'sampleRate')
    try:
        bw_section = \
            get_value_with_units(section_table_data, 'bandWidth')
        fr_section = \
            get_value_with_units(section_table_data, 'frequency')
    except KeyError:
        bw_section = None
        fr_section = None
    integration_time = lchdulist['SECTION TABLE'].header['Integration'] * u.ms
    if len(list(set(nbin_per_chan))) > 1:
        raise ValueError('Only datasets with the same nbin per channel are '
                         'supported at the moment')
    nbin_per_chan = list(set(nbin_per_chan))[0]
    types = get_value_with_units(section_table_data, 'type')
    if 'stokes' in types:
        is_polarized = True
    else:
        is_polarized = False

    # Check. If backend is not specified, use Total Power
    try:
        backend = lchdulist[0].header['BACKEND NAME']
    except Exception:
        if 'stokes' in types:
            if nbin_per_chan == 2048:
                backend = 'XARCOS'
            else:
                backend = 'SARDARA'
        elif 'spectra' in types:
            backend = 'SARDARA'
        else:
            backend = 'TP'

    # ----------- Read the list of RF inputs, feeds, polarization, etc. --
    rf_input_data = lchdulist['RF INPUTS'].data
    feeds = get_value_with_units(rf_input_data, 'feed')
    IFs = get_value_with_units(rf_input_data, 'ifChain')
    polarizations = get_value_with_units(rf_input_data, 'polarization')
    sections = get_value_with_units(rf_input_data, 'section')
    frequencies_rf = get_value_with_units(rf_input_data, 'frequency')
    bandwidths_rf = get_value_with_units(rf_input_data, 'bandWidth')
    local_oscillator = get_value_with_units(rf_input_data, 'localOscillator')
    try:
        cal_mark_temp = get_value_with_units(rf_input_data, 'calibrationMark')
    except KeyError:
        # Old, stupid typo
        cal_mark_temp = get_value_with_units(rf_input_data, 'calibratonMark')

    if bw_section is not None:
        bandwidths_section = [bw_section[i] for i in sections]
        frequencies_section = [fr_section[i] for i in sections]
        frequencies_section = [f + l for (f, l) in zip(frequencies_section,
                                                       local_oscillator)]

    if backend == 'TP' or bw_section is None:
        frequencies, bandwidths = frequencies_rf, bandwidths_rf
    else:
        frequencies, bandwidths = frequencies_section, bandwidths_section

    combinations = list(zip(frequencies, bandwidths))
    combination_idx = np.arange(len(combinations))

    # Solve stupid problem with old CCB data
    if receiver.lower() == 'ccb':
        feeds[:] = 0

    if len(set(combinations)) > 1:
        chan_names = [_chan_name(f, p, c)
                      for f, p, c in zip(feeds, polarizations,
                                         combination_idx)]
    else:
        chan_names = [_chan_name(f, p)
                      for f, p in zip(feeds, polarizations)]

    # ----- Read the offsets of different feeds (nonzero only if multifeed)--
    feed_input_data = lchdulist['FEED TABLE'].data
    # Add management of historical offsets.
    # Note that we need to add the units by hand in this case.
    xoffsets = get_value_with_units(feed_input_data, 'xOffset', default='rad')
    yoffsets = get_value_with_units(feed_input_data, 'yOffset', default='rad')

    relpowers = get_value_with_units(feed_input_data, 'relativePower')

    # -------------- Read data!-----------------------------------------
    datahdu = lchdulist['DATA TABLE']
    # N.B.: there is an increase in memory usage here. This is just because
    # data are being read from the file at this point, not before.
    data_table_data = Table(datahdu.data)
    tempdata = Table(lchdulist['ANTENNA TEMP TABLE'].data)

    for col in data_table_data.colnames:
        if col == col.lower():
            continue
        data_table_data.rename_column(col, col.lower())
    for col in tempdata.colnames:
        if col == col.lower():
            continue
        tempdata.rename_column(col, col.lower())

    is_old_spectrum = 'SPECTRUM' in list(datahdu.header.values())
    if is_old_spectrum:
        data_table_data.rename_column('spectrum', 'ch0')
        sections = np.array([0, 0])

    existing_columns = [chn for chn in data_table_data.colnames
                        if chn.startswith('ch')]
    if existing_columns == []:
        raise ValueError('Invalid data')

    is_spectrum = nbin_per_chan > 1

    is_single_channel = len(set(combinations)) == 1

    good = np.ones(len(feeds), dtype=bool)

    for i, s in enumerate(sections):
        section_name = 'ch{}'.format(s)
        if section_name not in existing_columns:
            good[i] = False
    allfeeds = feeds
    feeds = allfeeds[good]
    IFs = IFs[good]
    polarizations = polarizations[good]
    sections = sections[good]

    if is_spectrum:
        nchan = len(chan_ids)

        sample_channel = existing_columns[0]

        _, nbins = data_table_data[sample_channel].shape

        # Development version of SARDARA -- will it remain the same?
        if nbin_per_chan == nbins:
            IFs = np.zeros_like(IFs)

        if nbin_per_chan * nchan * 2 == nbins \
                and not is_polarized:
            warnings.warn('Data appear to contain polarization information '
                          'but are classified as simple, not stokes, in the '
                          'Section table.')
            is_polarized = True

        if nbin_per_chan != nbins and nbin_per_chan * nchan != nbins and \
                nbin_per_chan * nchan * 2 != nbins and not is_polarized:
            raise ValueError('Something wrong with channel subdivision: '
                             '{} bins/channel, {} channels, '
                             '{} total bins'.format(nbin_per_chan, nchan,
                                                    nbins))

        for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
            c = s
            if is_single_channel:
                c = None
            section_name = 'ch{}'.format(s)
            ch = _chan_name(f, p, c)
            start, end = ic * nbin_per_chan, (ic + 1) * nbin_per_chan
            data_table_data[ch] = \
                data_table_data[section_name][:, start:end]

        if is_polarized:
            # for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
            for s in list(set(sections)):
                f = feeds[sections == s][0]
                c = s
                if is_single_channel:
                    c = None

                section_name = 'ch{}'.format(s)
                qname, uname = _chan_name(f, 'Q', c), _chan_name(f, 'U', c)
                qstart, qend = 2 * nbin_per_chan, 3 * nbin_per_chan
                ustart, uend = 3 * nbin_per_chan, 4 * nbin_per_chan
                data_table_data[qname] = \
                    data_table_data[section_name][:, qstart:qend]
                data_table_data[uname] = \
                    data_table_data[section_name][:, ustart:uend]

                chan_names += [qname, uname]

        for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
            section_name = 'ch{}'.format(s)
            if section_name in data_table_data.colnames:
                data_table_data.remove_column(section_name)
    else:
        for ic, ch in enumerate(chan_names):
            data_table_data[ch] = \
                data_table_data['ch{}'.format(chan_ids[ic])]

    # ----------- Read temperature data, if possible ----------------
    try:
        for ic, ch in enumerate(chan_names):
            td = np.asarray(tempdata['ch{}'.format(chan_ids[ic])])
            Ntemp = td.size
            Ndata = data_table_data['time'].size

            temp_func = interp1d(np.arange(Ntemp), td)

            data_table_data[ch + '-Temp'] = \
                temp_func(np.arange(Ndata) * (Ntemp / Ndata))

    except Exception as e:
        warnings.warn("Could not read temperature information from file. "
                      "This is usually a minor problem.\n"
                 "Exception: {}".format(str(e)))
        for ic, ch in enumerate(chan_names):
            data_table_data[ch + '-Temp'] = \
                np.zeros_like(data_table_data['time'])

    info_to_retrieve = \
        ['time', 'derot_angle', 'weather', 'par_angle', 'flag_track',
         'flag_cal'] + \
            [ch + '-Temp' for ch in chan_names]

    new_table = Table()

    new_table.meta.update(headerdict)
    new_table.meta['SOURCE'] = source
    new_table.meta['site'] = site
    new_table.meta['backend'] = backend
    new_table.meta['receiver'] = receiver
    new_table.meta['RA'] = ra
    new_table.meta['Dec'] = dec
    new_table.meta['channels'] = nbin_per_chan
    new_table.meta['VLSR'] = new_table.meta['VLSR'] * u.Unit("km/s")

    for i, off in zip("ra,dec,el,az".split(','),
                      [ra_offset, dec_offset, el_offset, az_offset]):
        new_table.meta[i + "_offset"] = off

    for info in info_to_retrieve:
        new_table[info] = data_table_data[info]

    if not _check_derotator(new_table['derot_angle']):
        log.warning('Derotator angle looks weird. Setting to 0')
        new_table['derot_angle'][:] = 0

    # Duplicate raj and decj columns (in order to be corrected later)
    Nfeeds = np.max(allfeeds) + 1
    new_table['ra'] = \
        np.tile(data_table_data['raj2000'],
                (Nfeeds, 1)).transpose()
    new_table['dec'] = \
        np.tile(data_table_data['decj2000'],
                (Nfeeds, 1)).transpose()
    new_table['el'] = \
        np.tile(data_table_data['el'],
                (Nfeeds, 1)).transpose()
    new_table['az'] = \
        np.tile(data_table_data['az'],
                (Nfeeds, 1)).transpose()

    new_table.meta['is_skydip'] = \
        infer_skydip_from_elevation(data_table_data['el'],
                                    data_table_data['az'])

    for info in ['ra', 'dec', 'az', 'el', 'derot_angle']:
        new_table[info].unit = u.radian

    if not is_new_fitszilla:
        update_table_with_offsets(new_table, xoffsets, yoffsets, inplace=True)
    else:
        for i in range(len(xoffsets)):
            try:
                ext = lchdulist['Coord{}'.format(i)]
                extdata = ext.data
                ra, dec = extdata['raj2000'], extdata['decj2000']
                el, az = extdata['el'], extdata['az']
            except KeyError:
                ra, dec = new_table['ra'][:, 0], new_table['dec'][:, 0]
                el, az = new_table['el'][:, 0], new_table['az'][:, 0]

            new_table['ra'][:, i] = ra
            new_table['dec'][:, i] = dec
            new_table['el'][:, i] = el
            new_table['az'][:, i] = az

    lchdulist.close()

    # So ugly. But it works
    filtered_frequencies = [f for (f, g) in zip(frequencies, good) if g]

    for i, fr in enumerate(filtered_frequencies):
        f = feeds[i]
        s = sections[i]
        ic = IFs[i]
        p = polarizations[i]
        b = bandwidths[i]
        lo = local_oscillator[i]
        cal = cal_mark_temp[i]

        c = s
        if is_single_channel:
            c = None
        chan_name = _chan_name(f, p, c)
        if bandwidths[ic] < 0:
            frequencies[ic] -= bandwidths[ic]
            bandwidths[ic] *= -1
            # Reverse the frequency axis of each sample (negative bandwidth)
            for row in range(data_table_data[chan_name].shape[0]):
                data_table_data[chan_name][row, :] = \
                    data_table_data[chan_name][row, ::-1]

        new_table[chan_name] = \
            data_table_data[chan_name] * relpowers[feeds[ic]]

        new_table[chan_name + '-filt'] = \
            np.ones(len(data_table_data[chan_name]), dtype=bool)
        data_table_data.remove_column(chan_name)

        newmeta = \
            {'polarization': polarizations[ic],
             'feed': int(f),
             'IF': int(ic),
             'frequency': fr.to("MHz"),
             'bandwidth': b.to("MHz"),
             'sample_rate': sample_rate[s],
             'sample_time': (1 / (sample_rate[s].to(u.Hz))).to('s'),
             'local_oscillator': lo.to("MHz"),
             'cal_mark_temp': cal.to("K"),
             'integration_time': integration_time.to('s'),
             'xoffset': xoffsets[f].to(u.rad),
             'yoffset': yoffsets[f].to(u.rad),
             'relpower': float(relpowers[f])
             }
        new_table[chan_name].meta.update(headerdict)
        new_table[chan_name].meta.update(new_table.meta)
        new_table[chan_name].meta.update(newmeta)

    if is_polarized:
        for s in list(set(sections)):
            feed = feeds[sections == s][0]
            c = s
            if is_single_channel:
                c = None
            for stokes_par in 'QU':
                chan_name = _chan_name(feed, stokes_par, c)
                try:
                    new_table[chan_name] = \
                        data_table_data[chan_name]
                except KeyError:
                    continue
                sample_time = (1 / (sample_rate[s].to(u.Hz)))

                newmeta = \
                    {'polarization': stokes_par,
                     'feed': int(feed),
                     'IF': -1,
                     # There are two IFs for each section
                     'frequency': frequencies[2 * s].to("MHz"),
                     'bandwidth': bandwidths[2 * s].to("MHz"),
                     'sample_rate': sample_rate[s],
                     'sample_time': sample_time.to('s'),
                     'local_oscillator': local_oscillator[2 * s].to("MHz"),
                     'cal_mark_temp': cal_mark_temp[2 * s].to("K"),
                     'integration_time': integration_time.to('s'),
                     'xoffset': xoffsets[feed].to(u.rad),
                     'yoffset': yoffsets[feed].to(u.rad),
                     'relpower': 1.
                     }
                new_table[chan_name].meta.update(headerdict)
                new_table[chan_name].meta.update(new_table.meta)
                new_table[chan_name].meta.update(newmeta)

                new_table[chan_name + '-filt'] = \
                    np.ones(len(data_table_data[chan_name]), dtype=bool)
                data_table_data.remove_column(chan_name)

    return new_table
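A minimal sketch of how this reader is typically driven, assuming a fitszilla acquisition file on disk (the filename is a placeholder):

from astropy.io import fits

with fits.open('20180101-120000-scan0001.fits') as lchdulist:
    scan_table = _read_data_fitszilla(lchdulist)

print(scan_table.meta['backend'], scan_table.meta['receiver'])
print([c for c in scan_table.colnames if c.endswith('-filt')])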
Exemple #44
0
def overscan_estimate(ccd_in,
                      meta=None,
                      master_bias=None,
                      binsize=None,
                      min_width=1,
                      max_width=8,
                      box_size=100,
                      min_hist_val=10,
                      show=False,
                      *args,
                      **kwargs):
    """Estimate overscan in ADU in the absense of a formal overscan region

    For biases, returns in the median of the image.  For all others,
    uses the minimum of: (1) the first peak in the histogram of the
    image or (2) the minimum of the median of four boxes at the
    corners of the image (specific to the IoIO coronagraph)

    Works best if bias shape (particularly bias ramp) is subtracted
    first.  Will subtract bias if bias is supplied and has not been
    subtracted.

    Parameters
    ----------
    ccd_in : `~astropy.nddata.CCDData` or filename
        Image from which to extract overscan estimate

    meta : `astropy.io.fits.Header` or None
        reference to metadata of ccd into which to write OVERSCAN_* cards.
        If None, no metadata will be returned

    master_bias : `~astropy.nddata.CCDData`, filename, or None
        Bias to subtract from ccd before estimate is calculated.
        Improves accuracy by removing bias ramp.  Bias can be in units
        of ADU or electrons and is converted using the specified gain.
        If bias has already been subtracted, this step will be skipped
        but the bias header will be used to extract readnoise and gain
        using the *_key keywords.  Default is ``None``.

    binsize : float or None, optional
        The binsize to use for the histogram.  If None, binsize is
        (readnoise in ADU)/4.  Default = None

    min_width : int, optional
        Minimum width peak to search for in histogram.  Keep in mind
        histogram bins are binsize ADU wide.  Default = 1

    max_width : int, optional
        See min_width.  Default = 8

    box_size : int
        Edge size of square box used to extract biweight median location
        from the corners of the image for this method of  overscan
        estimation.  Default = 100

    show : boolean
       Show image with min/max set to highlight overscan pixels, and the
       histogram with the adopted overscan value marked.  Default is False
       [consider making this a boolean or the name of a plot file]

    """
    if ccd_in.meta.get('overscan_value') is not None:
        # Overscan has been subtracted in a previous reduction step,
        # so exit quietly
        return 0

    # Work with a copy since we mess with both the ccd.data and .meta
    ccd = ccd_in.copy()
    # Get CCD characteristics
    ccd.meta = sx694.metadata(ccd.meta)
    if meta is None:
        meta = ccd.meta
    if ccd.unit != u.adu:
        # For now don't get fancy with unit conversion
        raise ValueError('CCD units must be in ADU for overscan estimation')
    if ccd.meta['IMAGETYP'] == "BIAS":
        overscan = np.median(ccd)
        meta['HIERARCH OVERSCAN_MEDIAN'] = (overscan, 'ADU')
        meta['HIERARCH OVERSCAN_METHOD'] = \
            ('median', 'Method used for overscan estimation')
        return overscan

    # Prepare for histogram method of overscan estimation.  These
    # keywords are guaranteed to be in meta because we put them there
    # in ccd_metadata
    readnoise = ccd.meta['RDNOISE']
    gain = ccd.meta['GAIN']
    if ccd.meta.get('subtract_bias') is None and master_bias is not None:
        # Bias has not been subtracted and we have a bias around to be
        # able to do that subtraction
        if isinstance(master_bias, str):
            bias = CorData.read(master_bias)
            meta['HIERARCH OVERSCAN_MASTER_BIAS'] = 'OSBIAS'
            meta['OSBIAS'] = master_bias
        else:
            # Work with a copy since we are going to muck with it
            bias = master_bias.copy()
            meta['HIERARCH OVERSCAN_MASTER_BIAS'] = 'CCDData object provided'
        # Improve our readnoise (measured) and gain (probably not
        # re-measured) values
        readnoise = bias.meta['RDNOISE']
        gain = bias.meta['GAIN']
        if bias.unit is u.electron:
            # Convert bias back to ADU for subtraction
            bias = bias.divide(gain * u.electron / u.adu)
        ccd = ccd.subtract(bias)
        ccd.meta['HIERARCH subtract_bias'] = True
    if type(ccd) != CorData and ccd.meta.get('subtract_bias') is None:
        # Don't gunk up logs when we are taking data, but subclasses
        # of CorObs (e.g. RedCorObs) will produce message
        log.warning(
            'overscan_estimate: bias has not been subtracted, which can lead to inaccuracy of overscan estimate'
        )
    # The coronagraph creates a margin of un-illuminated pixels on the
    # CCD.  These are great for estimating the bias and scattered
    # light for spontaneous subtraction.
    # Corners method
    s = ccd.shape
    bs = box_size
    c00 = biweight_location(ccd[0:bs, 0:bs])
    c10 = biweight_location(ccd[s[0] - bs:s[0], 0:bs])
    c01 = biweight_location(ccd[0:bs, s[1] - bs:s[1]])
    c11 = biweight_location(ccd[s[0] - bs:s[0], s[1] - bs:s[1]])
    corners_method = min(c00, c10, c01, c11)
    # Histogram method.  The first peak is the bias, the second is the
    # ND filter.  Note that the 1.25" filters do a better job at this
    # than the 2" filters but with carefully chosen parameters, the
    # first small peak can be spotted.
    if binsize is None:
        # Calculate binsize based on readnoise in ADU, but oversample
        # by 4.  Note need to convert from Quantity to float
        binsize = readnoise / gain / 4.
    im_hist, im_hist_centers = hist_of_im(ccd, binsize)
    # Note that after bias subtraction, there is sometimes some noise
    # at low counts.  We expect a lot of pixels in the histogram, so filter
    good_idx = np.flatnonzero(im_hist > min_hist_val)
    im_hist = im_hist[good_idx]
    im_hist_centers = im_hist_centers[good_idx]
    # The arguments to linspace are the critical parameters I played
    # with together with binsize to get the first small peak to be recognized
    im_peak_idx = signal.find_peaks_cwt(im_hist,
                                        np.linspace(min_width, max_width))
    hist_method = im_hist_centers[im_peak_idx[0]]
    overscan_methods = ['corners', 'histogram']
    overscan_values = np.asarray((corners_method, hist_method))
    meta['HIERARCH OVERSCAN_CORNERS'] = (corners_method, 'ADU')
    meta['HIERARCH OVERSCAN_HISTOGRAM'] = (hist_method, 'ADU')
    o_idx = np.argmin(overscan_values)
    overscan = overscan_values[o_idx]
    meta['HIERARCH OVERSCAN_METHOD'] = (overscan_methods[o_idx],
                                        'Method used for overscan estimation')
    if show:
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8.5, 9))
        ccds = ccd.subtract(1000 * u.adu)
        hist_range = 5 * readnoise / gain
        vmin = overscan - hist_range - 1000
        vmax = overscan + hist_range - 1000
        ax1.imshow(ccds,
                   origin='lower',
                   cmap=plt.cm.gray,
                   filternorm=0,
                   interpolation='none',
                   vmin=vmin,
                   vmax=vmax)
        ax1.set_title('Image minus 1000 ADU')
        ax2.plot(im_hist_centers, im_hist)
        ax2.set_yscale("log")
        ax2.set_xscale("log")
        ax2.axvline(overscan, color='r')
        # https://stackoverflow.com/questions/13413112/creating-labels-where-line-appears-in-matplotlib-figure
        # the x coords of this transformation are data, and the
        # y coord are axes
        trans = transforms.blended_transform_factory(ax2.transData,
                                                     ax2.transAxes)
        ax2.set_title('Histogram')
        ax2.text(overscan + 20,
                 0.05,
                 overscan_methods[o_idx] +
                 ' overscan = {:.2f}'.format(overscan),
                 rotation=90,
                 transform=trans,
                 verticalalignment='bottom')
        plt.show()
    return overscan
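A hedged usage sketch; CorData and the filenames are assumptions taken from the surrounding project and are only placeholders here:

ccd = CorData.read('IoIO_raw_image.fits')          # placeholder filename
overscan = overscan_estimate(ccd, meta=ccd.meta,
                             master_bias='master_bias.fits',  # placeholder
                             show=False)
print('Estimated overscan: {:.1f} ADU'.format(overscan))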
Exemple #45
0
def keyword_arithmetic_image_handler(meta,
                                     operand1,
                                     operation,
                                     operand2,
                                     keylist=None,
                                     stdev_threshold=10):
    """Convert an image to a scalar for FITS keyword arithmetic"""

    # This is hard to do in general just by looking at the data in
    # operand2, except in the special case where the stdev of the data
    # is 0 (a scalar value turned into an array).  Start with the cases
    # where we know the answer
    if hasattr(operand2, 'meta'):
        imagetyp = operand2.meta.get('imagetyp')
        if imagetyp is None:
            # Trigger general code below
            o2 = None
        else:
            imagetyp = imagetyp.lower()
            if imagetyp in ['bias', 'dark']:
                # For the coronagraph reduction scheme, biases and
                # darks are small perturbations around zero.  Other
                # projects may incorporate offset (overscan) into the
                # bias
                o2 = 0
            elif imagetyp == 'flat':
                # Check for unprocessed flat
                if operand2.meta.get('flatdiv') is None:
                    # med is not the best value, since the flats roll
                    # off significantly, but this is the wrong place
                    # to calculate it correctly.  Generally, flats are
                    # divided by a scalar first, their max val, and
                    # then the FLATDIV keyword is written, so this
                    # code should not be called
                    o2 = np.median(operand2)
                    log.warning(
                        'Arithmetic with unprocessed flat, keyword arithmetic will be off unless the median of the flat is close to the desired characteristic value'
                    )
                else:
                    # Processed flats are normalized to 1
                    o2 = 1
            else:
                # Trigger general code below
                o2 = None
    else:
        # No metadata to help us
        o2 = None

    if o2 is None:
        # If we made it here, we need to use a general approach to
        # recognize structure.  This fails for biases and darks, hence
        # the need for the code above
        med = np.median(operand2)
        stdev = np.std(operand2)
        if med == 0:
            o2 = 0
        elif stdev_threshold * stdev < np.abs(med):
            # The most common case here is likely to be stdev = 0, or
            # numbers that have been made into arrays to make use of
            # the NDData uncertainty propagation mixin code
            o2 = med
        else:
            o2 = None

    return o2
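A small sketch of the intended behaviour, using astropy.nddata.CCDData stand-ins for the operands (illustrative only):

import numpy as np
from astropy.nddata import CCDData

sci = CCDData(np.ones((10, 10)) * 100., unit='adu',
              meta={'imagetyp': 'LIGHT'})
bias = CCDData(np.zeros((10, 10)), unit='adu', meta={'imagetyp': 'BIAS'})

# Biases are treated as zero-level perturbations, so the scalar is 0
print(keyword_arithmetic_image_handler(sci.meta, sci, 'subtract', bias))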
Exemple #46
0
def get_sampler(
    data_table=None,
    p0=None,
    model=None,
    prior=None,
    nwalkers=500,
    nburn=100,
    guess=True,
    interactive=False,
    prefit=False,
    labels=None,
    threads=4,
    data_sed=None,
):
    """Generate a new MCMC sampler.

    Parameters
    ----------
    data_table : `~astropy.table.Table` or list of `~astropy.table.Table`
        Table containing the observed spectrum. If multiple tables are passed
        as a string, they will be concatenated in the order given. Each table
        needs at least these columns, with the appropriate associated units
        (with the physical type indicated in brackets below) as either a
        `~astropy.units.Unit` instance or parseable string:

        - ``energy``: Observed photon energy [``energy``]
        - ``flux``: Observed fluxes [``flux`` or ``differential flux``]
        - ``flux_error``: 68% CL gaussian uncertainty of the flux [``flux`` or
          ``differential flux``]. It can also be provided as ``flux_error_lo``
          and ``flux_error_hi`` (see below).

        Optional columns:

        - ``energy_width``: Width of the energy bin [``energy``], or
        - ``energy_error``: Half-width of the energy bin [``energy``], or
        - ``energy_error_lo`` and ``energy_error_hi``: Distance from bin center
          to lower and upper bin edges [``energy``], or
        - ``energy_lo`` and ``energy_hi``: Energy edges of the corresponding
          energy bin [``energy``]
        - ``flux_error_lo`` and ``flux_error_hi``: 68% CL gaussian lower and
          upper uncertainties of the flux.
        - ``ul``: Flag to indicate that a flux measurement is an upper limit.
        - ``flux_ul``: Upper limit to the flux. If not present, the ``flux``
          column will be taken as an upper limit for those measurements with
          the ``ul`` flag set to True or 1.

        The ``keywords`` metadata field of the table can be used to provide the
        confidence level of the upper limits with the keyword ``cl``, which
        defaults to 90%. The `astropy.io.ascii` reader can recover all
        the needed information from ASCII tables in the
        :class:`~astropy.io.ascii.Ipac` and :class:`~astropy.io.ascii.Daophot`
        formats, and everything except the ``cl`` keyword from tables in the
        :class:`~astropy.io.ascii.Sextractor` format.  For the latter, the cl
        keyword can be added after reading the table with::

            data.meta['keywords']['cl']=0.99

    p0 : array
        Initial position vector. The distribution for the ``nwalkers`` walkers
        will be computed as a multidimensional gaussian of width 5% around the
        initial position vector ``p0``.
    model : function
        A function that takes a vector in the parameter space and the data
        dictionary, and returns the expected fluxes at the energies in the
        spectrum. Additional return objects will be saved as blobs in the
        sampler chain, see `the emcee documentation for the
        format
        <http://dan.iel.fm/emcee/current/user/advanced/#arbitrary-metadata-blobs>`_.
    prior : function, optional
        A function that takes a vector in the parameter space and returns the
        log-likelihood of the Bayesian prior. Parameter limits can be specified
        through a uniform prior, returning 0. if the vector is within the
        parameter bounds and ``-np.inf`` otherwise.
    nwalkers : int, optional
        The number of Goodman & Weare “walkers”. Default is 500.
    nburn : int, optional
        Number of burn-in steps. After ``nburn`` steps, the sampler is reset
        and chain history discarded. It is necessary to settle the sampler into
        the maximum of the parameter space density. Default is 100.
    labels : iterable of strings, optional
        Labels for the parameters included in the position vector ``p0``. If
        not provided ``['par1','par2', ... ,'parN']`` will be used.
    threads : int, optional
        Number of threads to use for sampling. Default is 4.
    guess : bool, optional
        Whether to attempt to guess the normalization (first) parameter of the
        model. Default is True.
    interactive : bool, optional
        Whether to launch the interactive fitting window to set the initial
        values for the prefitting or the MCMC run. Requires matplotlib. Default
        is False.
    prefit : bool, optional
        Whether to attempt to find the maximum likelihood parameters with a
        Nelder-Mead algorithm and use them as starting point of the MCMC run.
        The parameter values in `p0` will be used as starting points for the
        minimization. Note that the initial optimization is done without taking
        the prior function into account to avoid the possibility of infinite
        values in the objective function. If the best-fit parameter vector
        without prior is forbidden by the prior given, it will be discarded.
    data_sed : bool, optional
        When providing more than one data table, whether to convert them to SED
        format. If unset or None, all tables will be converted to the format of
        the first table.

    Returns
    -------
    sampler : :class:`~emcee.EnsembleSampler` instance
        Ensemble sampler with walker positions after ``nburn`` burn-in steps.
    pos : :class:`~numpy.ndarray`
        Final position vector array.

    See also
    --------
    emcee.EnsembleSampler
    """
    import emcee

    if data_table is None:
        raise TypeError("Data table is missing!")
    else:
        data = validate_data_table(data_table, sed=data_sed)

    if model is None:
        raise TypeError("Model function is missing!")

    # Add parameter labels if not provided or too short
    if labels is None:
        # First is normalization
        labels = ["norm"] + ["par{0}".format(i) for i in range(1, len(p0))]
    elif len(labels) < len(p0):
        labels += ["par{0}".format(i) for i in range(len(labels), len(p0))]

    # Check that the model returns fluxes in same physical type as data
    modelout = model(p0, data)
    if (isinstance(modelout, (tuple, list))
            and not isinstance(modelout, np.ndarray)):
        spec = modelout[0]
    else:
        spec = modelout

    # check whether both can be converted to same physical type through
    # sed_conversion
    try:
        # If both can be converted to differential flux, they can be compared
        # Otherwise, sed_conversion will raise a u.UnitsError
        sed_conversion(data["energy"], spec.unit, False)
        sed_conversion(data["energy"], data["flux"].unit, False)
    except u.UnitsError:
        raise u.UnitsError(
            "The physical type of the model and data units are not compatible,"
            " please modify your model or data so they match:\n"
            " Model units: {0} [{1}]\n Data units: {2} [{3}]\n".format(
                spec.unit,
                spec.unit.physical_type,
                data["flux"].unit,
                data["flux"].unit.physical_type,
            ))

    if guess:
        normNames = ["norm", "Norm", "ampl", "Ampl", "We", "Wp"]
        normNameslog = ["log({0}".format(name) for name in normNames]
        normNameslog10 = ["log10({0}".format(name) for name in normNames]
        normNames += normNameslog + normNameslog10
        idxs = []
        for l in normNames:
            for l2 in labels:
                if l2.startswith(l):
                    # check with startswith to include normalization,
                    # amplitude, etc.
                    idxs.append(labels.index(l2))

        if len(idxs) == 1:

            nunit, sedf = sed_conversion(data["energy"], spec.unit, False)
            currFlux = np.trapz(data["energy"] * (spec * sedf).to(nunit),
                                data["energy"])
            nunit, sedf = sed_conversion(data["energy"], data["flux"].unit,
                                         False)
            dataFlux = np.trapz(
                data["energy"] * (data["flux"] * sedf).to(nunit),
                data["energy"],
            )
            ratio = dataFlux / currFlux
            if labels[idxs[0]].startswith("log("):
                p0[idxs[0]] += np.log(ratio)
            elif labels[idxs[0]].startswith("log10("):
                p0[idxs[0]] += np.log10(ratio)
            else:
                p0[idxs[0]] *= ratio

        elif len(idxs) == 0:
            log.warning("No label starting with [{0}] found: not applying"
                        " normalization guess.".format(",".join(normNames)))
        elif len(idxs) > 1:
            log.warning("More than one label starting with [{0}] found:"
                        " not applying normalization guess.".format(
                            ",".join(normNames)))

    P0_IS_ML = False
    if interactive:
        try:
            log.info("Launching interactive model fitter, close when finished")
            from .model_fitter import InteractiveModelFitter
            import matplotlib.pyplot as plt

            iprev = plt.rcParams["interactive"]
            plt.rcParams["interactive"] = False
            imf = InteractiveModelFitter(model,
                                         p0,
                                         data,
                                         labels=labels,
                                         sed=True)
            p0 = imf.pars
            P0_IS_ML = imf.P0_IS_ML
            plt.rcParams["interactive"] = iprev
        except ImportError as e:
            log.warning("Interactive fitting is not available because"
                        " matplotlib is not installed: {0}".format(e))

    # If we already did the prefit call in ModelWidget (and didn't modify the
    # parameters afterwards), avoid doing it here
    if prefit and not P0_IS_ML:
        p0, P0_IS_ML = _prefit(p0, data, model, prior)

    sampler = emcee.EnsembleSampler(nwalkers,
                                    len(p0),
                                    lnprob,
                                    args=[data, model, prior],
                                    threads=threads)

    # Add data and parameters properties to sampler
    sampler.data_table = data_table
    sampler.data = data
    sampler.labels = labels
    # Add model function to sampler
    sampler.modelfn = model
    # Add run_info dict
    sampler.run_info = {
        "n_walkers": nwalkers,
        "n_burn": nburn,
        # convert from np.float to regular float
        "p0": [float(p) for p in p0],
        "guess": guess,
    }

    # Initialize walkers in a ball of relative size 0.5% in all dimensions if
    # the parameters have been fit to their ML values, or to 10% otherwise
    spread = 0.005 if P0_IS_ML else 0.1
    p0var = np.array([spread * pp for pp in p0])
    p0 = emcee.utils.sample_ball(p0, p0var, nwalkers)

    if nburn > 0:
        print("Burning in the {0} walkers with {1} steps...".format(
            nwalkers, nburn))
        sampler, pos = _run_mcmc(sampler, p0, nburn)
    else:
        pos = p0

    sampler.run_info["p0_burn_median"] = [
        float(p) for p in np.median(pos, axis=0)
    ]

    return sampler, pos
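A compact, hedged sketch of driving get_sampler with a toy power-law model; the table columns follow the docstring above, and the model, prior, and numbers are illustrative only:

import numpy as np
import astropy.units as u
from astropy.table import Table

energy = np.logspace(-1, 2, 10) * u.TeV
flux = 1e-12 * (energy / (1 * u.TeV)).value ** -2.5 * u.Unit('1/(cm2 s TeV)')
data = Table([energy, flux, 0.1 * flux],
             names=['energy', 'flux', 'flux_error'])

def model(pars, data):
    # pars = [norm, index]; return a differential flux at data['energy']
    return pars[0] * (data['energy'] / (1 * u.TeV)).value ** -pars[1] \
        * u.Unit('1/(cm2 s TeV)')

def prior(pars):
    # Uniform prior on the spectral index
    return 0. if 0 < pars[1] < 5 else -np.inf

sampler, pos = get_sampler(data_table=data, p0=[1e-12, 2.5], model=model,
                           prior=prior, nwalkers=32, nburn=10, threads=1)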
Exemple #47
0
def load_nustar_orbit(orb_filename):
    """Load data from a NuSTAR orbit file

    Parameters
    ----------
    orb_filename : str
        Name of file to load

    Returns
    -------
    astropy.table.Table
        containing Time, x, y, z, v_x, v_y, v_z data

    """
    # Load photon times from FT1 file

    if "_orb" in orb_filename:
        log.warning(
            "The NuSTAR orbit file you are providing is known to give "
            "a solution precise only to the ~0.5ms level. Use the "
            "pipeline-produced attitude-orbit file ('*.attorb.gz') for "
            "better precision."
        )

    hdulist = pyfits.open(orb_filename)
    orb_hdr = hdulist[1].header
    orb_dat = hdulist[1].data

    log.info("Opened orb FITS file {0}".format(orb_filename))
    # TIMESYS should be 'TT'
    # TIMEREF should be 'LOCAL', since no delays are applied
    timesys = orb_hdr["TIMESYS"]
    log.info("orb TIMESYS {0}".format(timesys))
    try:
        timeref = orb_hdr["TIMEREF"]
    except KeyError:
        timeref = "LOCAL"

    log.info("orb TIMEREF {0}".format(timeref))

    # The X, Y, Z position are for the START time
    mjds_TT = read_fits_event_mjds(hdulist[1])
    mjds_TT = mjds_TT * u.d
    # SC_POS is in km in X,Y,Z Earth-centered Inertial (ECI) coordinates
    SC_POS = orb_dat.field("POSITION")
    X = SC_POS[:, 0] * u.km
    Y = SC_POS[:, 1] * u.km
    Z = SC_POS[:, 2] * u.km
    SC_VEL = orb_dat.field("VELOCITY")
    Vx = SC_VEL[:, 0] * u.km / u.s
    Vy = SC_VEL[:, 1] * u.km / u.s
    Vz = SC_VEL[:, 2] * u.km / u.s

    log.info(
        "Building orb table covering MJDs {0} to {1}".format(
            mjds_TT.min(), mjds_TT.max()
        )
    )
    orb_table = Table(
        [mjds_TT, X, Y, Z, Vx, Vy, Vz],
        names=("MJD_TT", "X", "Y", "Z", "Vx", "Vy", "Vz"),
        meta={"name": "orb"},
    )
    return orb_table
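Usage mirrors the FPorbit loader above; the attitude-orbit filename is a placeholder:

orb = load_nustar_orbit('nu30001002001.attorb.gz')  # placeholder filename
print(orb['X'].unit, len(orb))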
Exemple #48
0
def lightcurve_through_image(lightcurve,
                             exposure,
                             frame=np.array([30, 30]),
                             final_resolution=None,
                             duet=None,
                             gal_type=None,
                             gal_params=None,
                             debug=False,
                             debugfilename='lightcurve',
                             silent=False,
                             zodi='low',
                             ignore_low_check=False):
    """Transform a theoretical light curve into a flux measurement.

    1. Take the values of a light curve, optionally rebin it to a new time
    resolution.
    2. Then, create an image with a point source corresponding to each flux
    measurement, and calculate the flux from the image with ``daophot``.
    3. Return the "realistic" light curve

    Parameters
    ----------
    lightcurve : ``astropy.table.Table``
        The lightcurve has to contain the columns 'time', 'fluence_D1', and
        'fluence_D2'. Photon fluxes are in counts/s.
    exposure : ``astropy.units.Quantity``
        Exposure time used for the light curve

    Other parameters
    ----------------
    frame : [N, M]
        Number of pixel along x and y axis
    final_resolution : ``astropy.units.Quantity``, default None
        Rebin the light curve to this time resolution before creating the
        light curve. Must be > exposure
    duet : ``astroduet.config.Telescope``
        If None, a default one is created
    gal_type : string
        Default galaxy string ("spiral"/"elliptical") or "custom" w/ Sersic parameters
        in gal_params
    gal_params : dict
        Dictionary of parameters for Sersic model (see sim_galaxy)
    debug : bool
        If True, save the light curves to file
    debugfilename : str
        File to save the light curves to
    silent : bool
        Suppress progress bars
    zodi : string
        Either 'low', 'med', or 'high'

    Returns
    -------
    lightcurve : ``astropy.table.Table``
        A light curve, rebinned to ``final_resolution``, and with four new
        columns: 'fluence_D1_fit', 'fluence_D1_fiterr', 'fluence_D2_fit',
        and 'fluence_D2_fiterr', containing the flux measurements from the
        intermediate images and their errorbars.
    """
    from astropy.table import Table
    lightcurve = copy.deepcopy(lightcurve)
    if silent:
        tqdm = lambda x: x
    else:
        tqdm = imported_tqdm

    with suppress_stdout():
        if duet is None:
            duet = Telescope()

    with suppress_stdout():
        if zodi == 'low':
            [bgd_band1, bgd_band2] = background_pixel_rate(duet,
                                                           low_zodi=True,
                                                           diag=True)
        elif zodi == 'med':
            [bgd_band1, bgd_band2] = background_pixel_rate(duet,
                                                           med_zodi=True,
                                                           diag=True)
        elif zodi == 'high':
            [bgd_band1, bgd_band2] = background_pixel_rate(duet,
                                                           high_zodi=True,
                                                           diag=True)
        else:
            raise ValueError("zodi must be 'low', 'med', or 'high'")

    # Directory for debugging purposes
    if debugfilename != 'lightcurve':
        debugdir = os.path.join('debug_imgs', debugfilename)
    else:
        rand = np.random.randint(0, 99999999)
        debugdir = f'debug_imgs_{rand}'

    if debug:
        mkdir_p(debugdir)

    good = (lightcurve['fluence_D1'] > 0) & (lightcurve['fluence_D2'] > 0)
    if not ignore_low_check:
        if not np.any(good):
            log.warning("Light curve has no points with fluence > 0")
            return
        lightcurve = lightcurve[good]

    lightcurve = \
        construct_images_from_lightcurve(
            lightcurve, exposure, duet=duet, gal_type=gal_type,
            gal_params=gal_params, frame=frame, debug=debug,
            debugfilename=os.path.join(debugdir, debugfilename+'.hdf5'),
            zodi=zodi, silent=silent)

    total_image_rate1 = np.sum(lightcurve['imgs_D1'], axis=0)
    total_image_rate2 = np.sum(lightcurve['imgs_D2'], axis=0)

    total_image_rate = total_image_rate1 + total_image_rate2

    total_image_rate_bkgsub1 = np.sum(lightcurve['imgs_D1_bkgsub'], axis=0)
    total_image_rate_bkgsub2 = np.sum(lightcurve['imgs_D2_bkgsub'], axis=0)

    total_image_rate_bkgsub = \
        total_image_rate_bkgsub1 + total_image_rate_bkgsub2

    total_images_rate_list = [total_image_rate1, total_image_rate2]

    psf_fwhm_pix = duet.psf_fwhm / duet.pixel

    log.info('Constructing reference images')
    # Make reference images (5 exposures)
    if final_resolution is not None:
        nexp = int(np.rint(5 * final_resolution / exposure))
    else:
        nexp = 5

    ref_image1 = construct_image(frame,
                                 exposure,
                                 duet=duet,
                                 band=duet.bandpass1,
                                 gal_type=gal_type,
                                 gal_params=gal_params,
                                 sky_rate=bgd_band1,
                                 n_exp=nexp)
    ref_image_rate1 = ref_image1 / (exposure * nexp)
    ref_bkg1, ref_bkg_rms_median1 = estimate_background(ref_image_rate1,
                                                        method='1D',
                                                        sigma=2)
    ref_rate_bkgsub1 = ref_image_rate1 - ref_bkg1

    ref_image2 = construct_image(frame,
                                 exposure,
                                 duet=duet,
                                 band=duet.bandpass2,
                                 gal_type=gal_type,
                                 gal_params=gal_params,
                                 sky_rate=bgd_band2,
                                 n_exp=nexp)
    ref_image_rate2 = ref_image2 / (exposure * nexp)
    ref_bkg2, ref_bkg_rms_median2 = estimate_background(ref_image_rate2,
                                                        method='1D',
                                                        sigma=2)
    ref_rate_bkgsub2 = ref_image_rate2 - ref_bkg2

    # psf_array = duet.psf_model(x_size=5,y_size=5).array

    total_ref_img_rate = ref_image_rate1 + ref_image_rate2
    total_ref_img_rate_bkgsub = \
        ref_rate_bkgsub1 + ref_rate_bkgsub2

    log.info('Finding source in integrated diff image')
    diff_total_image = \
        calculate_diff_image(total_image_rate, total_image_rate_bkgsub,
                             total_ref_img_rate,
                             total_ref_img_rate_bkgsub,
                             duet)

    with suppress_stdout():
        star_tbl, bkg_image, threshold = find(diff_total_image,
                                              psf_fwhm_pix.value,
                                              method='daophot',
                                              background='1D',
                                              frame='diff')
    if len(star_tbl) < 1:
        log.warning("No good detections in this field")
        return

    if debug:
        outfile = os.path.join(debugdir, f'images_total.p')
        with open(outfile, 'wb') as fobj:
            pickle.dump(
                {
                    'imgD1': total_images_rate_list[0],
                    'imgD2': total_images_rate_list[1]
                }, fobj)
        outfile = os.path.join(debugdir, f'images_ref.p')
        with open(outfile, 'wb') as fobj:
            pickle.dump({
                'imgD1': ref_image_rate1,
                'imgD2': ref_image_rate2
            }, fobj)

        outfile = os.path.join(debugdir, f'images_diff.p')
        with open(outfile, 'wb') as fobj:
            pickle.dump({'imgD1': diff_total_image}, fobj)

    star_tbl.sort('flux')
    star_tbl = star_tbl[-1:]['x', 'y']

    # decide light curve bins before image generation, for speed.
    lightcurve['nbin'] = 1
    if final_resolution is not None:
        lightcurve = rebin_lightcurve(lightcurve,
                                      exposure,
                                      final_resolution,
                                      debug=debug)

    for duet_no in [1, 2]:
        for suffix in ['', 'err']:
            colname = f'fluence_D{duet_no}_fit{suffix}'
            lightcurve[colname] = 0.
            lightcurve[colname].unit = u.ph / (u.cm**2 * u.s)
            colname = f'ABmag_D{duet_no}_fit{suffix}'
            lightcurve[colname] = 0.

    lightcurve['snr_D1'] = 0.
    lightcurve['snr_D2'] = 0.
    # Generate light curve
    log.info('Measuring fluxes and creating light curve')
    for i, row in enumerate(tqdm(lightcurve)):
        time = row['time']

        image_rate1 = lightcurve['imgs_D1'][i]
        image_rate_bkgsub1 = lightcurve['imgs_D1_bkgsub'][i]

        diff_image1 = calculate_diff_image(image_rate1,
                                           image_rate_bkgsub1,
                                           ref_image_rate1,
                                           ref_rate_bkgsub1,
                                           duet=duet)

        with suppress_stdout():
            result1, _ = run_daophot(diff_image1,
                                     threshold,
                                     star_tbl,
                                     niters=1,
                                     snr_lim=0.,
                                     duet=duet)

        fl1_fit = result1['flux_fit'][0] * image_rate1.unit
        fl1_fite = result1['flux_unc'][0] * image_rate1.unit
        lightcurve['fluence_D1_fit'][i] = duet.rate_to_fluence(fl1_fit)
        lightcurve['fluence_D1_fiterr'][i] = duet.rate_to_fluence(fl1_fite)
        if (fl1_fit > 0) & (fl1_fite > 0):
            lightcurve['snr_D1'][i] = fl1_fit / fl1_fite
            lightcurve['ABmag_D1_fit'][i] = \
                duet_fluence_to_abmag(lightcurve['fluence_D1_fit'][i], 1,
                                      duet=duet).value
            ABerr = 2.5 * np.log10(1 + 1 / lightcurve['snr_D1'][i])
            lightcurve['ABmag_D1_fiterr'][i] = ABerr

        image_rate2 = lightcurve['imgs_D2'][i]
        image_rate_bkgsub2 = lightcurve['imgs_D2_bkgsub'][i]

        diff_image2 = calculate_diff_image(image_rate2,
                                           image_rate_bkgsub2,
                                           ref_image_rate2,
                                           ref_rate_bkgsub2,
                                           duet=duet)

        with suppress_stdout():
            result2, _ = run_daophot(diff_image2,
                                     threshold,
                                     star_tbl,
                                     niters=1,
                                     snr_lim=0.,
                                     duet=duet)
        fl2_fit = result2['flux_fit'][0] * image_rate2.unit
        fl2_fite = result2['flux_unc'][0] * image_rate2.unit

        lightcurve['fluence_D2_fit'][i] = duet.rate_to_fluence(fl2_fit)
        lightcurve['fluence_D2_fiterr'][i] = duet.rate_to_fluence(fl2_fite)
        if (fl2_fit > 0) & (fl2_fite > 0):
            lightcurve['snr_D2'][i] = fl2_fit / fl2_fite
            lightcurve['ABmag_D2_fit'][i] = \
                duet_fluence_to_abmag(lightcurve['fluence_D2_fit'][i], 2,
                                      duet=duet).value
            ABerr = 2.5 * np.log10(1 + 1 / lightcurve['snr_D2'][i])
            lightcurve['ABmag_D2_fiterr'][i] = ABerr

    return lightcurve
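A note on the magnitude-error step above: since AB magnitudes go as -2.5 log10 of the fluence, a fractional fluence error of 1/SNR corresponds to roughly 2.5 log10(1 + 1/SNR) magnitudes. A minimal, standalone sketch (the helper name is ours, not part of the code above):

import numpy as np

def abmag_error_from_snr(snr):
    """Approximate magnitude uncertainty for a flux measured at a given SNR.

    Since m = -2.5 log10(F), a flux error of F/snr maps to
    2.5 * log10(1 + 1/snr) magnitudes (~1.0857/snr for large snr).
    """
    return 2.5 * np.log10(1.0 + 1.0 / snr)

print(abmag_error_from_snr(5.0))    # ~0.198 mag
print(abmag_error_from_snr(100.0))  # ~0.011 mag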
Exemple #49
0
    def read_parfile(self, file):
        """Read values from the specified parfile into the model parameters.

        Parameters
        ----------
        file : str or list or file-like
            The parfile to read from. May be specified as a filename,
            a list of lines, or a readable file-like object.

        """
        repeat_param = defaultdict(int)
        param_map = self.get_params_mapping()
        comps = self.components.copy()
        comps['timing_model'] = self
        wants_tcb = None
        stray_lines = []
        for li in interesting_lines(lines_of(file), comments=("#", "C ")):
            k = li.split()
            name = k[0].upper()

            if name == 'UNITS':
                if name in repeat_param:
                    raise ValueError("UNITS is repeated in par file")
                else:
                    repeat_param[name] += 1
                if len(k) > 1 and k[1] == 'TDB':
                    wants_tcb = False
                else:
                    wants_tcb = li
                continue

            if name == 'EPHVER':
                if len(k) > 1 and k[1] != '2' and wants_tcb is None:
                    wants_tcb = li
                log.warning("EPHVER %s does nothing in PINT" % k[1])
                #actually people expect EPHVER 5 to work
                #even though it's supposed to imply TCB which doesn't
                continue

            repeat_param[name] += 1
            if repeat_param[name] > 1:
                k[0] = k[0] + str(repeat_param[name])
                li = ' '.join(k)

            used = []
            for p, c in param_map.items():
                if getattr(comps[c], p).from_parfile_line(li):
                    used.append((c, p))
            if len(used) > 1:
                log.warning("More than one component made use of par file "
                            "line {!r}: {}".format(li, used))
            if used:
                continue

            if name in ignore_params:
                log.debug("Ignoring parfile line '%s'" % (li, ))
                continue

            try:
                prefix, f, v = utils.split_prefixed_name(name)
                if prefix in ignore_prefix:
                    log.debug("Ignoring prefix parfile line '%s'" % (li, ))
                    continue
            except utils.PrefixError:
                pass

            stray_lines.append(li)

        if wants_tcb:
            raise ValueError(
                "Only UNITS TDB supported by PINT but parfile has {}".format(
                    wants_tcb))
        if stray_lines:
            for l in stray_lines:
                log.warning("Unrecognized parfile line {!r}".format(l))
            for name, param in getattr(self, "discarded_components", []):
                log.warning("Model component {} was rejected because we "
                            "didn't find parameter {}".format(name, param))
            log.warning("Final object: {}".format(self))

        # The "setup" functions contain tests for required parameters or
        # combinations of parameters, etc, that can only be done
        # after the entire parfile is read
        self.setup()
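The repeat_param bookkeeping above renames second and later occurrences of a parameter (e.g. JUMP, JUMP2, JUMP3) so each parfile line keeps a unique key. A minimal, standalone sketch of just that step; the helper name and example lines are made up:

from collections import defaultdict

def rename_repeats(lines):
    """Rename repeated leading parameter names the way read_parfile() does."""
    repeat_param = defaultdict(int)
    out = []
    for li in lines:
        k = li.split()
        name = k[0].upper()
        repeat_param[name] += 1
        if repeat_param[name] > 1:
            k[0] = k[0] + str(repeat_param[name])
        out.append(' '.join(k))
    return out

print(rename_repeats(["JUMP -fe L 0.01", "JUMP -fe S 0.02", "F0 29.9"]))
# ['JUMP -fe L 0.01', 'JUMP2 -fe S 0.02', 'F0 29.9']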
Exemple #50
0
def make_comparison_image(filename1, filename2, title1='bsens', title2='cleanest', writediff=False, allow_reproj=False, nticks=12,
                          asinh_scaling_factor=10, scalebarlength=15):
    #fh_pre = fits.open()
    #fh_post = fits.open()
    cube_pre = SpectralCube.read(filename1, format='fits' if 'fits' in filename1 else 'casa_image').with_spectral_unit(u.GHz)
    cube_post = SpectralCube.read(filename2, format='fits' if 'fits' in filename2 else 'casa_image').with_spectral_unit(u.GHz)

    if 'pbcor' in filename1:
        assert 'pbcor' in filename2
    if 'pbcor' in filename2:
        assert 'pbcor' in filename1

    if allow_reproj:
        if cube_pre.shape != cube_post.shape or (cube_post.wcs != cube_pre.wcs and cube_post.wcs.wcs != cube_pre.wcs.wcs):
            cube_post = cube_post.reproject(cube_pre.header)


    cube_pre = cube_pre.with_mask(cube_pre != 0*cube_pre.unit)
    cube_post = cube_post.with_mask(cube_post != 0*cube_post.unit)
    slices = cube_pre.subcube_slices_from_mask(cube_pre.mask & cube_post.mask,
                                               spatial_only=True)[1:]

    # make the cubes match the data; needed for later WCS cutouts
    cube_pre = cube_pre[:, slices[0], slices[1]]
    cube_post = cube_post[:, slices[0], slices[1]]

    #cube_pre = cube_pre.minimal_subcube()
    #cube_post = cube_post.minimal_subcube()
    data_pre = cube_pre[0].value * 1e3
    data_post = cube_post[0].value * 1e3

    #data_pre[np.abs(data_pre) < 1e-7] = np.nan
    #data_post[np.abs(data_post) < 1e-7] = np.nan

    try:
        diff = (data_post - data_pre)
    except Exception as ex:
        print(filename1, filename2, cube_pre.shape, cube_post.shape)
        raise ex

    ww = cube_post.wcs
    pixscale = wcs.utils.proj_plane_pixel_area(ww)*u.deg**2
    try:
        beam = cube_post.beam
        ppbeam = (beam.sr / pixscale).decompose()
        assert ppbeam.unit.is_equivalent(u.dimensionless_unscaled)
        ppbeam = ppbeam.value
    except NoBeamError:
        beam = np.nan*u.sr
        ppbeam = np.nan

    if writediff:
        fits.PrimaryHDU(data=diff,
                        header=cube_post.header).writeto(filename2.split(".fits")[0]
                                                         + ".preselfcal-diff.fits",
                                                         overwrite=True)
    fig = pl.figure(1, figsize=(14,6))
    fig.clf()

    if fig.get_figheight() != 6:
        fig.set_figheight(6)
    if fig.get_figwidth() != 14:
        fig.set_figwidth(14)

    data_pre_display = np.arcsinh(data_pre*asinh_scaling_factor)
    data_post_display = np.arcsinh(data_post*asinh_scaling_factor)
    diff_display = np.arcsinh(diff*asinh_scaling_factor)

    minv = np.nanpercentile(data_pre_display, 0.05)
    maxv = np.nanpercentile(data_pre_display, 99.5)
    if maxv > np.arcsinh(1000):
        maxv = np.arcsinh(1000)
    if np.abs(minv) > maxv:
        minv = -maxv

    norm = visualization.simple_norm(data=diff_display.squeeze(), stretch='linear',
                                     #min_percent=0.05, max_percent=99.995,)
                                     min_cut=minv, max_cut=maxv)

    #cm = pl.matplotlib.cm.gray
    #cm.set_bad('white', 0)
    cm = pl.matplotlib.cm.viridis

    ax1 = pl.subplot(1,3,1)
    ax2 = pl.subplot(1,3,2)
    ax3 = pl.subplot(1,3,3)
    for ax in (ax1,ax2,ax3):
        ax.cla()

    ax1.imshow(data_pre_display, norm=norm, origin='lower', interpolation='nearest', cmap=cm)
    ax1.set_title(title1)

    # scalebar
    ww = cube_pre.wcs.celestial
    cd = (ww.pixel_scale_matrix[1,1] * 3600)
    blc = np.array(diff.shape)*0.1
    ax1.add_patch(matplotlib.patches.Rectangle([blc[1]*0.8, blc[0]*0.9],
                                               width=scalebarlength/cd*1.4,
                                               height=blc[0]*0.6,
                                               edgecolor='k', facecolor='w',
                                               alpha=0.5))
    ax1.plot([blc[1], blc[1]+scalebarlength/cd], [blc[0], blc[0]], color='k')
    tx = ax1.annotate(f'{scalebarlength}"', (blc[1]+scalebarlength/2/cd, blc[0]*1.1))
    tx.set_horizontalalignment('center')

    ax2.imshow(data_post_display, norm=norm, origin='lower', interpolation='nearest', cmap=cm)
    ax2.set_title(title2)

    im = ax3.imshow(diff_display.squeeze(), norm=norm, origin='lower', interpolation='nearest', cmap=cm)
    ax3.set_title(f"{title2} - {title1}")

    for ax in (ax1,ax2,ax3):
        ax.set_xticks([])
        ax.set_yticks([])

    pl.subplots_adjust(wspace=0.0)

    cbax = fig.add_axes([0.91,0.18,0.03,0.64])
    cb = fig.colorbar(cax=cbax, mappable=im)
    cb.set_label("S$_\\nu$ [mJy/beam]")
    mn,mx = cb.get_ticks().min(), cb.get_ticks().max()
    ticklocs = np.concatenate([np.linspace(-norm.vmax, 0, nticks//2)[:-1], np.linspace(0, norm.vmax, nticks//2)])
    ticks = np.sinh(ticklocs)
    cb.update_normal(im)
    cb.set_ticks(ticks)
    ticklocs = cb.get_ticks()
    ticklabels = [f"{np.sinh(x/asinh_scaling_factor):0.2f}" for x in ticklocs]
    cb.set_ticklabels(ticklabels)

    meta = parse_fn(filename1)

    reg = get_noise_region(meta['region'], meta['band'])

    if reg is not None:
        reglist = regions.read_ds9(reg)
        composite_region = reduce(operator.or_, reglist)

        if hasattr(composite_region, 'to_mask'):
            msk = composite_region.to_mask()
        else:
            preg = composite_region.to_pixel(cube_pre.wcs.celestial)
            msk = preg.to_mask()

        cutout_pixels_pre = msk.cutout(data_pre, fill_value=np.nan)[msk.data.astype('bool')]

        mad_sample_pre = mad_std(cutout_pixels_pre, ignore_nan=True)
        std_sample_pre = np.nanstd(cutout_pixels_pre)

        if hasattr(composite_region, 'to_mask'):
            msk = composite_region.to_mask()
        else:
            preg = composite_region.to_pixel(cube_post.wcs.celestial)
            msk = preg.to_mask()
        cutout_pixels_post = msk.cutout(data_post, fill_value=np.nan)[msk.data.astype('bool')]

        mad_sample_post = mad_std(cutout_pixels_post, ignore_nan=True)
        std_sample_post = np.nanstd(cutout_pixels_post)


        if np.any(np.isnan(mad_sample_pre)):
            log.warning("mad_sample_pre contains some NaN values")
        if np.any(np.isnan(mad_sample_post)):
            log.warning("mad_sample_post contains some NaN values")

        if len(cutout_pixels_post) != len(cutout_pixels_pre):
            log.warning(f"cutout pixels are different size in pre vs post ({filename1} : {filename2})")
        if (cube_pre.wcs.celestial != cube_post.wcs.celestial) and (cube_pre.wcs.celestial.wcs != cube_post.wcs.celestial.wcs):
            # wcs comparisons stopped working sometime in 2019-2020 - wcs.wcs comparisons appear to work?
            log.warning(f"post and pre have different celestial WCSes ({filename1} : {filename2})")


        if not np.isfinite(mad_sample_pre):
            raise ValueError


    mad_pre = mad_std(data_pre, ignore_nan=True)
    mad_post = mad_std(data_post, ignore_nan=True)

    mad_diff = mad_std(diff, ignore_nan=True)
    diffmask = np.abs(diff) > 3*mad_diff

    history = cube_post.header['HISTORY']
    hasamp = any("'calmode': 'ap'" in x for x in history) or any("'calmode': 'a'" in x for x in history)

    diffstats = {'mean': np.nanmean(diff),
                 'max': np.nanmax(diff),
                 'shape': diff.shape[0],
                 'ppbeam': ppbeam,
                 'sum': np.nansum(diff),
                 'masksum': diff[diffmask].sum(),
                 'min': np.nanmin(diff),
                 'median': np.nanmedian(diff),
                 'mad': mad_diff,
                 'dr_pre': np.nanmax(data_pre) / mad_std(data_pre, ignore_nan=True),
                 'dr_post': np.nanmax(data_post) / mad_std(data_post, ignore_nan=True),
                 'min_pre': np.nanmin(data_pre),
                 'min_post': np.nanmin(data_post),
                 'max_pre': np.nanmax(data_pre),
                 'max_post': np.nanmax(data_post),
                 'sum_pre': np.nansum(data_pre),
                 'sum_post': np.nansum(data_post),
                 'masksum_pre': (data_pre[data_pre > mad_pre*3]).sum(),
                 'masksum_post': (data_post[data_post > mad_post*3]).sum(),
                 'mad_pre': mad_pre,
                 'mad_post':  mad_post,
                 'mad_sample_pre': np.nan,
                 'mad_sample_post': np.nan,
                 'std_sample_pre': np.nan,
                 'std_sample_post': np.nan,
                 'has_amp': hasamp,
                }
    if reg is not None:
        diffstats.update({
            'mad_sample_pre': mad_sample_pre,
            'mad_sample_post': mad_sample_post,
            'std_sample_pre': std_sample_pre,
            'std_sample_post': std_sample_post,
        })

    return ax1, ax2, ax3, fig, diffstats
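The display scaling above pushes the data through arcsinh and labels the colorbar by inverting with sinh. A minimal sketch of that forward/inverse pair, assuming the same scaling factor of 10; the helper names and values are illustrative:

import numpy as np

def asinh_stretch(data_mjy, scale=10):
    """Forward stretch used for display: compresses the bright tail while
    leaving faint emission roughly linear."""
    return np.arcsinh(data_mjy * scale)

def asinh_unstretch(display_value, scale=10):
    """Inverse mapping, e.g. for relabelling colorbar ticks in mJy/beam."""
    return np.sinh(display_value) / scale

vals = np.array([0.1, 1.0, 100.0])                              # mJy/beam
print(np.allclose(asinh_unstretch(asinh_stretch(vals)), vals))  # True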
Exemple #51
0
    def _login(self, username=None, store_password=False,
               reenter_password=False):
        """
        Login to the NRAO archive

        Parameters
        ----------
        username : str, optional
            Username to the NRAO archive. If not given, it should be specified
            in the config file.
        store_password : bool, optional
            Stores the password securely in your keyring. Default is False.
        reenter_password : bool, optional
            Asks for the password even if it is already stored in the
            keyring. This is the way to overwrite an already stored password
            on the keyring. Default is False.
        """

        # Developer notes:
        # Login via https://my.nrao.edu/cas/login
        # # this can be added to auto-redirect back to the query tool:
        # ?service=https://archive.nrao.edu/archive/advquery.jsp

        if username is None:
            if not self.USERNAME:
                raise LoginError("If you do not pass a username to login(), "
                                 "you should configure a default one!")
            else:
                username = self.USERNAME

        # Check if already logged in
        loginpage = self._request("GET", "https://my.nrao.edu/cas/login",
                                  cache=False)
        root = BeautifulSoup(loginpage.content, 'html5lib')
        if root.find('div', class_='success'):
            log.info("Already logged in.")
            return True

        # Get password from keyring or prompt
        if reenter_password is False:
            password_from_keyring = keyring.get_password(
                "astroquery:my.nrao.edu", username)
        else:
            password_from_keyring = None

        if password_from_keyring is None:
            if system_tools.in_ipynb():
                log.warning("You may be using an ipython notebook:"
                            " the password form will appear in your terminal.")
            password = getpass.getpass("{0}, enter your NRAO archive password:"
                                       "\n".format(username))
        else:
            password = password_from_keyring
        # Authenticate
        log.info("Authenticating {0} on my.nrao.edu ...".format(username))
        # Do not cache pieces of the login process
        data = {kw: root.find('input', {'name': kw})['value']
                for kw in ('lt', '_eventId', 'execution')}
        data['username'] = username
        data['password'] = password
        data['execution'] = 'e1s1'  # not sure if needed
        data['_eventId'] = 'submit'
        data['submit'] = 'LOGIN'

        login_response = self._request("POST", "https://my.nrao.edu/cas/login",
                                       data=data, cache=False)

        authenticated = ('You have successfully logged in' in
                         login_response.text)

        if authenticated:
            log.info("Authentication successful!")
            self.USERNAME = username
        else:
            log.exception("Authentication failed!")
        # When authenticated, save password in keyring if needed
        if authenticated and password_from_keyring is None and store_password:
            keyring.set_password("astroquery:my.nrao.edu", username, password)

        return authenticated
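The keyring flow above (look up a stored password, fall back to an interactive prompt, optionally store the result) can be sketched on its own as follows; the helper name is hypothetical and error handling is omitted:

import getpass
import keyring

def get_or_prompt_password(service, username, store=False):
    """Return a password from the keyring, prompting if it is not stored."""
    password = keyring.get_password(service, username)
    from_keyring = password is not None
    if password is None:
        password = getpass.getpass("{0}, enter your password:\n".format(username))
    if store and not from_keyring:
        keyring.set_password(service, username, password)
    return password

# e.g. get_or_prompt_password("astroquery:my.nrao.edu", "someuser", store=True)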
Exemple #52
0
    def stage_data(self, uids):
        """
        Stage ALMA data

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """
        """
        With log.set_level(10)
        INFO: Staging files... [astroquery.alma.core]
        DEBUG: First request URL: https://almascience.eso.org/rh/submission [astroquery.alma.core]
        DEBUG: First request payload: {'dataset': [u'ALMA+uid___A002_X3b3400_X90f']} [astroquery.alma.core]
        DEBUG: First response URL: https://almascience.eso.org/rh/checkAuthenticationStatus/3f98de33-197e-4692-9afa-496842032ea9/submission [astroquery.alma.core]
        DEBUG: Request ID: 3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        DEBUG: Submission URL: https://almascience.eso.org/rh/submission/3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
        .DEBUG: Data list URL: https://almascience.eso.org/rh/requests/anonymous/786823226 [astroquery.alma.core]
        """

        if isinstance(uids, six.string_types + (np.bytes_, )):
            uids = [uids]
        if not isinstance(uids, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        log.info("Staging files...")

        self._get_dataarchive_url()

        url = urljoin(self.dataarchive_url, 'rh/submission')
        log.debug("First request URL: {0}".format(url))
        # 'ALMA+uid___A002_X391d0b_X7b'
        payload = {'dataset': ['ALMA+' + clean_uid(uid) for uid in uids]}
        log.debug("First request payload: {0}".format(payload))

        self._staging_log = {'first_post_url': url}

        # Request staging for the UIDs
        # This component cannot be cached, since the returned data can change
        # if new data are uploaded
        response = self._request('POST',
                                 url,
                                 data=payload,
                                 timeout=self.TIMEOUT,
                                 cache=False)
        self._staging_log['initial_response'] = response
        log.debug("First response URL: {0}".format(response.url))
        if 'login' in response.url:
            raise ValueError(
                "You must login before downloading this data set.")

        if response.status_code == 405:
            if hasattr(self, '_last_successful_staging_log'):
                log.warning(
                    "Error 405 received.  If you have previously staged "
                    "the same UIDs, the result returned is probably "
                    "correct, otherwise you may need to create a fresh "
                    "astroquery.Alma instance.")
                return self._last_successful_staging_log['result']
            else:
                raise HTTPError(
                    "Received an error 405: this may indicate you "
                    "have already staged the data.  Try downloading "
                    "the file URLs directly with download_files.")
        response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            time.sleep(1)
            # CANNOT cache this stage: it is not a real data page!  results in
            # infinite loops
            response = self._request('POST',
                                     url,
                                     data=payload,
                                     timeout=self.TIMEOUT,
                                     cache=False)
            self._staging_log['initial_response'] = response
            if 'j_spring_cas_security_check' in response.url:
                log.warning("Staging request was not successful.  Try again?")
            response.raise_for_status()

        if 'j_spring_cas_security_check' in response.url:
            raise RemoteServiceError("Could not access data.  This error "
                                     "can arise if the data are private and "
                                     "you do not have access rights or are "
                                     "not logged in.")

        request_id = response.url.split("/")[-2]
        self._staging_log['request_id'] = request_id
        log.debug("Request ID: {0}".format(request_id))

        # Submit a request for the specific request ID identified above
        submission_url = urljoin(self.dataarchive_url,
                                 url_helpers.join('rh/submission', request_id))
        log.debug("Submission URL: {0}".format(submission_url))
        self._staging_log['submission_url'] = submission_url
        staging_submission = self._request('GET', submission_url, cache=True)
        self._staging_log['staging_submission'] = staging_submission
        staging_submission.raise_for_status()

        data_page_url = staging_submission.url
        self._staging_log['data_page_url'] = data_page_url
        dpid = data_page_url.split("/")[-1]
        self._staging_log['staging_page_id'] = dpid

        # CANNOT cache this step: please_wait will happen infinitely
        data_page = self._request('GET', data_page_url, cache=False)
        self._staging_log['data_page'] = data_page
        data_page.raise_for_status()

        has_completed = False
        while not has_completed:
            time.sleep(1)
            summary = self._request('GET',
                                    url_helpers.join(data_page_url, 'summary'),
                                    cache=False)
            summary.raise_for_status()
            print(".", end='')
            sys.stdout.flush()
            has_completed = summary.json()['complete']

        self._staging_log['summary'] = summary
        summary.raise_for_status()
        self._staging_log['json_data'] = json_data = summary.json()

        username = self.USERNAME if self.USERNAME else 'anonymous'

        # templates:
        # https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
        # 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
        # uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar

        url_decomposed = urlparse(data_page_url)
        base_url = ('{uri.scheme}://{uri.netloc}/'
                    'dataPortal/requests/{username}/'
                    '{staging_page_id}/ALMA'.format(
                        uri=url_decomposed,
                        staging_page_id=dpid,
                        username=username,
                    ))
        tbl = self._json_summary_to_table(json_data, base_url=base_url)
        self._staging_log['result'] = tbl
        self._staging_log['file_urls'] = tbl['URL']
        self._last_successful_staging_log = self._staging_log

        return tbl
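stage_data() ends with a poll-until-complete loop against the request's summary endpoint. A minimal sketch of that pattern with plain requests; the function name is hypothetical and the 'complete' key mirrors the JSON handled above:

import sys
import time
import requests

def wait_until_complete(summary_url, poll_interval=1):
    """Re-request the summary endpoint until it reports completion."""
    while True:
        summary = requests.get(summary_url)
        summary.raise_for_status()
        payload = summary.json()
        if payload['complete']:
            return payload
        print(".", end='')
        sys.stdout.flush()
        time.sleep(poll_interval)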
Exemple #53
0
    def units(self, value):
        log.warning("'units' is deprecated; please use 'unit'")
        self._unit = value
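For comparison, the usual way to deprecate an attribute alias like this is a property pair that emits a DeprecationWarning through the warnings module. A minimal, standalone sketch; the class name is hypothetical:

import warnings

class Layer:
    """Sketch of a deprecated alias: 'units' forwards to 'unit'."""

    def __init__(self):
        self._unit = None

    @property
    def unit(self):
        return self._unit

    @unit.setter
    def unit(self, value):
        self._unit = value

    @property
    def units(self):
        warnings.warn("'units' is deprecated; please use 'unit'",
                      DeprecationWarning)
        return self._unit

    @units.setter
    def units(self, value):
        warnings.warn("'units' is deprecated; please use 'unit'",
                      DeprecationWarning)
        self._unit = value

layer = Layer()
layer.units = 'Jy'   # warns, but still sets the underlying attribute
print(layer.unit)    # Jy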
Exemple #54
0
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text
                    and (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("Heading was found when parsing the download "
                                 "page but it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png'
                              in tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB') else
                            u.Unit('kB') if 'kb' in unit.lower() else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5,
                            msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}".format(
                                size, uid, columns['URL'][-1]))
                else:
                    log.warning("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB') else
                        u.Unit('kB') if 'kb' in unit.lower() else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5,
                        msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(
                            size, uid, columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError(
                "No valid UIDs were found in the staged data table. "
                "Please include {0} in a bug report.".format(
                    self._staging_log['data_list_url']))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
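The size-cell handling above combines a regex split with astropy units. A minimal, standalone sketch of just that conversion; the helper name is ours and the fallback unit (bytes) is an assumption:

import re
import astropy.units as u

def parse_size(text):
    """Split a '123.4MB'-style string into number and unit, return GB."""
    size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)', text).groups()
    unit = (u.Unit(unit) if unit in ('GB', 'MB') else
            u.Unit('kB') if 'kb' in unit.lower() else u.byte)
    try:
        return (float(size) * unit).to(u.Gbyte)
    except ValueError:
        return -1 * u.byte   # unparseable size string; mirror the -1 sentinel

print(parse_size('123.4MB'))   # ~0.1234 Gbyte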
Exemple #55
0
    def ND_params(self):
        """Returns parameters which characterize the coronagraph ND filter.
        Parameters are relative to the *unbinned image*.  It is important
        this be calculated on a per-image basis, since flexure in the
        instrument can shift it a bit side-to-side.  Unfortunately,
        contrast is not sufficient to accurately spot the ND_filter in
        normal dark sky exposures without some sort of close starting
        point.  This is addressed by using the flats, which do have
        sufficient contrast, to provide RUN_LEVEL_DEFAULT_ND_PARAMS.

        """

        if self.imagetyp == 'bias' or self.imagetyp == 'dark':
            return self.default_ND_params

        # To simplify calculations, everything will be done with the
        # unbinned image.
        ytop = self.small_filt_crop[0, 0]
        ybot = self.small_filt_crop[1, 0]

        # For CorObs, there will not be mask, uncertainty, etc., so we
        # just work with the data, which we will call "im."

        if self.imagetyp == 'flat':
            # Flats have high contrast and low sensitivity to hot
            # pixels, so we can work with the whole image.  It is OK
            # that this is not a copy since we are not going to muck
            # with it.  Since flats are high enough quality, we use
            # them to independently measure the ND_params, so there is
            # no need for the default (in fact that is how we derive
            # it!).  Finally, The flats produce very narrow peaks in
            # the ND_param algorithm when processed without a
            # default_ND_param and there is a significant filter
            # rotation.  Once things are morphed by the
            # default_ND_params (assuming they match the image), the
            # peaks are much broader.  So our cwt arange needs to be a
            # little different.
            im = self.self_unbinned.data
            default_ND_params = None
            cwt_width_arange = self.cwt_width_arange_flat
            self.n_y_steps = 25
        else:
            # Non-flat case
            default_ND_params = self.default_ND_params
            cwt_width_arange = self.cwt_width_arange
            # Do a quick filter to get rid of hot pixels in awkward
            # places.  Do this only for stuff inside small_filter_crop
            # since it is our most time-consuming step.  Also work in
            # our original, potentially binned image, so hot pixels
            # don't get blown up by unbinning.  This is also a natural
            # place to check that we have default_ND_params in the
            # non-flat case and warn accordingly.
            if default_ND_params is None:
                log.warning('No default_ND_params specified in '
                            'non-flat case.  This is likely to result '
                            'in a poor ND_coords calculation.')
                # For filtering hot pixels, this doesn't need to be
                # super precise
                tb_ND_params = RUN_LEVEL_DEFAULT_ND_PARAMS
            else:
                tb_ND_params = default_ND_params
            # Make a copy so we don't mess up the primary data array
            im = self.data.copy()
            xtop = self.binned(self.ND_edges(ytop, tb_ND_params))
            xbot = self.binned(self.ND_edges(ybot, tb_ND_params))
            # Get the far left and right coords, keeping in mind ND
            # filter might be oriented CW or CCW of vertical
            x0 = int(
                np.min((xbot, xtop)) - self.search_margin / self.binning[1])
            x1 = int(
                np.max((xbot, xtop)) + self.search_margin / self.binning[1])
            x0 = np.max((0, x0))
            x1 = np.min((x1, im.shape[1]))
            # This is the operation that messes with the array in place
            im[ytop:ybot, x0:x1] \
                = signal.medfilt(im[ytop:ybot, x0:x1],
                                 kernel_size=3)
            # Unbin now that we have removed hot pixels from the
            # section we care about
            im = self.im_unbinned(im)

        # At this point, im may or may not be a copy of our primary
        # data.  But that is OK, we won't muck with it from now on
        # (promise)

        # The general method is to take the absolute value of the
        # gradient along each row to spot the edges of the ND filter.
        # Because contrast can be low in the Jupiter images, we need
        # to combine n_y_steps rows.  However, since the ND filter can
        # be tilted by ~20 degrees or so, combining rows washes out
        # the edge of the ND filter.  So shift each row to a common
        # center based on the default_ND_params.  Flats are high
        # contrast, so we can use a slightly different algorithm for
        # them and iterate to jump-start the process with them

        ND_edges = []
        ypts = []

        # Create yrange at y_bin intervals starting at ytop (low
        # number in C fashion) and extending to ybot (high number),
        # chopping of the last one if it goes too far
        y_bin = int((ybot - ytop) / self.n_y_steps)
        #yrange = np.arange(0, im.shape[0], y_bin)
        yrange = np.arange(ytop, ybot, y_bin)
        if yrange[-1] + y_bin > ybot:
            yrange = yrange[0:-1]
        # picturing the image in C fashion, indexed from the top down,
        # ypt_top is the top point from which we bin y_bin rows together

        for ypt_top in yrange:
            # We will be referencing the measured points to the center
            # of the bin
            ycent = ypt_top + y_bin / 2

            if default_ND_params is None:
                # We have already made sure we are a flat at this
                # point, so just run with it.  Flats are high
                # contrast, low noise.  When we run this the first
                # time around, features are rounded and shifted by the
                # ND angle, but still detectable.

                # We can chop off the edges of the smaller SII
                # filters to prevent problems with detection of
                # edges of those filters
                bounds = self.small_filt_crop[:, 1]
                profile = np.sum(
                    im[ypt_top:ypt_top + y_bin, bounds[0]:bounds[1]], 0)
                #plt.plot(bounds[0]+np.arange(bounds[1]-bounds[0]), profile)
                #plt.show()
                # Just doing d2 gets two peaks, so multiply
                # by the original profile to kill the inner peaks
                smoothed_profile \
                    = signal.savgol_filter(profile, self.x_filt_width, 3)
                d = np.gradient(smoothed_profile, 10)
                d2 = np.gradient(d, 10)
                s = np.abs(d2) * profile
            else:
                # Non-flat case.  We want to morph the image by
                # shifting each row by by the amount predicted by the
                # default_ND_params.  This lines the edges of the ND
                # filter up for easy spotting.  We will morph the
                # image directly into a subim of just the right size
                default_ND_width = (default_ND_params[1, 1] -
                                    default_ND_params[1, 0])
                subim_hw = int(default_ND_width / 2 + self.search_margin)
                subim = np.zeros((y_bin, 2 * subim_hw))

                # rowpt is each row in the ypt_top y_bin, which we need to
                # shift to accumulate into a subim that is the morphed
                # image.
                for rowpt in np.arange(y_bin):
                    # determine how many columns we will shift each row by
                    # using the default_ND_params
                    this_ND_center \
                        = int(
                            np.round(
                                np.mean(
                                    self.ND_edges(
                                        rowpt+ypt_top,
                                        default_ND_params))))
                    #print('this_ND_center: ', this_ND_center)
                    #print('subim_hw: ', subim_hw)
                    dcenter = np.abs(this_ND_center - self.shape[1] / 2)
                    if dcenter > self.shape[1] / 4:
                        raise ValueError('this_ND_center too far off')
                    left = max((0, this_ND_center - subim_hw))
                    right = min((this_ND_center + subim_hw,
                                 this_ND_center + subim.shape[1] - 1))
                    #print('(left, right): ', (left, right))
                    subim[rowpt, :] \
                        = im[ypt_top+rowpt, left:right]

                profile = np.sum(subim, 0)
                # This spots the sharp edge of the filter surprisingly
                # well, though the resulting peaks are a little fat
                # (see signal.find_peaks_cwt arguments, below)
                smoothed_profile \
                    = signal.savgol_filter(profile, self.x_filt_width, 0)
                d = np.gradient(smoothed_profile, 10)
                s = np.abs(d)
                # To match the logic in the flat case, calculate
                # bounds of the subim picturing that it is floating
                # inside of the full image
                bounds = im.shape[1] / 2 + np.asarray((-subim_hw, subim_hw))
                bounds = bounds.astype(int)

            # https://blog.ytotech.com/2015/11/01/findpeaks-in-python/
            # points out same problem I had with with cwt.  It is too
            # sensitive to little peaks.  However, I can find the peaks
            # and just take the two largest ones
            #peak_idx = signal.find_peaks_cwt(s, np.arange(5, 20), min_snr=2)
            #peak_idx = signal.find_peaks_cwt(s, np.arange(2, 80), min_snr=2)
            peak_idx = signal.find_peaks_cwt(s,
                                             cwt_width_arange,
                                             min_snr=self.cwt_min_snr)
            # Need to change peak_idx into an array instead of a list for
            # indexing
            peak_idx = np.array(peak_idx)

            # Give up if we don't find two clear edges
            if peak_idx.size < 2:
                log.debug('No clear two peaks inside bounds ' + str(bounds))
                #plt.plot(s)
                #plt.show()
                continue

            if default_ND_params is None:
                # In the flat case where we are deriving ND_params for
                # the first time, assume we have a set of good peaks,
                # sort on peak size
                sorted_idx = np.argsort(s[peak_idx])
                # Unwrap
                peak_idx = peak_idx[sorted_idx]

                # Throw out if lower peak is too weak.  Use Carey Woodward's
                # trick of estimating the noise on the continuum.  To avoid
                # contamination, do this calc just over our desired interval
                #ss = s[bounds[0]:bounds[1]]

                #noise = np.std(ss[1:-1] - ss[0:-2])
                noise = np.std(s[1:-1] - s[0:-2])
                #print(noise)
                if s[peak_idx[-2]] < noise:
                    #print("Rejected -- not above noise threshold")
                    continue
                # Find top two and put back in index order
                edge_idx = np.sort(peak_idx[-2:])
                # Sanity check
                de = edge_idx[1] - edge_idx[0]
                if (de < self.max_ND_width_range[0]
                        or de > self.max_ND_width_range[1]):
                    continue

                # Accumulate in tuples
                ND_edges.append(edge_idx)
                ypts.append(ycent)

            else:
                # In lower S/N case.  Compute all the permutations and
                # combinations of peak differences so we can find the
                # pair that is closest to our expected value
                diff_arr = []
                for ip in np.arange(peak_idx.size - 1):
                    for iop in np.arange(ip + 1, peak_idx.size):
                        diff_arr.append(
                            (ip, iop, peak_idx[iop] - peak_idx[ip]))
                diff_arr = np.asarray(diff_arr)
                closest = np.abs(diff_arr[:, 2] - default_ND_width)
                sorted_idx = np.argsort(closest)
                edge_idx = peak_idx[diff_arr[sorted_idx[0], 0:2]]
                # Sanity check
                de = edge_idx[1] - edge_idx[0]
                if (de < self.max_ND_width_range[0]
                        or de > self.max_ND_width_range[1]):
                    continue

                # Accumulate in tuples
                ND_edges.append(edge_idx)
                ypts.append(ycent)

            if self.plot_prof:
                plt.plot(profile)
                plt.show()
            if self.plot_dprof:
                plt.plot(s)
                plt.show()

        if len(ND_edges) < 2:
            if default_ND_params is None:
                raise ValueError('Not able to find ND filter position')
            log.warning('Unable to improve filter position over initial guess')
            return default_ND_params

        ND_edges = np.asarray(ND_edges) + bounds[0]
        ypts = np.asarray(ypts)

        # Put the ND_edges back into the original orientation before
        # we cshifted them with default_ND_params
        if default_ND_params is not None:
            es = []
            for iy in np.arange(ypts.size):
                this_default_ND_center\
                    = np.round(
                        np.mean(
                            self.ND_edges(
                                ypts[iy], default_ND_params)))
                cshift = int(this_default_ND_center - im.shape[1] / 2.)
                es.append(ND_edges[iy, :] + cshift)

                #es.append(self.default_ND_params[1,:] - im.shape[1]/2. + self.default_ND_params[0,:]*(this_y - im.shape[0]/2))
            ND_edges = np.asarray(es)

        if self.plot_ND_edges:
            plt.plot(ypts, ND_edges)
            plt.show()

        # Try an iterative approach to fitting lines to the ND_edges
        ND_edges = np.asarray(ND_edges)
        ND_params0 = iter_linfit(ypts - im.shape[0] / 2, ND_edges[:, 0],
                                 self.max_fit_delta_pix)
        ND_params1 = iter_linfit(ypts - im.shape[0] / 2, ND_edges[:, 1],
                                 self.max_fit_delta_pix)
        # Note when np.polyfit is given 2 vectors, the coefs
        # come out in columns, one per vector, as expected in C.
        ND_params = np.transpose(np.asarray((ND_params0, ND_params1)))

        # DEBUGGING
        #plt.plot(ypts, self.ND_edges(ypts, ND_params))
        #plt.show()

        dp = abs((ND_params[0, 1] - ND_params[0, 0]) * im.shape[0] / 2)
        if dp > self.max_parallel_delta_pix:
            txt = 'ND filter edges are not parallel.  Edges are off by ' + str(
                dp) + ' pixels.'
            #print(txt)
            #plt.plot(ypts, ND_edges)
            #plt.show()

            if default_ND_params is None:
                raise ValueError(txt +
                                 '  No initial try available, raising error.')
            log.warning(txt + ' Returning initial try.')
            ND_params = default_ND_params

        return ND_params
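The per-row edge detection above reduces to: take the absolute gradient of a 1D profile, find candidate peaks with find_peaks_cwt, and keep the two strongest in index order. A standalone sketch on a synthetic profile; the widths and noise level are illustrative, not the class's tuned values:

import numpy as np
from scipy import signal

def find_two_edges(profile, widths=np.arange(8, 80), min_snr=1):
    """Return indices of the two strongest gradient peaks, or None."""
    s = np.abs(np.gradient(profile))
    peak_idx = np.asarray(signal.find_peaks_cwt(s, widths, min_snr=min_snr))
    if peak_idx.size < 2:
        return None
    strongest = peak_idx[np.argsort(s[peak_idx])][-2:]
    return np.sort(strongest)

# Synthetic profile: a dimmed band (the "ND filter") between columns 400 and 600
x = np.arange(1000)
profile = np.where((x > 400) & (x < 600), 0.1, 1.0) + 0.01 * np.random.randn(1000)
print(find_two_edges(profile))   # approximately [400 600]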
Exemple #56
0
parser.add_argument("--mkf-only",   help="Grab the prefilter data files only", action='store_true', dest='mkf_only')
parser.add_argument("--decryptkey", help="Add decryption key to run GPG after download", type=str, default=None)
parser.add_argument("--unzip",      help="Gunzip after decrypting", action='store_true')
args = parser.parse_args()

# ----------------------------------------------------------------------
# Checking the presence of GPG or GPG2
# ----------------------------------------------------------------------
gpg_version = 'gpg'
if args.decryptkey:
    try:
        cmd = ["{}".format(gpg_version),"--version"]
        subprocess.call(cmd)
    except OSError as e:
        if e.errno == os.errno.ENOENT:
            log.warning("gpg not found...Trying gpg2")
            gpg_version = 'gpg2'
            cmd = ["{}".format(gpg_version),"--version"]
            try:
                subprocess.call(cmd)
            except OSError as e:
                if e.errno == os.errno.ENOENT:
                    log.warning("gpg2 not found...No decryption will be performed")
                    exit()
                else:
                    log.error("Something went wrong while trying to run gpg2")
                    raise
        else:
            log.error("Something went wrong while trying to run gpg")
            raise
log.info("{} will be used".format(gpg_version))
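An alternative sketch of the same availability check: shutil.which() reports whether an executable is on the PATH without running it and catching OSError. This is a simplification of the logic above, not a drop-in replacement:

import shutil

gpg_version = None
for candidate in ('gpg', 'gpg2'):
    if shutil.which(candidate):
        gpg_version = candidate
        break

if gpg_version is None:
    print("Neither gpg nor gpg2 found; no decryption will be performed")
else:
    print("{} will be used".format(gpg_version))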
Exemple #57
0
    def obj_center(self):
        """Returns center pixel coords of Jupiter whether or not Jupiter is on ND filter.  Unbinned pixel coords are returned.  Use [Cor]Obs_Data.binned() to convert to binned pixels.
        """

        # Check to see if we really want to calculate the center
        imagetyp = self.meta.get('IMAGETYP')
        if imagetyp.lower() in ['bias', 'dark', 'flat']:
            self.no_obj_center = True
        if self.no_obj_center:
            return NoCenterPGD(self).obj_center

        # Work with a copy of the unbinned data array, since we are
        # going to muck with it
        im = self.self_unbinned.data.copy()
        back_level = self.background / (np.prod(self.binning))

        satlevel = self.meta.get('SATLEVEL')  # * self.unit
        readnoise = self.meta.get('RDNOISE')  # * u.electron

        # Establish some metrics to see if Jupiter is on or off the ND
        # filter.  Easiest one is number of saturated pixels
        # /data/io/IoIO/raw/2018-01-28/R-band_off_ND_filter.fit gives
        # 4090 of these.  Calculation below suggests 1000 should be a
        # good minimum number of saturated pixels (assuming no
        # additional scattered light).  A star off the ND filter
        # /data/io/IoIO/raw/2017-05-28/Sky_Flat-0001_SII_on-band.fit
        # gives 124 num_sat
        satc = np.where(im >= satlevel)
        num_sat = len(satc[0])
        #log.debug('Number of saturated pixels in image: ' + str(num_sat))

        # Work another way to see if the ND filter has a low flux
        # Note, this assignment dereferences im from HDUList[0].data
        im = im - back_level
        satlevel -= back_level

        # Get the coordinates of the ND filter
        NDc = self.ND_coords

        # Filter ND coords for ones that are at least 5 std of the
        # bias noise above the median.  Calculate a fresh median for
        # the ND filter just in case it is different than the median
        # of the image as a whole (which is now 0 -- see above).  We
        # can't use the std of the ND filter, since it is too biased
        # by Jupiter when it is there.
        NDmed = np.median(im[NDc])
        boostc = np.where(im[NDc] > (NDmed + 5 * readnoise))
        boost_NDc0 = np.asarray(NDc[0])[boostc]
        boost_NDc1 = np.asarray(NDc[1])[boostc]

        # Come up with a metric for when Jupiter is in the ND filter.
        # Below is my scratch work
        # Rj = np.asarray((50.1, 29.8))/2. # arcsec
        # plate = 1.59/2 # main "/pix
        #
        # Rj/plate # Jupiter pixel radius
        # array([ 31.50943396,  18.74213836])
        #
        # np.pi * (Rj/plate)**2 # Jupiter area in pix**2
        # array([ 3119.11276312,  1103.54018437])
        #
        # Jupiter is generally better than 1000
        #
        # np.pi * (Rj/plate)**2 * 1000
        # array([ 3119112.76311733,  1103540.18436529])

        sum_on_ND_filter = np.sum(im[boost_NDc0, boost_NDc1])
        # Adjust for the case where ND filter may have a fairly
        # high sky background.  We just want Jupiter
        sum_on_ND_filter -= NDmed * len(boost_NDc0)

        #log.debug('sum of significant pixels on ND filter = ' + str(sum_on_ND_filter))
        print('sum_on_ND_filter = ', sum_on_ND_filter)
        #if num_sat > 1000 or sum_on_ND_filter < 1E6:
        # Vega is 950,000
        if num_sat > 1000 or sum_on_ND_filter < 0.75E6:
            log.warning('Jupiter outside of ND filter?')
            # Outside the ND filter, Jupiter should be saturating.  To
            # make the center of mass calc more accurate, just set
            # everything that is not getting toward saturation to 0
            # --> Might want to fine-tune or remove this so bright
            im[np.where(im < satlevel * 0.7)] = 0

            #log.debug('Approx number of saturating pixels ' + str(np.sum(im)/65000))

            # 25 worked for a star, 250 should be conservative for
            # Jupiter (see above calcs)
            # if np.sum(im) < satlevel * 25:
            if np.sum(im) < satlevel * 250:
                self.quality = 4
                log.warning(
                    'Jupiter (or suitably bright object) not found in image.  This object is unlikely to show up on the ND filter.  Setting quality to '
                    + str(self.quality) + ', center to [-99, -99]')
                self._obj_center = np.asarray([-99, -99])
            else:
                self.quality = 6
                # If we made it here, Jupiter is outside the ND filter,
                # but shining bright enough to be found
                # --> Try iterative approach
                ny, nx = im.shape
                y_x = np.asarray(ndimage.measurements.center_of_mass(im))
                print(y_x)
                y = np.arange(ny) - y_x[0]
                x = np.arange(nx) - y_x[1]
                # input/output Cartesian direction by default
                xx, yy = np.meshgrid(x, y)
                rr = np.sqrt(xx**2 + yy**2)
                im[np.where(rr > 200)] = 0
                y_x = np.asarray(ndimage.measurements.center_of_mass(im))

                self._obj_center = y_x
                log.info('Object center (X, Y; binned) = ' +
                         str(self.binned(self._obj_center)[::-1]))
        else:
            # Here is where we boost what is sure to be Jupiter, if Jupiter is
            # in the ND filter
            # --> this has trouble when there is bright skys
            im[boost_NDc0, boost_NDc1] *= 1000
            # Clean up any signal from clouds off the ND filter, which can
            # mess up the center of mass calculation
            # Fri Jun 25 14:35:05 2021 EDT  jpmorgen@snipe
            # Not sure what I was thinking by using the
            # sx694.satlevel, since satlevel may be adjusted for gain
            # --> check this for error
            im[np.where(im < satlevel)] = 0
            #im[np.where(im < sx694.satlevel)] = 0
            y_x = ndimage.measurements.center_of_mass(im)

            #print(y_x[::-1])
            #plt.imshow(im)
            #plt.show()
            #return (y_x[::-1], ND_center)

            # Stay in Pythonic y, x coords
            self._obj_center = np.asarray(y_x)
            log.debug('Object center (X, Y; binned) = ' +
                      str(self.binned(self._obj_center)[::-1]))
            self.quality = 6
        self.header['OBJ_CR0'] = (self._obj_center[1], 'Object center X')
        self.header['OBJ_CR1'] = (self._obj_center[0], 'Object center Y')
        self.header['QUALITY'] = (
            self.quality, 'Quality on 0-10 scale of center determination')
        return self._obj_center
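The off-filter branch above is essentially a thresholded center-of-mass measurement. A minimal, standalone sketch with a synthetic array and saturation level:

import numpy as np
from scipy import ndimage

im = np.zeros((200, 200))
im[90:110, 140:160] = 60000.0   # a bright, nearly saturated blob
satlevel = 65000.0

# Zero out everything well below saturation, then centroid what remains
im[im < satlevel * 0.7] = 0
y, x = ndimage.center_of_mass(im)
print(round(y, 1), round(x, 1))  # 99.5 149.5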
Exemple #58
0
def find_spatial_pixel_index(cube, xlo, xhi, ylo, yhi):
    '''
    Given low and high cuts, return the pixel coordinates for a rectangular
    region in the given cube or spatial projection. lo and hi inputs can be
    given in pixels, "min"/"max", or in world coordinates.

    When spatial WCS dimensions are given as an `~astropy.units.Quantity`,
    the spatial coordinates of the 'lo' and 'hi' corners are solved together.
    This minimizes WCS variations due to the sky curvature when slicing from
    a large (>1 deg) image.


    Parameters
    ----------
    cube : :class:`~SpectralCube` or spatial :class:`~Projection`
        A spectral-cube or projection/slice with spatial dimensions.
    [xy]lo/[xy]hi : int or :class:`~astropy.units.Quantity` or ``min``/``max``
        The endpoints to extract.  If given as a ``Quantity``, will be
        interpreted as World coordinates.  If given as a ``string`` or
        ``int``, will be interpreted as pixel coordinates.

    Returns
    -------
    limit_dict : dict
        Pixel coordinates of [xy]lo/[xy]hi in the given ``cube``.

    '''

    ndim = cube.ndim

    for val in (xlo, ylo, xhi, yhi):
        if hasattr(val, 'unit') and not val.unit.is_equivalent(u.degree):
            raise u.UnitsError("The X and Y slices must be specified in "
                               "degree-equivalent units.")

    limit_dict = {}

    # Match corners. If one uses a WCS coord, set 'min'/'max'
    # To the lat or long extrema.
    # We only care about matching spatial corners.
    xlo_unit = hasattr(xlo, 'unit')
    ylo_unit = hasattr(ylo, 'unit')

    # Do min/max switching if the WCS grid increases/decreases
    # with the pixel grid.
    ymin = min if cube.wcs.wcs.cdelt[1] > 0 else max
    xmin = min if cube.wcs.wcs.cdelt[0] > 0 else max
    ymax = max if cube.wcs.wcs.cdelt[1] > 0 else min
    xmax = max if cube.wcs.wcs.cdelt[0] > 0 else min

    if not any([xlo_unit, ylo_unit]):
        limit_dict['xlo'] = 0 if xlo == 'min' else xlo
        limit_dict['ylo'] = 0 if ylo == 'min' else ylo
    else:
        if xlo_unit:
            limit_dict['xlo'] = xlo
            limit_dict['ylo'] = ymin(
                cube.latitude_extrema) if ylo == 'min' else ylo
        if ylo_unit:
            limit_dict['ylo'] = ylo
            limit_dict['xlo'] = xmin(
                cube.longitude_extrema) if xlo == 'min' else xlo

    xhi_unit = hasattr(xhi, 'unit')
    yhi_unit = hasattr(yhi, 'unit')

    if not any([xhi_unit, yhi_unit]):

        # For 3D cube
        if ndim == 3:
            limit_dict['xhi'] = cube.shape[2] if xhi == 'max' else xhi
            limit_dict['yhi'] = cube.shape[1] if yhi == 'max' else yhi
        # For 2D spatial projection/slice
        else:
            limit_dict['xhi'] = cube.shape[1] if xhi == 'max' else xhi
            limit_dict['yhi'] = cube.shape[0] if yhi == 'max' else yhi
    else:
        if xhi_unit:
            limit_dict['xhi'] = xhi
            limit_dict['yhi'] = ymax(
                cube.latitude_extrema) if yhi == 'max' else yhi
        if yhi_unit:
            limit_dict['yhi'] = yhi
            limit_dict['xhi'] = xmax(
                cube.longitude_extrema) if xhi == 'max' else xhi

    # list to track which entries had units
    united = []

    # Solve the spatial axes together.
    # There's 3 options:
    # (1) If both pixel units, do nothing
    # (2) If both WCS units, use world_to_array_index_values
    # (3) If mixed, minimize the distance between the spatial position grids
    #     for the cube to find the closest spatial pixel.

    for corn in ['lo', 'hi']:
        grids = {}

        # Check if either were given as a WCS value with a unit
        x_hasunit = hasattr(limit_dict['x' + corn], 'unit')
        y_hasunit = hasattr(limit_dict['y' + corn], 'unit')

        # print(limit_dict['x'+corn], limit_dict['y'+corn])
        # print(x_hasunit, y_hasunit)

        # (1) If both pixel units, we keep in pixel units.
        if not any([x_hasunit, y_hasunit]):
            continue

        # (2) If both WCS units, use world_to_array_index_values
        elif all([x_hasunit, y_hasunit]):

            corn_arr = np.array(
                [limit_dict['x' + corn].value, limit_dict['y' + corn].value])

            xmin, ymin = cube.wcs.celestial.world_to_array_index_values(
                corn_arr.reshape((1, 2)))[0]

            limit_dict['y' + corn] = ymin
            limit_dict['x' + corn] = xmin

            if corn == 'hi':
                united.append('y' + corn)
                united.append('x' + corn)

        # (3) If mixed, minimize the distance between the spatial position grids
        #     for the cube to find the closest spatial pixel, limited to the 1 pixel
        #     value that is given.
        else:

            # We change the dimensions being sliced depending on whether the
            # x or y dim is given in pixel units.
            # This allows for a 1D minimization instead of needing both spatial axes.

            if x_hasunit:
                pixval = limit_dict['y' + corn]
                lim = 'x' + corn
                slicedim = 0
            else:
                pixval = limit_dict['x' + corn]
                lim = 'y' + corn
                slicedim = 1

            if corn == 'lo':
                slice_pixdim = slice(pixval, pixval + 1)
            else:
                slice_pixdim = slice(pixval - 1, pixval)

            limval = limit_dict[lim]
            if hasattr(limval, 'unit'):
                united.append(lim)

                sl = [slice(None)]
                sl.insert(slicedim, slice_pixdim)

                if ndim == 3:
                    sl.insert(0, slice(0, 1))

                sl = tuple(sl)

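                # cube.world returns (spectral, lat, lon) for a 3D cube and
                # (lat, lon) for a 2D projection; pick out the world axis we
                # still need to search along: longitude when the y pixel is
                # fixed (slicedim == 0), latitude when the x pixel is fixed.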
                if slicedim == 0:
                    spine = cube.world[sl][2 if ndim == 3 else 1]
                else:
                    spine = cube.world[sl][1 if ndim == 3 else 0]

                val = np.argmin(np.abs(limval - spine))
                if limval > spine.max() or limval < spine.min():
                    log.warning("The limit {0} is out of bounds. "
                                "Using min/max instead.".format(lim))

                limit_dict[lim] = val

    # Correct ordering (this shouldn't be necessary but do a quick check)
    for xx in 'yx':
        hi, lo = limit_dict[xx + 'hi'], limit_dict[xx + 'lo']
        if hi < lo:
            # must have high > low
            limit_dict[xx + 'hi'], limit_dict[xx + 'lo'] = lo, hi

        if xx + 'hi' in united:
            # End-inclusive indexing: need to add one for the high slice
            # Only do this for converted values, not for pixel values
            # (i.e., if the xlo/ylo/zlo value had units)
            limit_dict[xx + 'hi'] += 1

    return limit_dict
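
Below is a standalone sketch (not part of the example above) of option (2): converting world coordinates to array indices with astropy's world_to_array_index_values. The WCS parameters are invented for illustration, and the per-axis calling convention is used rather than the packed (1, 2) array seen above.

# Hedged illustration: a made-up 2D celestial WCS, used only to show the
# world -> array-index conversion of option (2).
import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [83.0, 22.0]       # reference world coordinate (deg)
w.wcs.crpix = [50.5, 50.5]       # reference pixel (1-based)
w.wcs.cdelt = [-0.01, 0.01]      # RA decreases with pixel x, as is typical

# world_to_array_index_values returns indices in array order (row, col),
# i.e. (y, x) for a 2D image.
ra = np.array([83.1])
dec = np.array([21.9])
iy, ix = w.world_to_array_index_values(ra, dec)
print(ix, iy)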
Exemple #59
0
    def change_binary_epoch(self, new_epoch):
        """Change the epoch for this binary model.

        TASC will be changed to the epoch of the ascending node closest to the
        supplied epoch, and the Laplace parameters (EPS1, EPS2) and projected
        semimajor axis (A1 or X) will be updated according to the specified
        EPS1DOT, EPS2DOT, and A1DOT or XDOT, if present.

        Note that derivatives of binary orbital frequency higher than the first
        (FB2, FB3, etc.) are ignored in computing the new TASC, even if present in
        the model. If high-precision results are necessary, especially for models
        containing higher derivatives of orbital frequency, consider re-fitting
        the model to a set of TOAs.

        Parameters
        ----------
        new_epoch: float MJD (in TDB) or `astropy.Time` object
            The new epoch value.
        """
        if isinstance(new_epoch, Time):
            new_epoch = Time(new_epoch, scale="tdb", precision=9)
        else:
            new_epoch = Time(new_epoch, scale="tdb", format="mjd", precision=9)

        try:
            self.FB2.quantity
            log.warning(
                "Ignoring orbital frequency derivatives higher than FB1 "
                "in computing new TASC")
        except AttributeError:
            pass

        # Get PB and PBDOT from model
        if self.PB.quantity is not None:
            PB = self.PB.quantity
            if self.PBDOT.quantity is not None:
                PBDOT = self.PBDOT.quantity
            else:
                PBDOT = 0.0 * u.Unit("")
        else:
            PB = 1.0 / self.FB0.quantity
            try:
                PBDOT = -self.FB1.quantity / self.FB0.quantity**2
            except AttributeError:
                PBDOT = 0.0 * u.Unit("")

        # Find the closest ascending node time and reassign TASC
        tasc_ld = self.TASC.quantity.tdb.mjd_long
        dt = (new_epoch.tdb.mjd_long - tasc_ld) * u.day
        d_orbits = dt / PB - PBDOT * dt**2 / (2.0 * PB**2)
        n_orbits = np.round(d_orbits.to(u.Unit("")))
        dt_integer_orbits = PB * n_orbits + PB * PBDOT * n_orbits**2 / 2.0
        self.TASC.quantity = self.TASC.quantity + dt_integer_orbits

        # Update PB or FB0, FB1, etc.
        if isinstance(self.binary_instance.orbits_cls, bo.OrbitPB):
            dPB = PBDOT * dt_integer_orbits
            self.PB.quantity = self.PB.quantity + dPB
        else:
            fbterms = [
                getattr(self, k).quantity
                for k in self.get_prefix_mapping("FB").values()
            ]
            fbterms = [0.0 * u.Unit("")] + fbterms

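            # fbterms is the Taylor series of the orbital phase (the leading
            # zero is the phase term); its (n + 1)-th derivative evaluated at
            # dt is the value of FBn at the shifted epoch.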
            for n in range(len(fbterms) - 1):
                cur_deriv = getattr(self, "FB{}".format(n))
                cur_deriv.value = taylor_horner_deriv(dt.to(u.s),
                                                      fbterms,
                                                      deriv_order=n + 1)

        # Update EPS1, EPS2, and A1
        if self.EPS1DOT.quantity is not None:
            dEPS1 = self.EPS1DOT.quantity * dt_integer_orbits
            self.EPS1.quantity = self.EPS1.quantity + dEPS1
        if self.EPS2DOT.quantity is not None:
            dEPS2 = self.EPS2DOT.quantity * dt_integer_orbits
            self.EPS2.quantity = self.EPS2.quantity + dEPS2
        if self.A1DOT.quantity is not None:
            dA1 = self.A1DOT.quantity * dt_integer_orbits
            self.A1.quantity = self.A1.quantity + dA1
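
A small numeric sketch (values invented) of the orbit-counting step above: the epoch is always shifted by a whole number of orbits, computed from PB and PBDOT, so the orbital phase at the new epoch is preserved.

# Made-up numbers, mirroring the d_orbits / n_orbits arithmetic above.
import numpy as np
from astropy import units as u

PB = 1.5 * u.day              # orbital period (invented value)
PBDOT = 1e-12 * u.Unit("")    # dimensionless period derivative (invented)
dt = 1234.567 * u.day         # requested new epoch minus the current TASC

# Orbits elapsed over dt, to first order in PBDOT
d_orbits = dt / PB - PBDOT * dt**2 / (2.0 * PB**2)
n_orbits = np.round(d_orbits.to(u.Unit("")))

# Shift by an integer number of orbits so the orbital phase is unchanged
dt_integer_orbits = PB * n_orbits + PB * PBDOT * n_orbits**2 / 2.0
print(n_orbits, dt_integer_orbits)   # ~823 orbits, ~1234.5 d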
Exemple #60
0
    def change_binary_epoch(self, new_epoch):
        """Change the epoch for this binary model.

        T0 will be changed to the periapsis time closest to the supplied epoch,
        and the argument of periapsis (OM), eccentricity (ECC), and projected
        semimajor axis (A1 or X) will be updated according to the specified
        OMDOT, EDOT, and A1DOT or XDOT, if present.

        Note that derivatives of binary orbital frequency higher than the first
        (FB2, FB3, etc.) are ignored in computing the new T0, even if present in
        the model. If high-precision results are necessary, especially for models
        containing higher derivatives of orbital frequency, consider re-fitting
        the model to a set of TOAs. The use of :func:`pint.toa.make_fake_toas`
        and the :class:`pint.fitter.Fitter` option ``track_mode="use_pulse_number"``
        can make this extremely simple.

        Parameters
        ----------
        new_epoch: float MJD (in TDB) or `astropy.Time` object
            The new epoch value.
        """
        if isinstance(new_epoch, Time):
            new_epoch = Time(new_epoch, scale="tdb", precision=9)
        else:
            new_epoch = Time(new_epoch, scale="tdb", format="mjd", precision=9)

        try:
            self.FB2.quantity
            log.warning(
                "Ignoring orbital frequency derivatives higher than FB1 "
                "in computing new T0"
            )
        except AttributeError:
            pass

        # Get PB and PBDOT from model
        if self.PB.quantity is not None:
            PB = self.PB.quantity
            if self.PBDOT.quantity is not None:
                PBDOT = self.PBDOT.quantity
            else:
                PBDOT = 0.0 * u.Unit("")
        else:
            PB = 1.0 / self.FB0.quantity
            try:
                PBDOT = -self.FB1.quantity / self.FB0.quantity ** 2
            except AttributeError:
                PBDOT = 0.0 * u.Unit("")

        # Find closest periapsis time and reassign T0
        t0_ld = self.T0.quantity.tdb.mjd_long
        dt = (new_epoch.tdb.mjd_long - t0_ld) * u.day
        d_orbits = dt / PB - PBDOT * dt ** 2 / (2.0 * PB ** 2)
        n_orbits = np.round(d_orbits.to(u.Unit("")))
        dt_integer_orbits = PB * n_orbits + PB * PBDOT * n_orbits ** 2 / 2.0
        self.T0.quantity = self.T0.quantity + dt_integer_orbits

        # Update PB or FB0, FB1, etc.
        if isinstance(self.binary_instance.orbits_cls, bo.OrbitPB):
            dPB = PBDOT * dt_integer_orbits
            self.PB.quantity = self.PB.quantity + dPB
        else:
            fbterms = [
                getattr(self, k).quantity
                for k in self.get_prefix_mapping("FB").values()
            ]
            fbterms = [0.0 * u.Unit("")] + fbterms

            for n in range(len(fbterms) - 1):
                cur_deriv = getattr(self, "FB{}".format(n))
                cur_deriv.value = taylor_horner_deriv(
                    dt.to(u.s), fbterms, deriv_order=n + 1
                )

        # Update ECC, OM, and A1 (only if the corresponding derivative is set)
        if self.EDOT.quantity is not None:
            dECC = self.EDOT.quantity * dt_integer_orbits
            self.ECC.quantity = self.ECC.quantity + dECC
        if self.OMDOT.quantity is not None:
            dOM = self.OMDOT.quantity * dt_integer_orbits
            self.OM.quantity = self.OM.quantity + dOM
        if self.A1DOT.quantity is not None:
            dA1 = self.A1DOT.quantity * dt_integer_orbits
            self.A1.quantity = self.A1.quantity + dA1
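
A hedged usage sketch for the method above: the par file name is hypothetical, and the component key assumes a DD binary model; in PINT, change_binary_epoch is reached through the binary component of the timing model.

from astropy.time import Time
from pint.models import get_model

model = get_model("J1234+5678.par")     # hypothetical par file
binary = model.components["BinaryDD"]   # assumes a DD binary component
binary.change_binary_epoch(Time(58000.0, format="mjd", scale="tdb"))
print(binary.T0.quantity)               # T0 now lies near MJD 58000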