Example #1
    def posvel(self, t, ephem, maxextrap=2):
        '''Return position and velocity vectors of NICER.

        t is an astropy.Time or array of astropy.Times
        maxextrap is the longest (in minutes) it is acceptable to
            extrapolate the S/C position
        '''
        # this is a simple edge check mainly to prevent use of the wrong
        # orbit file or a single orbit file with a merged event file; if
        # needed, can check to make sure there is a spline anchor point
        # sufficiently close to all event times
        tmin = np.min(self.FPorb['MJD_TT'])
        tmax = np.max(self.FPorb['MJD_TT'])
        if (tmin-np.min(t.tt.mjd) > float(maxextrap)/(60*24) or
            np.max(t.tt.mjd)-tmax > float(maxextrap)/(60*24)):
            log.error('Extrapolating NICER position by more than %d minutes!'%maxextrap)
            raise ValueError("Bad extrapolation of S/C file.")
        # Compute vector from SSB to Earth
        geo_posvel = objPosVel_wrt_SSB('earth', t, ephem)
        # Now add vector from Earth to NICER
        nicer_pos_geo = np.array([self.X(t.tt.mjd), self.Y(t.tt.mjd), self.Z(t.tt.mjd)])*self.FPorb['X'].unit
        nicer_vel_geo = np.array([self.Vx(t.tt.mjd), self.Vy(t.tt.mjd), self.Vz(t.tt.mjd)])*self.FPorb['Vx'].unit
        nicer_posvel = PosVel( nicer_pos_geo, nicer_vel_geo, origin='earth', obj='nicer')
        # Vector add to geo_posvel to get full posvel vector.
        return geo_posvel + nicer_posvel
Example #2
File: toa.py Project: yanwang2012/PINT
    def apply_clock_corrections(self):
        """Apply observatory clock corrections.

        Apply clock corrections to all the TOAs where corrections
        are available.  This routine actually changes
        the value of the TOA, although the correction is also listed
        as a new flag for the TOA called 'clkcorr' so that it can be
        reversed if necessary.  This routine also applies all 'TIME'
        commands and treats them exactly as if they were a part of the
        observatory clock corrections.
        """
        for t in self.toas:
            # any TOAs from an unknown observatory will not have TIME applied
            assert t.obs in self.observatories
        for obsname in self.observatories:
            mjds, ccorr = observatories_module.get_clock_corr_vals(obsname)
            # select the TOAs we will apply corrections to
            toas = [t for t in self.toas if t.obs == obsname and
                    "clkcorr" not in t.flags]
            tvals = numpy.array([t.mjd.value for t in toas])
            if numpy.any((tvals < mjds[0]) | (tvals > mjds[-1])):
                # FIXME: check the user sees this! should it be an exception?
                log.error("Some TOAs are not covered by the %s clock correction"
                    +" file, treating clock corrections as constant"
                    +" past the ends." % obsname)
            corrs = numpy.interp(tvals, mjds, ccorr)
            for corr, toa in zip(corrs, toas):
                corr *= u.us # the clock corrections are in microseconds
                if "time" in toa.flags:
                    corr += toa.flags["time"] * u.s # TIME commands are in sec
                toa.flags["clkcorr"] = corr
                toa.mjd += time.TimeDelta(corr)
                toa.mjd.delta_ut1_utc = toa.mjd.get_delta_ut1_utc(iers_a)
                toa.mjd.TDB = utils.time_to_mjd_mpf(toa.mjd.tdb)
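
The "constant past the ends" behaviour the error message refers to comes from numpy.interp, which clamps query points outside the sampled range to the first/last correction value. A minimal standalone sketch with toy numbers (not real clock data):

import numpy

# Toy clock-correction table: MJDs and corrections in microseconds
mjds = numpy.array([50000.0, 50010.0, 50020.0])
ccorr = numpy.array([1.0, 2.0, 3.0])
# TOAs inside and outside the covered range
tvals = numpy.array([49990.0, 50015.0, 50030.0])
print(numpy.interp(tvals, mjds, ccorr))  # [1.  2.5 3. ]; end values are held constant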
Example #3
    def earth_location_itrf(self, time=None):
        '''Return Fermi spacecraft location in ITRF coordinates'''

        if self.tt2tdb_mode.lower().startswith('none'):
            log.warning('Using location=None for TT to TDB conversion')
            return None
        elif self.tt2tdb_mode.lower().startswith('geo'):
            log.warning('Using location geocenter for TT to TDB conversion')
            return EarthLocation.from_geocentric(0.0*u.m,0.0*u.m,0.0*u.m)
        elif self.tt2tdb_mode.lower().startswith('spacecraft'):
            # First, interpolate Earth-Centered Inertial (ECI) geocentric
            # location from orbit file.
            # These are inertial coordinates aligned with ICRS, called GCRS
            # <http://docs.astropy.org/en/stable/api/astropy.coordinates.GCRS.html>
            pos_gcrs =  GCRS(CartesianRepresentation(self.X(time.tt.mjd)*u.m,
                                                     self.Y(time.tt.mjd)*u.m,
                                                     self.Z(time.tt.mjd)*u.m),
                             obstime=time)

            # Now transform ECI (GCRS) to ECEF (ITRS)
            # By default, this uses the WGS84 ellipsoid
            pos_ITRS = pos_gcrs.transform_to(ITRS(obstime=time))

            # Return geocentric ITRS coordinates as an EarthLocation object
            return pos_ITRS.earth_location
        else:
            log.error('Unknown tt2tdb_mode %s, using None', self.tt2tdb_mode)
            return None
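
The GCRS-to-ITRS conversion used in the 'spacecraft' branch can be exercised on its own. A minimal sketch with a made-up geocentric position (time and coordinates are purely illustrative):

from astropy.coordinates import GCRS, ITRS, CartesianRepresentation
from astropy.time import Time
import astropy.units as u

t = Time('2015-06-30 12:00:00', scale='tt')
# Toy Earth-Centered Inertial position, roughly a low-Earth-orbit radius
pos_gcrs = GCRS(CartesianRepresentation(6.8e6*u.m, 0.0*u.m, 0.0*u.m), obstime=t)
pos_itrs = pos_gcrs.transform_to(ITRS(obstime=t))
print(pos_itrs.earth_location)  # geocentric ITRS position as an EarthLocation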
Example #4
def spectra(data,wcs=None,mask=None,unit=None,restrict=None):
    """


        Parameters
        ----------
        data : (M,N,Z) numpy.ndarray or astropy.nddata.NDData or astropy.nddata.NDDataRef
            Astronomical data cube.
        wcs : astropy.wcs.wcs.WCS
            World Coordinate System to use
        mask : numpy.ndarray
            Mask for data.
        unit : astropy.units.Unit
            Astropy unit (http://docs.astropy.org/en/stable/units/)
        restrict : boolean


        Returns
        -------
        result: astropy.nddata.NDData
            Spectrum of the data cube

    """
    if restrict is None:
        #Create NDD and WCS change...
        return core.integrate(data,axis=(1,2))
    else:
        log.error("Not Implemented Yet!")

        # Get 1 pixel aperture
        aperture=np.abs(wcs.celestial.wcs.cdelt[0])*u.deg
Example #5
 def run(self):
     """Perform the band-merging."""
     cmd = self.get_stilts_command()
     status = os.system(cmd)
     if status != 0:
         log.error('{0}: Unexpected status ("{1}"): command was: {2}'.format(self.fieldid, status, cmd))
     return status
Example #6
def initialize_dataset(dset_name, group_path, XCov_filename, lock_filename):
    # only one process should modify the file to add the dataset if it doesn't exist
    with h5py.File(XCov_filename, mode='r') as f:
        make_group = False
        make_dataset = True

        try:
            group = f[group_path]
            logger.debug("Group already exists")
        except KeyError:
            make_group = True
            logger.debug("Group doesn't exist...")

        if not make_group and dset_name in group:
            make_dataset = False

    if make_group or make_dataset:
        lock = filelock.FileLock(lock_filename)
        try:
            with lock.acquire(timeout=90):
                logger.debug("File lock acquired: creating dataset for log-likelihoods")
                with h5py.File(XCov_filename, mode='r+') as f:
                    if make_group and group_path not in f:
                        group = f.create_group(group_path)
                    else:
                        group = f[group_path]

                    if dset_name not in group: # double checking!
                        ll_shape = (f['search']['X'].shape[0],)
                        ll_dset = group.create_dataset(dset_name, ll_shape, dtype='f')
                        ll_dset[:] = np.nan

        except filelock.Timeout:
            logger.error("Timed out trying to acquire file lock to create dataset.")
            sys.exit(1)
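
A minimal usage sketch for the helper above (file names, group path, and dataset name are placeholders; the HDF5 file must already exist and contain the 'search/X' dataset whose length sets the dataset shape):

# Safe to call from several processes: dataset creation is guarded by the file lock
initialize_dataset('isochrone_ll', '/likelihoods',
                   XCov_filename='data.h5', lock_filename='data.h5.lock')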
Example #7
def test_log_to_list_level():

    with log.log_to_list(filter_level='ERROR') as log_list:
        log.error("Error message")
        log.warning("Warning message")

    assert len(log_list) == 1 and log_list[0].levelname == 'ERROR'
Example #8
def on_key_press(event, fig, textbox, return_dict, pixl, flux):
    ax = fig.axes[0]
    if event.key == 'enter':
        if not str(textbox.text()).strip():
            logger.error("You must enter the wavelength value of an identified line "
                         "before pressing enter over it!")
            return

        pixl_val = event.xdata
        wvln_val = float(textbox.text())

        if wvln_val in return_dict['wavelength']:
            return

        p0 = (flux[int(round(pixl_val))],
              pixl_val,
              1.,
              flux[int(round(pixl_val))-16:int(round(pixl_val))+16].min())
        p,_ = so.curve_fit(gaussian_constant, pixl, flux, p0=p0)
        pixl_val = p[1]

        if p[2] > 5. or p[2] < 0.:
            msg = "Failed to fit line at pixel={}".format(event.xdata)
            logger.error(msg)
            fig.suptitle(msg, color='r', fontsize=12)
            return

        return_dict['wavelength'].append(wvln_val)
        return_dict['pixel'].append(pixl_val)

        ax.axvline(pixl_val, alpha=0.25, c='#2166AC')
        ax.text(pixl_val-1, ax.get_ylim()[1], r"{:.1f} $\AA$".format(wvln_val),
                ha='right', va='top', rotation=90)
        fig.suptitle('')
        pl.draw()
Example #9
    def callback(self, tmpfile):
        """
        TODO:
        """

        if tmpfile is None:
            logger.debug("Tempfile is None")
            return

        with open(tmpfile) as f:
            result = pickle.load(f)
        os.remove(tmpfile)

        logger.debug("Flushing {0} to output array...".format(result['index']))
        memmap = np.memmap(self.cache_file, mode='r+',
                           dtype=self.cache_dtype, shape=(len(self.w0),))
        if result['error_code'] != 0.:
            logger.error("Error code = {0}".format(result['error_code']))
            # error happened
            for key in memmap.dtype.names:
                if key in result:
                    memmap[key][result['index']] = result[key]

        else:
            # all is well
            for key in memmap.dtype.names:
                memmap[key][result['index']] = result[key]

        # flush to output array
        memmap.flush()
        logger.debug("...flushed, washing hands.")

        del result
        del memmap
Example #10
def extent(data,wcs=None,lower=None,upper=None):
    """
        Get the axes extent.

        Parameters
        ----------
        data : (M,N) or (M,N,Z) numpy.ndarray or astropy.nddata.NDData or astropy.nddata.NDDataRef
            Astronomical data cube.
        wcs : astropy.wcs.wcs.WCS
            World Coordinate System to use.
        lower : (M,N) or (M,N,Z) tuple of int
            Start coordinate in data
        upper : (M,N) or (M,N,Z) tuple of int
            End coordinate in data

        Returns
        -------
        result: (M, N) tuple of astropy.units.quantity.Quantity
            Axes extent

    """
    #TODO: These can be a decorator
    if wcs is None:
        log.error("A world coordinate system (WCS) is needed")
        return None
    if lower is None:
        lower=np.zeros(data.ndim)
    if upper is None:
        upper=data.shape
    idx=[lower,upper]
    idx_f  = np.fliplr(idx)
    values = wcs.wcs_pix2world(idx_f, 0)
    values = np.fliplr(values)
    return (_unitize(values[0],wcs),_unitize(values[1],wcs))
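
The fliplr calls above translate between numpy (row, column) index order and the (x, y) axis order expected by wcs_pix2world. A self-contained sketch with a toy 2-axis TAN projection (all values are illustrative):

import numpy as np
from astropy import wcs as astropy_wcs

w = astropy_wcs.WCS(naxis=2)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crpix = [1.0, 1.0]
w.wcs.crval = [83.0, -5.0]
w.wcs.cdelt = [-0.002, 0.002]

lower = np.zeros(2)                  # numpy-order corner (row, col)
upper = np.array([10.0, 10.0])
idx_f = np.fliplr([lower, upper])    # flip to WCS (x, y) order
values = w.wcs_pix2world(idx_f, 0)   # world coordinates of the two corners
print(np.fliplr(values))             # flip back to numpy order, as extent() does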
Example #11
def features(data,wcs=None,lower=None,upper=None):
    """
        Creates an array with WCS axes in features format

        Parameters
        ----------
        data : (M,N) or (M,N,Z) numpy.ndarray or astropy.nddata.NDData or astropy.nddata.NDDataRef
            Astronomical data cube.
        wcs : astropy.wcs.wcs.WCS
            World Coordinate System to use.
        lower : (M,N) or (M,N,Z) tuple of integers
            Start coordinate in data.
        upper : (M,N) or (M,N,Z) tuple of integers
            End coordinate in data.

        Returns
        -------
        result: astropy.table.Table
            Table with WCS information of a section from the data.

    """
    if wcs is None:
        log.error("A world coordinate system (WCS) is needed")
        return None
    ii=core.index_features(data,lower,upper)
    f=wcs.wcs_pix2world(ii.T,0)
    return _world_table_creator(f,wcs)
Example #12
def get_clock_corr_vals(obsname, **kwargs):
    """
    get_clock_corr_vals(obsname, **kwargs)

    Return a tuple of numpy arrays of MJDs and clock
    corrections (in us) which can be used to interpolate
    a more exact clock correction for a TOA.  The kwargs are
    used if there are other things which determine the values
    (for example, backend specific corrections)
    
    # SUGGESTION(paulr): This docstring should specify exactly what is expected of
    # the clock correction files (i.e. the source and destination timescales.
    # Also, a routine should probably be provided to actually use the corrections, with
    # proper interpolation, instead of the current manual calculation that toa.py does
    """
    # The following works for simple linear interpolation
    # of normal TEMPO-style clock correction files
    # Find the 1-character tempo code, this is necessary for properly
    # reading the file.
    obs = read_observatories()
    site = next((x for x in obs[obsname].aliases if len(x) == 1), None)
    if site is None:
        log.error("No tempo site code for '%s', skipping clock corrections" % obsname)
        return (numpy.array([0.0, 100000.0]), numpy.array([0.0, 0.0]))
    filenm = os.path.join(os.environ["TEMPO"], "clock/time.dat")
    mjds, ccorr = load_tempo1_clock_file(filenm, site=site)
    return numpy.array(mjds), numpy.array(ccorr)
Example #13
    def get_response(self):
        """Returns the flux profile in JSON format.

        Returns
        -------
        dict
        """
        result = {}
        if self.fluxes is None or len(self.fluxes) == 0:
            log.error('No suitable data found.')
            result['status'] = 'WARNING'
            result['msg'] = 'No suitable data found.'
        else:
            result['status'] = 'OK'
            result['graph'] = self.save_graph()
            result['flux'] = []
            for row in self.fluxes:
                newrow = []
                # Averaged profiles do not have a time field
                if row['time']:
                    newrow.append(str(row['time'])[0:16])
                newrow.extend(('{:.3f}'.format(row['solarlon']),
                              '{:.1f}'.format(row['teff']),
                              '{:.1f}'.format(row['eca']),
                              '{0}'.format(row['met']),
                              '{:.1f} &pm; {:.1f}'.format(row['flux'], row['e_flux']),
                              '{:.0f}'.format(row['zhr'])))
                result['flux'].append(newrow)
        return result
Example #14
def test_log_to_list_origin2():

    with log.log_to_list(filter_origin='astropy.wcs') as log_list:
        log.error("Error message")
        log.warning("Warning message")

    assert len(log_list) == 0
Example #15
def _moment(data,order,wcs=None,mask=None,unit=None,restfrq=None):
    if wcs is None:
        log.error("A world coordinate system (WCS) is needed")
        return None
    data=core.fix_mask(data,mask)
    dim=wcs.wcs.spec
    rdim=data.ndim - 1 - dim
    v=spectral_velocities(data,wcs,fqis=np.arange(data.shape[rdim]),restfrq=restfrq)
    v=v.value
    m0=data.sum(axis=rdim)
    if order==0:
        mywcs=wcs.dropaxis(dim)
        return NDDataRef(m0.data, uncertainty=None, mask=m0.mask,wcs=mywcs, meta=None, unit=unit)
    #mu,alpha=np.average(data,axis=rdim,weights=v,returned=True)
    mu,alpha=np.ma.average(data,axis=rdim,weights=v,returned=True)
    m1=alpha*mu/m0
    if order==1:
        mywcs=wcs.dropaxis(dim)
        return NDDataRef(m1.data, uncertainty=None, mask=m1.mask,wcs=mywcs, meta=None, unit=u.km/u.s)
    v2=v*v
    var,beta=np.ma.average(data,axis=rdim,weights=v2,returned=True)
    #var,beta=data.average(axis=rdim,weights=v2,returned=True)
    m2=np.sqrt(beta*var/m0 - m1*m1)
    if order==2:
        mywcs=wcs.dropaxis(dim)
        return NDDataRef(m2.data, uncertainty=None, mask=m2.mask,wcs=mywcs, meta=None, unit=u.km/u.s)
    log.error("Order not supported")
    return None
Example #16
def get_clock_corr_vals(obsname, **kwargs):
    """
    get_clock_corr_vals(obsname, **kwargs)

    Return a tuple of numpy arrays of MJDs and clock
    corrections (in us) which can be used to interpolate
    a more exact clock correction for a TOA.  The kwargs are
    used if there are other things which determine the values
    (for example, backend specific corrections)
    """
    fileparts = {"GBT": "gbt",
                 "Arecibo": "ao",
                 "JVLA": "vla",
                 "Parkes": "pks",
                 "Nancay": "nancay",
                 "Effelsberg": "bonn",
                 "WSRT": "wsrt"}
    if obsname in fileparts.keys():
        filenm = os.path.join(os.environ["TEMPO"],
                              "clock/time_%s.dat" % \
                              fileparts[obsname])
    else:
        log.error("No clock correction valus for %s" % obsname)
        return (numpy.array([0.0, 100000.0]), numpy.array([0.0, 0.0]))
    # The following works for simple linear interpolation
    # of normal TEMPO-style clock correction files
    mjds, ccorr = numpy.loadtxt(filenm, skiprows=2,
                                usecols=(0, 2), unpack=True)
    return mjds, ccorr
Example #17
def vphas_quicklook_main(args=None):
    import argparse

    parser = argparse.ArgumentParser(
        description='Create a beautiful color image from single VPHAS frames.')
    parser.add_argument('-o', '--output', metavar='filename',
                        type=str, default=None,
                        help='Filename for the output image (Default is a '
                             'JPG file named vphas-offset-ccd.jpg)')
    parser.add_argument('-c', '--ccd', nargs='+', type=int, default=None,
                        help='CCD number between 1 and 32. '
                             '(Default is to save all CCDs.)')
    parser.add_argument('--min_percent_r', type=float, default=1.0,
                        help=('The percentile value used to determine the '
                              'minimum cut level for the red channel'))
    parser.add_argument('--max_percent_r', type=float, default=99.5,
                        help=('The percentile value used to determine the '
                              'maximum cut level for the red channel'))
    parser.add_argument('offset', nargs='+',
                        help='Name of the VPHAS offset pointing.')
    args = parser.parse_args(args)

    if args.ccd is None:
        args.ccd = range(1, 33)
    for offset in args.offset:
        try:
            for ccd in args.ccd:
                vphas_quicklook(offset,
                                ccd=ccd,
                                out_fn=args.output)
        except NotObservedException as e:
            log.error(e)
Example #18
def vel_stacking(data,data_slice,wcs=None,uncertainty=None, mask=None, meta=None, unit=None):
    """
    Create an image by collapsing the frequency axis

    Parameters
    ----------
    data : (M,N,Z) numpy.ndarray or astropy.nddata.NDData or astropy.nddata.NDDataRef
        Astronomical data cube

    data_slice : slice object
        Range of channels (along the frequency axis) to be collapsed

    Returns
    -------
    image (NDDataRef): 2D-Array with the stacked cube.

    """
    if len(data.shape) != 3:
        log.error("Cube needs to be a 3D array")
        raise ValueError("Cube needs to be a 3D array")
    dims = data.shape
    subcube = data[data_slice, :,:]
    stacked = np.sum(subcube,axis=0)
    if wcs:
        wcs = wcs.dropaxis(2)

        return NDDataRef(stacked, uncertainty=uncertainty, mask=mask,wcs=wcs, meta=meta, unit=unit)
    else:
        return stacked
Example #19
def load_event_TOAs(eventname, mission, weights=None):
    '''
    Read photon event times out of a FITS file as PINT TOA objects.

    Correctly handles raw event files, or ones processed with axBary to have
    barycentered  TOAs. Different conditions may apply to different missions.
    
    Parameters
    ----------
    eventname : str
        File name of the FITS event list
    mission : str
        Name of the mission (e.g. RXTE, XMM)
    weights : array or None
        The array has to be of the same size as the event list. Overwrites 
        possible weight lists from mission-specific FITS files
    
    Returns
    -------
    toalist : list of TOA objects
    '''
    # Load photon times from event file
    hdulist = pyfits.open(eventname)

    extension = mission_config[mission]["fits_extension"]

    if hdulist[1].name not in extension.split(','):
        raise RuntimeError('First table in FITS file ' +
                           'must be {}. Found {}'.format(extension,
                                                         hdulist[1].name))

    timesys, timeref = _get_timesys_and_timeref(hdulist[1])

    if not mission_config[mission]['allow_local'] \
            and timesys != 'TDB':
        log.error('Raw spacecraft TOAs not yet supported for ' + mission)

    obs, scale = _default_obs_and_scale(mission, timesys, timeref)

    # Read time column from FITS file
    mjds = read_fits_event_mjds_tuples(hdulist[1])

    new_kwargs = _get_columns_from_fits(hdulist[1],
                                        mission_config[mission]["fits_columns"])

    hdulist.close()

    if weights is not None:
        new_kwargs["weights"] = weights

    toalist = [None] * len(mjds)
    kw = {}
    for i in range(len(mjds)):
        # Create TOA list
        for key in new_kwargs.keys():
            kw[key] = new_kwargs[key][i]
        toalist[i] = toa.TOA(mjds[i], obs=obs, scale=scale, **kw)

    return toalist
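
A minimal usage sketch (the file name is a placeholder; it assumes a barycentered event file and that the mission key exists in mission_config):

toas = load_event_TOAs('barycentered_events.evt', 'nicer')
print(len(toas), toas[0])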
Example #20
def HDU_to_NDData(hdu):
   """
    Create an N-dimensional dataset from an HDU component.

    Parameters
    ----------
    hdu : HDU object
        HDU to transform into an N-dimensional dataset.

    Returns
    -------
    result: astropy.nddata.NDDataRef with data from the HDU object.
   """
   hdu.verify("fix")
   data=hdu.data
   meta=hdu.header
   mask=np.isnan(data)
   # Hack to correct wrong uppercased units generated by CASA
   try:
     bscale=meta['BSCALE']
   except KeyError:
     bscale=1.0
   try:
     bzero=meta['BZERO']
   except KeyError:
     bzero=0.0
   try:
     bsu=meta['BUNIT']
     bsu=bsu.lower()
     bsu=bsu.replace("jy","Jy")
     bunit=u.Unit(bsu,format="fits")
   except KeyError:
     bunit=u.Unit("u.Jy/u.beam")
   rem_list=[]
   for i in meta.items():
      if i[0].startswith('PC00'):
         rem_list.append(i[0])
   for e in rem_list:
      meta.remove(e)

   mywcs=wcs.WCS(meta)
   # Create astropy units
   if len(data.shape) == 4:
       # Put data in physically-meaninful values, and remove stokes
       # TODO: Stokes is removed by summing (is this correct? maybe is averaging?)
       log.info("4D data detected: assuming RA-DEC-FREQ-STOKES (like CASA-generated ones), and dropping STOKES")
       data=data.sum(axis=0)*bscale+bzero
       mask = np.logical_and.reduce(mask,axis=0)
       mywcs=mywcs.dropaxis(3)
   elif len(data.shape) == 3:
       log.info("3D data detected: assuming RA-DEC-FREQ")
       data=data*bscale+bzero
   elif len(data.shape) == 2:
       log.info("2D data detected: assuming RA-DEC")
       data=data*bscale+bzero
   else:
       log.error("Only 3D data allowed (or 4D in case of polarization)")
       raise TypeError
   return ndd.NDDataRef(data, uncertainty=None, mask=mask,wcs=mywcs, meta=meta, unit=bunit)
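
A minimal usage sketch for the converter above (the FITS file name is a placeholder):

from astropy.io import fits

hdulist = fits.open('cube.fits')
cube = HDU_to_NDData(hdulist[0])   # NDDataRef carrying data, mask, WCS and unit
hdulist.close()
print(cube.data.shape, cube.unit)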
Example #21
def syscall(cmd):
    """Calls a system command and returns its status."""
    log.info(cmd)
    status = os.system(cmd)
    if status != 0:
        log.error("System call returned {}\n"
                  "command was: {}".format(status, cmd))
    return status
Example #22
    def __init__(self, MJD, # required
                 error=0.0, obs='Barycenter', freq=float("inf"),
                 scale=None, 
                 **kwargs):  # keyword args that are completely optional
        r"""
        Construct a TOA object

        Parameters
        ----------
        MJD : astropy Time, float, or tuple of floats
            The time of the TOA, which can be expressed as an astropy Time,
            a floating point MJD (64 or 128 bit precision), or a tuple
            of (MJD1,MJD2) whose sum is the full precision MJD (usually the
            integer and fractional part of the MJD)
        obs : string
            The observatory code for the TOA
        freq : float or astropy Quantity
            Frequency corresponding to the TOA.  Either a Quantity with frequency
            units, or a number for which MHz is assumed.
        scale : string
            Time scale for the TOA time.  Defaults to the timescale appropriate
            to the site, but can be overridden

        Notes
        -----
        It is VERY important that all astropy.Time() objects are created
        with precision=9. This is ensured in the code and is checked for any
        Time object passed to the TOA constructor.

        """
        site = Observatory.get(obs)
        if numpy.isscalar(MJD):
            arg1, arg2 = MJD, None
        else:
            arg1, arg2 = MJD[0], MJD[1]
        if scale is None:
            scale = site.timescale
        self.mjd = time.Time(arg1, arg2, scale=scale,
                location=site.earth_location,
                format='pulsar_mjd', precision=9)

        if hasattr(error,'unit'):
            self.error = error
        else:
            self.error = error * u.microsecond
        self.obs = site.name
        if hasattr(freq,'unit'):
            try:
                junk = freq.to(u.MHz)
            except UnitConversionError:
                log.error("Frequency for TOA with incompatible unit {0}".format(freq))
            self.freq = freq
        else:
            self.freq = freq * u.MHz
        if self.freq == 0.0*u.MHz:
            self.freq = numpy.inf*u.MHz
        self.flags = kwargs
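
A usage sketch based on the docstring above (observatory code and values are illustrative; bare numbers for error and freq are interpreted as microseconds and MHz respectively):

# Full-precision MJD given as an (integer, fractional) tuple
t = TOA((54567, 0.876876876876876), error=1.0, obs='gbt', freq=1400.0)
print(t.mjd, t.error, t.freq, t.obs)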
Example #23
    def __init__(self, toafile=None, toalist=None, toaTable=None, usepickle=True):
        # First, just make an empty container
        self.toas = []
        self.commands = []
        self.observatories = set()
        self.filename = None
        if toaTable is not None:
            self.table = toaTable
            self.ntoas = len(self.table)
            self.first_MJD = self.table['mjd'].min()
            self.last_MJD = self.table['mjd'].max()
            self.source = 'table'

        if (toalist is not None) and (toafile is not None):
            log.error('Can not initialize TOAs from both file and list')
        if toafile is not None:
            # FIXME: work with file-like objects as well

            if type(toafile) in [tuple, list]:
                self.filename = None
                for infile in toafile:
                    self.read_toa_file(infile, usepickle=usepickle)
            else:
                pth, ext = os.path.splitext(toafile)
                if ext == ".pickle":
                    toafile = pth
                elif ext == ".gz":
                    pth0, ext0 = os.path.splitext(pth)
                    if ext0 == ".pickle":
                        toafile = pth0
                self.read_toa_file(toafile, usepickle=usepickle)
                self.filename = toafile

        if toalist is not None:
            if not isinstance(toalist,(list,tuple)):
                log.error('Trying to initialize from a non-list class')
            self.toas = toalist
            self.ntoas = len(toalist)
            self.commands = []
            self.filename = None
            self.observatories.update([t.obs for t in toalist])


        if not hasattr(self, 'table'):
            mjds = self.get_mjds(high_precision=True)
            self.first_MJD = mjds.min()
            self.last_MJD = mjds.max()
            # The table is grouped by observatory
            self.table = table.Table([numpy.arange(self.ntoas), mjds,
                                      self.get_errors(), self.get_freqs(),
                                      self.get_obss(), self.get_flags()],
                                      names=("index", "mjd", "error", "freq",
                                              "obs", "flags"),
                                      meta = {'filename':self.filename}).group_by("obs")

        # We don't need this now that we have a table
        del(self.toas)
Example #24
File: db.py Project: barentsen/meteor-flux
 def query(self, sql, arguments=()):
     try:
         self.cur.execute(sql, arguments)
         return self.cur.fetchall()
     except psycopg2.ProgrammingError as e:
         log.error('Query failed [{0}] with error message[{1}]'.format(
                             self.cur.query, e))
         self.rollback()
         return None
Example #25
File: toa.py Project: vhaasteren/PINT
    def apply_clock_corrections(self):
        """Apply observatory clock corrections and TIME statments.

        Apply clock corrections to all the TOAs where corrections are
        available.  This routine actually changes the value of the TOA,
        although the correction is also listed as a new flag for the TOA
        called 'clkcorr' so that it can be reversed if necessary.  This
        routine also applies all 'TIME' commands and treats them exactly
        as if they were a part of the observatory clock corrections.

        # SUGGESTION(paulr): Somewhere in this docstring, or in a higher level
        # documentation, the assumptions about the timescales should be specified.
        # The docstring says apply "correction" but does not say what it is correcting.
        # Be more specific.


        """
        # First make sure that we haven't already applied clock corrections
        flags = self.table['flags']
        if any(['clkcorr' in f for f in flags]):
            log.warning("Some TOAs have 'clkcorr' flag.  Not applying new clock corrections.")
            return
        # An array of all the time corrections, one for each TOA
        corr = numpy.zeros(self.ntoas) * u.s
        times = self.table['mjd']
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii+2]
            # First apply any TIME statements
            for jj in range(loind, hiind):
                if 'time' in flags[jj]:
                    # TIME commands are in sec
                    # SUGGESTION(paulr): These time correction units should
                    # be applied in the parser, not here. In the table the time
                    # correction should have units.
                    corr[jj] = flags[jj]['time'] * u.s
                    times[jj] += time.TimeDelta(corr[jj])
            # These are observatory clock corrections.  Do in groups.
            if (key['obs'] in observatories and key['obs'] != "Geocenter"):
                mjds, ccorr = obsmod.get_clock_corr_vals(key['obs'])
                tvals = numpy.array([t.mjd for t in grp['mjd']])
                if numpy.any((tvals < mjds[0]) | (tvals > mjds[-1])):
                    # FIXME: check the user sees this! should it be an exception?
                    log.error(
                        "Some TOAs are not covered by the %s clock correction"%key['obs']
                        +" file, treating clock corrections as constant"
                        +" past the ends.")
                gcorr = numpy.interp(tvals, mjds, ccorr) * u.us
                for jj, cc in enumerate(gcorr):
                    grp['mjd'][jj] += time.TimeDelta(cc)
                corr[loind:hiind] += gcorr
            # Now update the flags with the clock correction used
            for jj in range(loind, hiind):
                if corr[jj]:
                    flags[jj]['clkcorr'] = corr[jj]
Example #26
def loadFITS_PrimaryOnly(fitsfile):
    hdulist = fits.open(fitsfile, lazy_load_hdus=True)
    log.info('Processing PrimaryHDU Object 0')
    hduobject = hdulist[0]
    if hduobject is None:
        log.error('FITS PrimaryHDU is None')
        raise ValueError('FITS PrimaryHDU is None')
    result = HDU_to_NDData(hduobject)
    hdulist.close()
    return result
Example #27
def test_log_to_file(tmpdir, level):

    local_path = tmpdir.join('test.log')
    log_file = local_path.open('wb')
    log_path = str(local_path.realpath())
    orig_level = log.level

    try:
        if level is not None:
            log.setLevel(level)

        with log.log_to_file(log_path):
            log.error("Error message")
            log.warning("Warning message")
            log.info("Information message")
            log.debug("Debug message")

        log_file.close()
    finally:
        log.setLevel(orig_level)

    log_file = local_path.open('rb')
    log_entries = log_file.readlines()
    log_file.close()

    if level is None:
        # The log level *should* be set to whatever it was in the config
        level = conf.log_level

    # Check list length
    if level == 'DEBUG':
        assert len(log_entries) == 4
    elif level == 'INFO':
        assert len(log_entries) == 3
    elif level == 'WARN':
        assert len(log_entries) == 2
    elif level == 'ERROR':
        assert len(log_entries) == 1

    # Check list content

    assert eval(log_entries[0].strip())[-3:] == (
        'astropy.tests.test_logger', 'ERROR', 'Error message')

    if len(log_entries) >= 2:
        assert eval(log_entries[1].strip())[-3:] == (
            'astropy.tests.test_logger', 'WARNING', 'Warning message')

    if len(log_entries) >= 3:
        assert eval(log_entries[2].strip())[-3:] == (
            'astropy.tests.test_logger', 'INFO', 'Information message')

    if len(log_entries) >= 4:
        assert eval(log_entries[3].strip())[-3:] == (
            'astropy.tests.test_logger', 'DEBUG', 'Debug message')
Example #28
File: nicer_obs.py Project: demorest/PINT
def load_FPorbit(orbit_filename):
    '''Load data from an (RXTE or NICER) FPorbit file

        Reads a FPorbit FITS file

        Parameters
        ----------
        orbit_filename : str
            Name of file to load

        Returns
        -------
        astropy Table containing Time, x, y, z, v_x, v_y, v_z data

    '''
    # Load photon times from FT1 file
    hdulist = pyfits.open(orbit_filename)
    #log.info('orb file HDU name is {0}'.format(hdulist[1].name))
    if hdulist[1].name != 'ORBIT':
        log.error('NICER orb file first extension is {0}. It should be ORBIT'.format(hdulist[1].name))
    FPorbit_hdr=hdulist[1].header
    FPorbit_dat=hdulist[1].data

    log.info('Opened FPorbit FITS file {0}'.format(orbit_filename))
    # TIMESYS should be 'TT'

    # TIMEREF should be 'LOCAL', since no delays are applied

    timesys = FPorbit_hdr['TIMESYS']
    log.debug("FPorbit TIMESYS {0}".format(timesys))
    timeref = FPorbit_hdr['TIMEREF']
    log.debug("FPorbit TIMEREF {0}".format(timeref))

    mjds_TT = read_fits_event_mjds(hdulist[1])
    mjds_TT = mjds_TT*u.d
    log.debug("FPorbit spacing is {0}".format((mjds_TT[1]-mjds_TT[0]).to(u.s)))
    X = FPorbit_dat.field('X')*u.m
    Y = FPorbit_dat.field('Y')*u.m
    Z = FPorbit_dat.field('Z')*u.m
    Vx = FPorbit_dat.field('Vx')*u.m/u.s
    Vy = FPorbit_dat.field('Vy')*u.m/u.s
    Vz = FPorbit_dat.field('Vz')*u.m/u.s
    log.info('Building FPorbit table covering MJDs {0} to {1}'.format(mjds_TT.min(), mjds_TT.max()))
    FPorbit_table = Table([mjds_TT, X, Y, Z, Vx, Vy, Vz],
            names = ('MJD_TT', 'X', 'Y', 'Z', 'Vx', 'Vy', 'Vz'),
            meta = {'name':'FPorbit'} )
    # Make sure table is sorted by time
    log.debug('Sorting FPorbit table')
    FPorbit_table.sort('MJD_TT')
    # Now delete any bad entries where the positions are 0.0
    idx = np.where(np.logical_and(FPorbit_table['X'] != 0.0, FPorbit_table['Y'] != 0.0))[0]
    if (len(idx) != len(FPorbit_table)):
        log.warning('Dropping {0} zero entries from FPorbit table'.format(len(FPorbit_table)-len(idx)))
        FPorbit_table = FPorbit_table[idx]
    return FPorbit_table
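
A minimal sketch of how the returned table can feed the per-axis position interpolators used by posvel() in Example #1 (the orbit file name is a placeholder and the spline choice is an assumption, not necessarily what the project uses):

from scipy.interpolate import InterpolatedUnivariateSpline

tab = load_FPorbit('nicer.orb')
X = InterpolatedUnivariateSpline(tab['MJD_TT'], tab['X'])
Vx = InterpolatedUnivariateSpline(tab['MJD_TT'], tab['Vx'])
print(X(tab['MJD_TT'][0]), Vx(tab['MJD_TT'][0]))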
Example #29
def load_eventfiles(infile, tcoords=None, minweight=0, minMJD=0, maxMJD=100000):
    '''
    Load events from multiple sources
    The format of each line of infile is:
    <eventfile> <log_likelihood function> <template> [flags]
    Allowed flags are:
        -setweights - A multiplicative weight to apply to the 
            probability function for this eventfile
        -usepickle - Load from a pickle file
        -weightcol - The weight column in the fits file
    '''
    lines = open(infile, 'r').read().split('\n')
    eventinfo = {}
    eventinfo['toas'] = []
    eventinfo['lnlikes'] = []
    eventinfo['templates'] = []
    eventinfo['weightcol'] = []
    eventinfo['setweights'] = []

    for line in lines:
        log.info('%s' % line)
        if len(line) == 0:
            continue
        try:
            words = line.split()

            if len(words) > 3:
                kvs = words[3:]
                flags = {}
                for i in range(0, len(kvs), 2):
                    k, v = kvs[i].lstrip('-'), kvs[i+1]
                    flags[k] = v
            else:
                flags = {}

            ts = get_toas(words[0], flags, tcoords=tcoords, minweight=minweight, 
                          minMJD=minMJD, maxMJD=maxMJD)
            eventinfo['toas'].append(ts)
            log.info('%s has %d events' % (words[0], len(ts.table)))
            eventinfo['lnlikes'].append(words[1])
            eventinfo['templates'].append(words[2])
            if 'setweights' in flags:
                eventinfo['setweights'].append(float(flags['setweights']))
            else:
                eventinfo['setweights'].append(1.0)
            if 'weightcol' in flags:
                eventinfo['weightcol'].append(flags['weightcol'])
            else:
                eventinfo['weightcol'].append(None)
        except Exception as e:
            log.error('%s' % str(e))
            log.error('Could not load %s' % line)

    return eventinfo
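
An illustrative input line matching the '<eventfile> <log_likelihood function> <template> [flags]' layout described in the docstring (all names are placeholders):

events_bary.fits lnlike_func template.gauss -weightcol PULSE_WEIGHT -setweights 0.9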
Example #30
def format_toa_line(toatime, toaerr, freq, dm=0.0, obs='@', name='unk', flags={},
    format='Princeton'):
    """
    Format TOA line for writing

    Bugs
    ----
    This implementation is currently incomplete in that it will not
    undo things like TIME statements and probably other things.

    Princeton format
    ----------------
    columns  item
    1-1     Observatory (one-character code) '@' is barycenter
    2-2     must be blank
    16-24   Observing frequency (MHz)
    25-44   TOA (decimal point must be in column 30 or column 31)
    45-53   TOA uncertainty (microseconds)
    69-78   DM correction (pc cm^-3)

    Tempo2 format
    -------------
    First line of file should be "FORMAT 1"
    TOA format is "file freq sat satErr siteID <flags>"

    Returns
    -------
    out : string
        Formatted TOA line
    """
    toa = "{0:19.13f}".format(toatime.mjd)
    if format.upper() in ('TEMPO2','1'):
        # In Tempo2 format, freq=0.0 means infinite frequency
        if freq == numpy.inf:
            freq = 0.0
        flagstring = ''
        if dm != 0.0:
            flagstring += "-dm %.5f" % (dm,)
        # Here I need to append any actual flags
        out = "%s %f %s %.2f %s %s\n" % (name,freq,toa,toaerr,obs,flagstring)
    elif format.upper() in ('PRINCETON','TEMPO'): # TEMPO/Princeton format
        # In TEMPO/Princeton format, freq=0.0 means infinite frequency
        if freq == numpy.inf:
            freq = 0.0
        if dm!=0.0:
            out = obs+" %13s %8.3f %s %8.2f              %9.4f\n" % \
                (name, freq, toa, toaerr, dm)
        else:
            out = obs+" %13s %8.3f %s %8.2f\n" % (name, freq, toa, toaerr)
    else:
        log.error('Unknown TOA format ({0})'.format(format))
        # Should this raise an exception here? -- paulr

    return out
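
A usage sketch for the formatter above (values are illustrative; the printed layout follows the column descriptions in the docstring):

from astropy import time

t = time.Time(55555.0, format='mjd', scale='tdb')
line = format_toa_line(t, toaerr=1.5, freq=1400.0, obs='@', name='fake_toa',
                       format='Tempo2')
print(line)  # roughly: "fake_toa 1400.000000 55555.0000000000000 1.50 @ "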
Example #31
File: core.py Project: nstarman/astroquery
    def stage_data(self, uids, expand_tarfiles=False, return_json=False):
        """
        Obtain table of ALMA files

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'
        expand_tarfiles : bool
            Expand the tarfiles to obtain lists of all contained files.  If
            this is specified, the parent tarfile will *not* be included
        return_json : bool
            Return a list of the JSON data sets returned from the query.  This
            is primarily intended as a debug routine, but may be useful if there
            are unusual scheduling block layouts.

        Returns
        -------
        data_file_table : Table
            A table containing 3 columns: the UID, the file URL (for future
            downloading), and the file size
        """

        dataarchive_url = self._get_dataarchive_url()

        # allow for the uid to be specified as single entry
        if isinstance(uids, str):
            uids = [uids]

        tables = []
        for uu in uids:
            log.debug("Retrieving metadata for {0}".format(uu))
            uid = clean_uid(uu)
            req = self._request('GET', '{dataarchive_url}/rh/data/expand/{uid}'
                                .format(dataarchive_url=dataarchive_url,
                                        uid=uid),
                                cache=False)
            req.raise_for_status()
            try:
                jdata = req.json()
            # Note this exception does not work in Python 2.7
            except json.JSONDecodeError:
                if 'Central Authentication Service' in req.text or 'recentRequests' in req.url:
                    # this indicates a wrong server is being used;
                    # the "pre-feb2020" stager will be phased out
                    # when the new services are deployed
                    raise RemoteServiceError("Failed query!  This shouldn't happen - please "
                                             "report the issue as it may indicate a change in "
                                             "the ALMA servers.")
                else:
                    raise

            if return_json:
                tables.append(jdata)
            else:
                if jdata['type'] != 'PROJECT':
                    log.error("Skipped uid {uu} because it is not a project and"
                              "lacks the appropriate metadata; it is a "
                              "{jdata}".format(uu=uu, jdata=jdata['type']))
                    continue
                if expand_tarfiles:
                    table = uid_json_to_table(jdata, productlist=['ASDM',
                                                                  'PIPELINE_PRODUCT'])
                else:
                    table = uid_json_to_table(jdata,
                                              productlist=['ASDM',
                                                           'PIPELINE_PRODUCT',
                                                           'PIPELINE_PRODUCT_TARFILE',
                                                           'PIPELINE_AUXILIARY_TARFILE'])
                table['sizeInBytes'].unit = u.B
                table.rename_column('sizeInBytes', 'size')
                table.add_column(Column(data=['{dataarchive_url}/dataPortal/{name}'
                                              .format(dataarchive_url=dataarchive_url,
                                                      name=name)
                                              for name in table['name']],
                                        name='URL'))

                isp = self.is_proprietary(uid)
                table.add_column(Column(data=[isp for row in table],
                                        name='isProprietary'))

                tables.append(table)
                log.debug("Completed metadata retrieval for {0}".format(uu))

        if len(tables) == 0:
            raise ValueError("No valid UIDs supplied.")

        if return_json:
            return tables

        table = table_vstack(tables)

        return table
Example #32
parser.add_argument("--dec", help="DEC in DD:MM:SS.s", default="00:00:00")

parser.add_argument("--observer",
                    help="Name of person analyzing data",
                    default=None)
args = parser.parse_args()
if args.observer is None:
    import getpass
    args.observer = getpass.getuser()

# Grab base filename for output
base = os.path.splitext(os.path.basename(args.evfile))[0]

etable = Table.read(args.evfile, hdu=1)
if etable.meta['TIMESYS'] != 'TDB':
    log.error('Event file must be barycentered!')
    sys.exit(1)
gtitable = Table.read(args.evfile, hdu=2)

epoch_met = gtitable['START'][0]
# WARNING: This loses precision! Should be done with astropy times
# Should make utility routine to convert FITS TIME column to astropy times properly
epoch_mjd = (etable.meta['MJDREFI'] + etable.meta['MJDREFF'] +
             etable.meta['TIMEZERO'] + epoch_met / 86400.0)

# Write event times to bin file
eventtimes = np.array(etable['TIME'], dtype=np.float64) - epoch_met
log.info('Event times: {0} to {1}'.format(eventtimes.min(), eventtimes.max()))
eventtimes.tofile('{0}.events'.format(base))
#
nbins = choose_N((gtitable['STOP'][-1] - epoch_met) / args.dt)
Example #33
import pint.toa as toa

# import matplotlib
# matplotlib.use('TKAgg')
import matplotlib.pyplot as plt

import astropy.units as u
from astropy import log
from pint.residuals import Residuals as resids
import os

try:
    import tempo2_utils
except ImportError:
    log.error(
        "This example requires tempo_utils, download from: https://github.com/demorest/tempo_utils and 'pip install .'"
    )
    raise

# Using Nanograv data J0623-0200
datadir = "../../tests/datafile"
parfile = os.path.join(datadir, "J0613-0200_NANOGrav_dfg+12_TAI_FB90.par")
timfile = os.path.join(datadir, "J0613-0200_NANOGrav_dfg+12.tim")

# libstempo calculation
print("tempo2 calculation")
tempo2_vals = tempo2_utils.general2(parfile, timfile, ["pre"])
# Build PINT model
print("PINT calculation")
m = mb.get_model(parfile)
# Get toas to pint
Example #34
File: pprint.py Project: zupeiza/astropy
    def _more_tabcol(self, tabcol, max_lines=None, max_width=None,
                     show_name=True, show_unit=None, show_dtype=False):
        """Interactive "more" of a table or column.

        Parameters
        ----------
        max_lines : int or None
            Maximum number of rows to output

        max_width : int or None
            Maximum character width of output

        show_name : bool
            Include a header row for column names. Default is True.

        show_unit : bool
            Include a header row for unit.  Default is to show a row
            for units only if one or more columns has a defined value
            for the unit.

        show_dtype : bool
            Include a header row for column dtypes. Default is False.
        """
        allowed_keys = 'f br<>qhpn'

        # Count the header lines
        n_header = 0
        if show_name:
            n_header += 1
        if show_unit:
            n_header += 1
        if show_dtype:
            n_header += 1
        if show_name or show_unit or show_dtype:
            n_header += 1

        # Set up kwargs for pformat call.  Only Table gets max_width.
        kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,
                      show_dtype=show_dtype)
        if hasattr(tabcol, 'columns'):  # tabcol is a table
            kwargs['max_width'] = max_width

        # If max_lines is None (=> query screen size) then increase by 2.
        # This is because get_pprint_size leaves 6 extra lines so that in
        # ipython you normally see the last input line.
        max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
        if max_lines is None:
            max_lines1 += 2
        delta_lines = max_lines1 - n_header

        # Set up a function to get a single character on any platform
        inkey = Getch()

        i0 = 0  # First table/column row to show
        showlines = True
        while True:
            i1 = i0 + delta_lines  # Last table/col row to show
            if showlines:  # Don't always show the table (e.g. after help)
                try:
                    os.system('cls' if os.name == 'nt' else 'clear')
                except Exception:
                    pass  # No worries if clear screen call fails
                lines = tabcol[i0:i1].pformat(**kwargs)
                colors = ('red' if i < n_header else 'default'
                          for i in range(len(lines)))
                for color, line in zip(colors, lines):
                    color_print(line, color)
            showlines = True
            print()
            print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ')
            # Get a valid key
            while True:
                try:
                    key = inkey().lower()
                except Exception:
                    print("\n")
                    log.error('Console does not support getting a character'
                              ' as required by more().  Use pprint() instead.')
                    return
                if key in allowed_keys:
                    break
            print(key)

            if key.lower() == 'q':
                break
            elif key == ' ' or key == 'f':
                i0 += delta_lines
            elif key == 'b':
                i0 = i0 - delta_lines
            elif key == 'r':
                pass
            elif key == '<':
                i0 = 0
            elif key == '>':
                i0 = len(tabcol)
            elif key == 'p':
                i0 -= 1
            elif key == 'n':
                i0 += 1
            elif key == 'h':
                showlines = False
                print("""
    Browsing keys:
       f, <space> : forward one page
       b : back one page
       r : refresh same page
       n : next row
       p : previous row
       < : go to beginning
       > : go to end
       q : quit browsing
       h : print this help""", end=' ')
            if i0 < 0:
                i0 = 0
            if i0 >= len(tabcol) - delta_lines:
                i0 = len(tabcol) - delta_lines
            print("\n")
Example #35
File: core.py Project: tgellis/astroquery
 def api_key(self):
     """ Return the Astrometry.net API key. """
     if not conf.api_key:
         log.error("Astrometry.net API key not in configuration file")
     return conf.api_key
Example #36
                            os.mkdir(
                                f"{basepath}/{field}/B{band}/comparisons/")
                        if not os.path.exists(
                                f"{sharepath}/comparison_images/"):
                            os.mkdir(f"{sharepath}/comparison_images/")
                        fig.savefig(
                            f"{basepath}/{field}/B{band}/comparisons/{field}_B{band}_{config}{bsens}_selfcal{last_selfcal}_comparison.png",
                            bbox_inches='tight')
                        shutil.copy(
                            f"{basepath}/{field}/B{band}/comparisons/{field}_B{band}_{config}{bsens}_selfcal{last_selfcal}_comparison.png",
                            f"{sharepath}/comparison_images/")
                    except IndexError:
                        raise
                    except Exception as ex:
                        log.error(
                            f"Failure for pre={preselfcal_name} post={postselfcal_name}"
                        )
                        log.error((field, band, config, imtype, ex))
                        raise ex
                        #continue

                    matchrow = (
                        (tbl['region'] == field) & (tbl['band'] == f'B{band}')
                        & (tbl['array']
                           == ('12Monly' if config == '12M' else config)) &
                        (tbl['robust'] == 'r0.0') &
                        (tbl['bsens'] if 'bsens' in imtype else ~tbl['bsens'])
                        &
                        (tbl['pbcor'] if 'pbcor' in suffix else ~tbl['pbcor']))
                    if matchrow.sum() == 0:
                        raise ValueError(
Example #37
def main(argv=None):

    parser = argparse.ArgumentParser(
        description=
        "PINT tool for MCMC optimization of timing models using event data.")

    parser.add_argument("eventfile", help="event file to use")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("gaussianfile",
                        help="gaussian file that defines template")
    parser.add_argument("--ft2", help="Path to FT2 file.", default=None)
    parser.add_argument(
        "--weightcol",
        help="name of weight column (or 'CALC' to have them computed",
        default=None)
    parser.add_argument("--nwalkers",
                        help="Number of MCMC walkers (def 200)",
                        type=int,
                        default=200)
    parser.add_argument("--burnin",
                        help="Number of MCMC steps for burn in (def 100)",
                        type=int,
                        default=100)
    parser.add_argument("--nsteps",
                        help="Number of MCMC steps to compute (def 1000)",
                        type=int,
                        default=1000)
    parser.add_argument("--minMJD",
                        help="Earliest MJD to use (def 54680)",
                        type=float,
                        default=54680.0)
    parser.add_argument("--maxMJD",
                        help="Latest MJD to use (def 57250)",
                        type=float,
                        default=57250.0)
    parser.add_argument("--phs",
                        help="Starting phase offset [0-1] (def is to measure)",
                        type=float)
    parser.add_argument("--phserr",
                        help="Error on starting phase",
                        type=float,
                        default=0.03)
    parser.add_argument("--minWeight",
                        help="Minimum weight to include (def 0.05)",
                        type=float,
                        default=0.05)
    parser.add_argument(
        "--wgtexp",
        help=
        "Raise computed weights to this power (or 0.0 to disable any rescaling of weights)",
        type=float,
        default=0.0)
    parser.add_argument("--testWeights",
                        help="Make plots to evalute weight cuts?",
                        default=False,
                        action="store_true")
    parser.add_argument("--doOpt",
                        help="Run initial scipy opt before MCMC?",
                        default=False,
                        action="store_true")
    parser.add_argument(
        "--initerrfact",
        help=
        "Multiply par file errors by this factor when initializing walker starting values",
        type=float,
        default=0.1)
    parser.add_argument(
        "--priorerrfact",
        help=
        "Multiple par file errors by this factor when setting gaussian prior widths",
        type=float,
        default=10.0)
    parser.add_argument("--usepickle",
                        help="Read events from pickle file, if available?",
                        default=False,
                        action="store_true")

    global nwalkers, nsteps, ftr

    args = parser.parse_args(argv)

    eventfile = args.eventfile
    parfile = args.parfile
    gaussianfile = args.gaussianfile
    weightcol = args.weightcol

    if args.ft2 is not None:
        # Instantiate FermiObs once so it gets added to the observatory registry
        FermiObs(name='Fermi', ft2name=args.ft2)

    nwalkers = args.nwalkers
    burnin = args.burnin
    nsteps = args.nsteps
    if burnin >= nsteps:
        log.error('burnin must be < nsteps')
        sys.exit(1)
    nbins = 256  # For likelihood calculation based on gaussians file
    outprof_nbins = 256  # in the text file, for pygaussfit.py, for instance
    minMJD = args.minMJD
    maxMJD = args.maxMJD  # Usually set by coverage of IERS file

    minWeight = args.minWeight
    do_opt_first = args.doOpt
    wgtexp = args.wgtexp

    # Read in initial model
    modelin = pint.models.get_model(parfile)

    # The custom_timing version below is to manually construct the TimingModel
    # class, which allows it to be pickled. This is needed for parallelizing
    # the emcee call over a number of threads.  So far, it isn't quite working
    # so it is disabled.  The code above constructs the TimingModel class
    # dynamically, as usual.
    #modelin = custom_timing(parfile)

    # Remove the dispersion delay as it is unnecessary
    #modelin.delay_funcs['L1'].remove(modelin.dispersion_delay)
    # Set the target coords for automatic weighting if necessary
    if 'ELONG' in modelin.params:
        tc = SkyCoord(modelin.ELONG.quantity,
                      modelin.ELAT.quantity,
                      frame='barycentrictrueecliptic')
    else:
        tc = SkyCoord(modelin.RAJ.quantity,
                      modelin.DECJ.quantity,
                      frame='icrs')

    target = tc if weightcol == 'CALC' else None

    # TODO: make this properly handle long double
    if not args.usepickle or (not (os.path.isfile(eventfile + ".pickle") or
                                   os.path.isfile(eventfile + ".pickle.gz"))):
        # Read event file and return list of TOA objects
        tl = fermi.load_Fermi_TOAs(eventfile,
                                   weightcolumn=weightcol,
                                   targetcoord=target,
                                   minweight=minWeight)
        # Limit the TOAs to ones in selected MJD range and above minWeight
        tl = [
            tl[ii] for ii in range(len(tl))
            if (tl[ii].mjd.value > minMJD and tl[ii].mjd.value < maxMJD and (
                weightcol is None or tl[ii].flags['weight'] > minWeight))
        ]
        print("There are %d events we will use" % len(tl))
        # Now convert to TOAs object and compute TDBs and posvels
        ts = toa.TOAs(toalist=tl)
        ts.filename = eventfile
        ts.compute_TDBs()
        ts.compute_posvels(ephem="DE421", planets=False)
        ts.pickle()
    else:  # read the events in as a pickle file
        picklefile = toa._check_pickle(eventfile)
        if not picklefile:
            picklefile = eventfile
        ts = toa.TOAs(picklefile)

    if weightcol is not None:
        if weightcol == 'CALC':
            weights = np.asarray([x['weight'] for x in ts.table['flags']])
            print("Original weights have min / max weights %.3f / %.3f" % \
                (weights.min(), weights.max()))
            # Rescale the weights, if requested (by having wgtexp != 0.0)
            if wgtexp != 0.0:
                weights **= wgtexp
                wmx, wmn = weights.max(), weights.min()
                # make the highest weight = 1, but keep min weight the same
                weights = wmn + ((weights - wmn) * (1.0 - wmn) / (wmx - wmn))
            for ii, x in enumerate(ts.table['flags']):
                x['weight'] = weights[ii]
        weights = np.asarray([x['weight'] for x in ts.table['flags']])
        print("There are %d events, with min / max weights %.3f / %.3f" % \
            (len(weights), weights.min(), weights.max()))
    else:
        weights = None
        print("There are %d events, no weights are being used." % ts.ntoas)

    # Now load in the gaussian template and normalize it
    gtemplate = read_gaussfitfile(gaussianfile, nbins)
    gtemplate /= gtemplate.mean()

    # Set the priors on the parameters in the model, before
    # instantiating the emcee_fitter
    # Currently, this adds a gaussian prior on each parameter
    # with width equal to the par file uncertainty * priorerrfact,
    # and then puts in some special cases.
    # *** This should be replaced/supplemented with a way to specify
    # more general priors on parameters that need certain bounds
    phs = 0.0 if args.phs is None else args.phs
    fitkeys, fitvals, fiterrs = get_fit_keyvals(modelin,
                                                phs=phs,
                                                phserr=args.phserr)

    for key, v, e in zip(fitkeys[:-1], fitvals[:-1], fiterrs[:-1]):
        if key == 'SINI' or key == 'E' or key == 'ECC':
            getattr(modelin, key).prior = Prior(uniform(0.0, 1.0))
        elif key == 'PX':
            getattr(modelin, key).prior = Prior(uniform(0.0, 10.0))
        elif key.startswith('GLPH'):
            getattr(modelin, key).prior = Prior(uniform(-0.5, 1.0))
        else:
            getattr(modelin, key).prior = Prior(
                norm(loc=float(v), scale=float(e * args.priorerrfact)))

    # Now define the requirements for emcee
    ftr = emcee_fitter(ts, modelin, gtemplate, weights, phs, args.phserr)

    # Use this if you want to see the effect of setting minWeight
    if args.testWeights:
        log.info("Checking H-test vs weights")
        ftr.prof_vs_weights(use_weights=True)
        ftr.prof_vs_weights(use_weights=False)
        sys.exit()

    # Now compute the photon phases and see if we see a pulse
    phss = ftr.get_event_phases()
    maxbin, like_start = marginalize_over_phase(phss,
                                                gtemplate,
                                                weights=ftr.weights,
                                                minimize=True,
                                                showplot=False)
    print("Starting pulse likelihood:", like_start)
    if args.phs is None:
        fitvals[-1] = 1.0 - maxbin[0] / float(len(gtemplate))
        if fitvals[-1] > 1.0: fitvals[-1] -= 1.0
        if fitvals[-1] < 0.0: fitvals[-1] += 1.0
        print("Starting pulse phase:", fitvals[-1])
    else:
        print("Measured starting pulse phase is %f, but using %f" % \
            (1.0 - maxbin / float(len(gtemplate)), args.phs))
        fitvals[-1] = args.phs
    ftr.fitvals[-1] = fitvals[-1]
    ftr.phaseogram(plotfile=ftr.model.PSR.value + "_pre.png")
    plt.close()
    #ftr.phaseogram()

    # Write out the starting pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value + "_prof_pre.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Try normal optimization first to see how it goes
    if do_opt_first:
        result = op.minimize(ftr.minimize_func, np.zeros_like(ftr.fitvals))
        newfitvals = np.asarray(result['x']) * ftr.fiterrs + ftr.fitvals
        like_optmin = -result['fun']
        print("Optimization likelihood:", like_optmin)
        ftr.set_params(dict(zip(ftr.fitkeys, newfitvals)))
        ftr.phaseogram()
    else:
        like_optmin = -np.inf

    # Set up the initial conditions for the emcee walkers.  Use the
    # scipy.optimize newfitvals instead if they are better
    ndim = ftr.n_fit_params
    if like_start > like_optmin:
        # Keep the starting deviations small...
        pos = [
            ftr.fitvals +
            ftr.fiterrs * args.initerrfact * np.random.randn(ndim)
            for ii in range(nwalkers)
        ]
        # Set starting params
        for param in [
                "GLPH_1", "GLEP_1", "SINI", "M2", "E", "ECC", "PX", "A1"
        ]:
            if param in ftr.fitkeys:
                idx = ftr.fitkeys.index(param)
                if param == "GLPH_1":
                    svals = np.random.uniform(-0.5, 0.5, nwalkers)
                elif param == "GLEP_1":
                    svals = np.random.uniform(minMJD + 100, maxMJD - 100,
                                              nwalkers)
                    #svals = 55422.0 + np.random.randn(nwalkers)
                elif param == "SINI":
                    svals = np.random.uniform(0.0, 1.0, nwalkers)
                elif param == "M2":
                    svals = np.random.uniform(0.1, 0.6, nwalkers)
                elif param in ["E", "ECC", "PX", "A1"]:
                    # Ensure all positive
                    svals = np.fabs(ftr.fitvals[idx] + ftr.fiterrs[idx] *
                                    np.random.randn(nwalkers))
                    if param in ["E", "ECC"]:
                        svals[svals > 1.0] = 1.0 - (svals[svals > 1.0] - 1.0)
                for ii in range(nwalkers):
                    pos[ii][idx] = svals[ii]
    else:
        pos = [
            newfitvals + ftr.fiterrs * args.initerrfact * np.random.randn(ndim)
            for i in range(nwalkers)
        ]
    # Set the 0th walker to have the initial pre-fit solution
    # This way, one walker should always be in a good position
    pos[0] = ftr.fitvals

    import emcee
    # Following are for parallel processing tests...
    if 0:

        def unwrapped_lnpost(theta, ftr=ftr):
            return ftr.lnposterior(theta)

        import pathos.multiprocessing as mp
        pool = mp.ProcessPool(nodes=8)
        sampler = emcee.EnsembleSampler(nwalkers,
                                        ndim,
                                        unwrapped_lnpost,
                                        pool=pool,
                                        args=[ftr])
    else:
        sampler = emcee.EnsembleSampler(nwalkers, ndim, ftr.lnposterior)
    # The number is the number of points in the chain
    sampler.run_mcmc(pos, nsteps)

    def chains_to_dict(names, sampler):
        chains = [sampler.chain[:, :, ii].T for ii in range(len(names))]
        return dict(zip(names, chains))

    def plot_chains(chain_dict, file=False):
        npts = len(chain_dict)
        fig, axes = plt.subplots(npts, 1, sharex=True, figsize=(8, 9))
        for ii, name in enumerate(chain_dict.keys()):
            axes[ii].plot(chain_dict[name], color="k", alpha=0.3)
            axes[ii].set_ylabel(name)
        axes[npts - 1].set_xlabel("Step Number")
        fig.tight_layout()
        if file:
            fig.savefig(file)
            plt.close()
        else:
            plt.show()
            plt.close()

    chains = chains_to_dict(ftr.fitkeys, sampler)
    plot_chains(chains, file=ftr.model.PSR.value + "_chains.png")

    # Make the triangle plot.
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    import corner
    fig = corner.corner(samples,
                        labels=ftr.fitkeys,
                        bins=50,
                        truths=ftr.maxpost_fitvals,
                        plot_contours=True)
    fig.savefig(ftr.model.PSR.value + "_triangle.png")
    plt.close()

    # Make a phaseogram with the 50th percentile values
    #ftr.set_params(dict(zip(ftr.fitkeys, np.percentile(samples, 50, axis=0))))
    # Make a phaseogram with the best MCMC result
    ftr.set_params(dict(zip(ftr.fitkeys[:-1], ftr.maxpost_fitvals[:-1])))
    ftr.phaseogram(plotfile=ftr.model.PSR.value + "_post.png")
    plt.close()

    # Write out the output pulse profile
    vs, xs = np.histogram(ftr.get_event_phases(), outprof_nbins, \
        range=[0,1], weights=ftr.weights)
    f = open(ftr.model.PSR.value + "_prof_post.txt", 'w')
    for x, v in zip(xs, vs):
        f.write("%.5f  %12.5f\n" % (x, v))
    f.close()

    # Write out the par file for the best MCMC parameter est
    f = open(ftr.model.PSR.value + "_post.par", 'w')
    f.write(ftr.model.as_parfile())
    f.close()

    # Print the best MCMC values and ranges
    ranges = list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                      zip(*np.percentile(samples, [16, 50, 84], axis=0))))
    print("Post-MCMC values (50th percentile +/- (16th/84th percentile):")
    for name, vals in zip(ftr.fitkeys, ranges):
        print("%8s:" % name, "%25.15g (+ %12.5g  / - %12.5g)" % vals)

    # Put the same stuff in a file
    f = open(ftr.model.PSR.value + "_results.txt", 'w')

    f.write("Post-MCMC values (50th percentile +/- (16th/84th percentile):\n")
    for name, vals in zip(ftr.fitkeys, ranges):
        f.write("%8s:" % name + " %25.15g (+ %12.5g  / - %12.5g)\n" % vals)

    f.write("\nMaximum likelihood par file:\n")
    f.write(ftr.model.as_parfile())
    f.close()

    import pickle
    pickle.dump(samples, open(ftr.model.PSR.value + "_samples.pickle", "wb"))
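
A minimal, self-contained sketch of the 16th/50th/84th-percentile summary computed above; the sample array and parameter names are synthetic stand-ins.

import numpy as np

# Stand-ins for sampler.chain[:, burnin:, :].reshape((-1, ndim)) and ftr.fitkeys
samples = np.random.randn(1000, 3)
fitkeys = ["F0", "F1", "PHASE"]

# For each parameter: (median, +error, -error) from the 16/50/84 percentiles
ranges = list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                  zip(*np.percentile(samples, [16, 50, 84], axis=0))))
for name, vals in zip(fitkeys, ranges):
    print("%8s:" % name, "%25.15g (+ %12.5g  / - %12.5g)" % vals)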
Example #38
0
File: toa.py Project: zpleunis/PINT
    def __init__(
            self,
            MJD,  # required
            error=0.0,
            obs='Barycenter',
            freq=float("inf"),
            scale=None,
            **kwargs):  # keyword args that are completely optional
        r"""
        Construct a TOA object

        Parameters
        ----------
        MJD : astropy Time, float, or tuple of floats
            The time of the TOA, which can be expressed as an astropy Time,
            a floating point MJD (64 or 128 bit precision), or a tuple
            of (MJD1,MJD2) whose sum is the full precision MJD (usually the
            integer and fractional part of the MJD)
        error : float or astropy Quantity
            The uncertainty on the TOA; a bare number is assumed to be in
            microseconds
        obs : string
            The observatory code for the TOA
        freq : float or astropy Quantity
            Frequency corresponding to the TOA.  Either a Quantity with frequency
            units, or a number for which MHz is assumed.
        scale : string
            Time scale for the TOA time.  Defaults to the timescale appropriate
            to the site, but can be overridden

        Notes
        -----
        It is VERY important that all astropy.Time() objects are created
        with precision=9. This is ensured in the code and is checked for any
        Time object passed to the TOA constructor.

        """
        site = get_observatory(obs)

        # If MJD is already a Time, just use it. Note that this will ignore
        # the 'scale' argument to the TOA() constructor!
        if isinstance(MJD, time.Time):
            t = MJD
        else:
            if numpy.isscalar(MJD):
                arg1, arg2 = MJD, None
            else:
                arg1, arg2 = MJD[0], MJD[1]
            if scale is None:
                scale = site.timescale
            # First build a time without a location
            # Note that when scale is UTC, must use pulsar_mjd format!
            if scale.lower() == 'utc':
                fmt = 'pulsar_mjd'
            else:
                fmt = 'mjd'
            t = time.Time(arg1, arg2, scale=scale, format=fmt, precision=9)
        # Now assign the site location to the Time, for use in the TDB conversion
        # Time objects are immutable so you must make a new one to add the location!
        # Use the initial time to look up the observatory location
        # (needed for moving observatories)
        # The location is an EarthLocation in the ITRF (ECEF, WGS84) frame
        try:
            loc = site.earth_location_itrf(time=t)
        except:
            print(
                "Error computing earth_location_itrf at time {0}, {1}".format(
                    t, type(t)))
            raise
        # Then construct the full time, with observatory location set
        self.mjd = time.Time(t, location=loc, precision=9)

        if hasattr(error, 'unit'):
            self.error = error
        else:
            self.error = error * u.microsecond
        self.obs = site.name
        if hasattr(freq, 'unit'):
            try:
                junk = freq.to(u.MHz)
            except u.UnitConversionError:
                log.error(
                    "Frequency for TOA with incompatible unit {0}".format(
                        freq))
            self.freq = freq
        else:
            self.freq = freq * u.MHz
        if self.freq == 0.0 * u.MHz:
            self.freq = numpy.inf * u.MHz
        self.flags = kwargs
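
A hedged usage sketch of the constructor above; the observatory code, MJD, and flag values are illustrative and assume PINT's toa module is importable.

import astropy.units as u
import pint.toa as toa

# Scalar MJD; a bare error is interpreted as microseconds, a bare freq as MHz
t1 = toa.TOA(54567.876876876876876, error=0.45, obs='GBT', freq=1400.0)

# Two-float (integer, fractional) MJD keeps full precision; extra kwargs become flags
t2 = toa.TOA((54567, 0.876876876876876), error=0.45 * u.us, obs='GBT',
             freq=1400.0 * u.MHz, backend='GUPPI')
print(t1.mjd, t1.error, t1.obs, t1.freq)
print(t2.flags)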
Example #39
0
File: toa.py Project: zpleunis/PINT
def format_toa_line(toatime,
                    toaerr,
                    freq,
                    obs,
                    dm=0.0 * u.pc / u.cm**3,
                    name='unk',
                    flags={},
                    format='Princeton'):
    """
    Format TOA line for writing

    Parameters
    ----------
    toatime   Time object containing TOA arrival time
    toaerr    TOA error as a Quantity with units
    freq      Frequency as a Quantity with units (NB: value of np.inf is allowed)
    obs       Observatory object

    dm        DM for the TOA as a Quantity with units (not printed if 0.0 pc/cm^3)
    name      Name to embed in TOA line (conventionally the data file name)
    format    (Princeton | Tempo2)
    flags     Any Tempo2 flags to append to the TOA line

    Bugs
    ----
    This implementation is currently incomplete in that it will not
    undo things like TIME statements and probably other things.

    Princeton format
    ----------------
    columns  item
    1-1     Observatory (one-character code) '@' is barycenter
    2-2     must be blank
    16-24   Observing frequency (MHz)
    25-44   TOA (decimal point must be in column 30 or column 31)
    45-53   TOA uncertainty (microseconds)
    69-78   DM correction (pc cm^-3)

    Tempo2 format
    -------------
    First line of file should be "FORMAT 1"
    TOA format is "file freq sat satErr siteID <flags>"

    Returns
    -------
    out : string
        Formatted TOA line
    """
    from .utils import time_to_mjd_string
    if format.upper() in ('TEMPO2', '1'):
        toa_str = time_to_mjd_string(toatime, prec=16)
        #log.info(toa_str)
        # In Tempo2 format, freq=0.0 means infinite frequency
        if freq == numpy.inf * u.MHz:
            freq = 0.0 * u.MHz
        flagstring = ''
        if dm != 0.0 * u.pc / u.cm**3:
            flagstring += "-dm {0:%.5f}".format(dm.to(u.pc / u.cm**3).value)
        # Here I need to append any actual flags
        for flag in flags.keys():
            v = flags[flag]
            # Since toas file do not have values with unit in the flags,
            # here we are taking the units out
            if flag in ['clkcorr']:
                continue
            if hasattr(v, "unit"):
                v = v.value
            flag = str(flag)
            if flag.startswith('-'):
                flagstring += ' %s %s' % (flag, v)
            else:
                flagstring += ' -%s %s' % (flag, v)
        # Now set observatory code. Use obs.name unless overridden by tempo2_code
        try:
            obscode = obs.tempo2_code
        except:
            obscode = obs.name
        out = "%s %f %s %.3f %s %s\n" % (name, freq.to(
            u.MHz).value, toa_str, toaerr.to(u.us).value, obs.name, flagstring)
    elif format.upper() in ('PRINCETON', 'TEMPO'):  # TEMPO/Princeton format
        toa_str = time_to_mjd_string(toatime, prec=13)
        # In TEMPO/Princeton format, freq=0.0 means infinite frequency
        if freq == numpy.inf * u.MHz:
            freq = 0.0 * u.MHz
        if len(obs.tempo_code) != 1:
            log.warning(
                'Observatory {0} does not have 1-character tempo_code, skipping TOA!'
                .format(obs.name))
        if dm != 0.0 * u.pc / u.cm**3:
            out = obs.tempo_code+" %13s%9.3f%20s%9.2f                %9.4f\n" % \
                (name, freq.to(u.MHz).value, toa_str, toaerr.to(u.us).value,
                dm.to(u.pc/u.cm**3).value)
        else:
            out = obs.tempo_code + " %13s%9.3f%20s%9.2f\n" % (
                name, freq.to(u.MHz).value, toa_str, toaerr.to(u.us).value)
    else:
        raise ValueError('Unknown TOA format ({0})'.format(format))

    return out
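
A short usage sketch for the formatter above, assuming it is importable from pint.toa; the observatory, time, and flags are illustrative.

import astropy.units as u
from astropy import time
from pint.observatory import get_observatory
from pint.toa import format_toa_line

obs = get_observatory('gbt')
t = time.Time(55555.0, format='mjd', scale='utc', precision=9)
line = format_toa_line(t, 1.2 * u.us, 1400.0 * u.MHz, obs,
                       name='fake.ff', flags={'be': 'GUPPI'}, format='Tempo2')
print(line, end='')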
Example #40
0
tbl.add_column(Column(name='dr_improvement_bsens', data=[np.nan] * len(tbl)))

for field in "G008.67 G337.92 W43-MM3 G328.25 G351.77 G012.80 G327.29 W43-MM1 G010.62 W51-IRS2 W43-MM2 G333.60 G338.93 W51-E G353.41".split(
):
    for band in (3, 6):
        for config in ("12M", ):  # "7M12M"):
            for suffix in ('image.tt0.fits', 'image.tt0.pbcor.fits'):

                fns = glob.glob(
                    f"{basepath}/{field}/B{band}/bsens/*_{config}_robust0_*finaliter*.{suffix}"
                )
                if len(fns) > 1:
                    raise ValueError("Too many matches!")
                elif len(fns) == 0:
                    log.error(
                        f"No matches to field={field} band={band} config={config}"
                    )
                    continue
                    raise ValueError("No matches!")
                fn = fns[0]

                pl.clf()
                bsens = fn
                cleanest = fn.replace("_bsens",
                                      "").replace("/bsens/", "/cleanest/")
                #print(os.path.exists(bsens), os.path.exists(clean))
                #field = fn.split("_uid")[0].split("/")[-1]

                filepath = fn.split("bsens")[0]

                try:
Example #41
0
File: tpub.py Project: elisaquintana/wpub
    def update(self, month=None,
                exclude=['johannes']
               # exclude=['keplerian', 'johannes', 'k<sub>2</sub>',
               #          "kepler equation", "kepler's equation", "xmm-newton",
               #          "kepler's law", "kepler's third law", "kepler problem",
               #          "kepler crater", "kepler's supernova", "kepler's snr"]
               ):
        """Query ADS for new publications.

        Parameters
        ----------
        month : str
            Of the form "YYYY-MM".

        exclude : list of str
            Ignore articles if they contain any of the strings given
            in this list. (Case-insensitive.)
        """
        if ads is None:
            log.error("This action requires the ADS key to be setup.")
            return

        print(Highlight.YELLOW +
              "Reminder: did you `git pull` tpub before running "
              "this command? [y/n] " +
              Highlight.END,
              end='')
        if input() == 'n':
            return

        if month is None:
            month = datetime.datetime.now().strftime("%Y-%m")

        # First show all the papers with the TESS funding message in the ack
        log.info("Querying ADS for acknowledgements (month={}).".format(month))
        database = "astronomy"
        qry = ads.SearchQuery(q="""(ack:"TESS mission"
                                    OR ack:"Transiting Exoplanet Survey Satellite"
                                    OR ack:"TESS team"
                                    OR ack:"TESS")
                                   -ack:"partial support from"
                                   pubdate:"{}"
                                   database:"{}"
                                """.format(month, database),
                              fl=FIELDS,
                              rows=9999999999)
        articles = list(qry)
        for idx, article in enumerate(articles):
            statusmsg = ("Showing article {} out of {} that mentions TESS "
                         "in the acknowledgements.\n\n".format(
                            idx+1, len(articles)))
            self.add_interactively(article, statusmsg=statusmsg)

        # Then search for keywords in the title and abstracts
        log.info("Querying ADS for titles and abstracts "
                 "(month={}).".format(month))
        qry = ads.SearchQuery(q="""(
                                    abs:"TESS"
                                    OR abs:"Transiting Exoplanet Survey Satellite"
                                    OR title:"TESS"
                                    OR title:"Transiting Exoplanet Survey Satellite"
                                    OR full:"TESS photometry"
                                    OR full:"TESS lightcurve"
                                    )
                                   pubdate:"{}"
                                   database:"{}"
                                """.format(month, database),
                              fl=FIELDS,
                              rows=9999999999)
        articles = list(qry)

        for idx, article in enumerate(articles):
            # Ignore articles without abstract
            if not hasattr(article, 'abstract') or article.abstract is None:
                continue
            abstract_lower = article.abstract.lower()

            ignore = False

            # Ignore articles containing any of the excluded terms
            for term in exclude:
                if term.lower() in abstract_lower:
                    ignore = True

            # Ignore articles already in the database
            if article in self:
                ignore = True

            # Ignore all the unrefereed non-arxiv stuff
            try:
                if "NOT REFEREED" in article.property and article.pub != "ArXiv e-prints":
                    ignore = True
            except (AttributeError, ads.exceptions.APIResponseError):
                pass  # no .pub attribute

            # Ignore proposals and cospar abstracts
            if ".prop." in article.bibcode or "cosp.." in article.bibcode:
                ignore = True

            if not ignore:  # Propose to the user
                statusmsg = '(Reviewing article {} out of {}.)\n\n'.format(
                                idx+1, len(articles))
                self.add_interactively(article, statusmsg=statusmsg)
        log.info('Finished reviewing all articles for {}.'.format(month))
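
A compact sketch of the kind of ADS query issued above; it assumes the `ads` package is configured with an API token, and the FIELDS list here is illustrative.

import ads

FIELDS = ['bibcode', 'title', 'abstract', 'pubdate', 'pub', 'property']
qry = ads.SearchQuery(q='abs:"TESS" pubdate:"2021-01" database:"astronomy"',
                      fl=FIELDS, rows=2000)
for article in qry:
    print(article.bibcode, article.title)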
Example #42
0
File: core.py Project: nstarman/astroquery
    def download_files(self, files, savedir=None, cache=True,
                       continuation=True, skip_unauthorized=True):
        """
        Given a list of file URLs, download them

        Note: Given a list with repeated URLs, each will only be downloaded
        once, so the return may have a different length than the input list

        Parameters
        ----------
        files : list
            List of URLs to download
        savedir : None or str
            The directory to save to.  Default is the cache location.
        cache : bool
            Cache the download?
        continuation : bool
            Attempt to continue where the download left off (if it was broken)
        skip_unauthorized : bool
            If you receive "unauthorized" responses for some of the download
            requests, skip over them.  If this is False, an exception will be
            raised.
        """

        if self.USERNAME:
            auth = self._get_auth_info(self.USERNAME)
        else:
            auth = None

        downloaded_files = []
        if savedir is None:
            savedir = self.cache_location
        for fileLink in unique(files):
            try:
                log.debug("Downloading {0} to {1}".format(fileLink, savedir))
                check_filename = self._request('HEAD', fileLink, auth=auth,
                                               stream=True)
                check_filename.raise_for_status()
                if 'text/html' in check_filename.headers['Content-Type']:
                    raise ValueError("Bad query.  This can happen if you "
                                     "attempt to download proprietary "
                                     "data when not logged in")

                filename = self._request("GET", fileLink, save=True,
                                         savedir=savedir,
                                         timeout=self.TIMEOUT,
                                         cache=cache,
                                         auth=auth,
                                         continuation=continuation)
                downloaded_files.append(filename)
            except requests.HTTPError as ex:
                if ex.response.status_code == 401:
                    if skip_unauthorized:
                        log.info("Access denied to {url}.  Skipping to"
                                 " next file".format(url=fileLink))
                        continue
                    else:
                        raise(ex)
                elif ex.response.status_code == 403:
                    log.error("Access denied to {url}".format(url=fileLink))
                    if 'dataPortal' in fileLink and 'sso' not in fileLink:
                        log.error("The URL may be incorrect.  Try using "
                                  "{0} instead of {1}"
                                  .format(fileLink.replace('dataPortal/',
                                                           'dataPortal/sso/'),
                                          fileLink))
                    raise ex
                elif ex.response.status_code == 500:
                    # empirically, this works the second time most of the time...
                    filename = self._request("GET", fileLink, save=True,
                                             savedir=savedir,
                                             timeout=self.TIMEOUT,
                                             cache=cache,
                                             auth=auth,
                                             continuation=continuation)
                    downloaded_files.append(filename)
                else:
                    raise ex
        return downloaded_files
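
A hedged usage sketch of the method above; the URL is a placeholder and a login is only needed for proprietary data.

from astroquery.alma import Alma

alma = Alma()
# alma.login('myusername')  # only needed for proprietary data
files = alma.download_files(
    ['https://almascience.org/dataPortal/member.uid___A001_X000_X000.fits'],
    savedir='.', skip_unauthorized=True)
print(files)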
Example #43
0
    def selectregion(self,
                     xmin=None,
                     xmax=None,
                     xtype='wcs',
                     highlight=False,
                     fit_plotted_area=True,
                     reset=False,
                     verbose=False,
                     debug=False,
                     use_window_limits=None,
                     exclude=None,
                     **kwargs):
        """
        Pick a fitting region in either WCS units or pixel units

        Parameters
        ----------
        *xmin / xmax* : [ float ]
            The min/max X values to use in X-axis units (or pixel units if xtype is set).
            TAKES PRECEDENCE OVER ALL OTHER BOOLEAN OPTIONS

        *xtype* : [ string ]
            A string specifying the xtype that xmin/xmax are specified in.  It can be either
            'wcs' or any valid xtype from :class:`pyspeckit.spectrum.units`

        *reset* : [ bool ]
            Reset the selected region to the full spectrum?  Only takes effect
            if xmin and xmax are not (both) specified.
            TAKES PRECEDENCE OVER ALL SUBSEQUENT BOOLEAN OPTIONS

        *fit_plotted_area* : [ bool ]
            Use the plot limits *as specified in :class:`pyspeckit.spectrum.plotters`*?
            Note that this is not necessarily the same as the window plot limits!

        *use_window_limits* : [ bool ]
            Use the plot limits *as displayed*.  Defaults to self.use_window_limits
            (:attr:`pyspeckit.spectrum.interactive.use_window_limits`).
            Overwrites xmin,xmax set by plotter

        exclude: {list of length 2n,'interactive', None}
            * interactive: start an interactive session to select the
              include/exclude regions
            * list: parsed as a series of (startpoint, endpoint) in the
              spectrum's X-axis units.  Will exclude the regions between
              startpoint and endpoint
            * None: No exclusion
        """
        if debug or self._debug:
            log.info("".join(
                map(str,
                    ("selectregion kwargs: ", kwargs, " use_window_limits: ",
                     use_window_limits, " reset: ", reset, " xmin: ", xmin,
                     " xmax: ", xmax))))

        if reset:
            if verbose or debug or self._debug:
                print("Resetting xmin/xmax to full limits of data")
            self.xmin = 0
            # End-inclusive!
            self.xmax = self.Spectrum.data.shape[0]
            self.includemask[self.xmin:self.xmax] = True
            #raise ValueError("Need to input xmin and xmax, or have them set by plotter, for selectregion.")

        if xmin is not None and xmax is not None:
            if verbose or debug or self._debug:
                log.info("Setting xmin,xmax from keywords %g,%g" %
                         (xmin, xmax))
            if xtype.lower() in ('wcs', ) or xtype in units.xtype_dict:
                self.xmin = numpy.floor(self.Spectrum.xarr.x_to_pix(xmin))
                # End-inclusive!
                self.xmax = numpy.ceil(self.Spectrum.xarr.x_to_pix(xmax)) + 1
            else:
                self.xmin = xmin
                # NOT end-inclusive!  This is PYTHON indexing
                self.xmax = xmax
            self.includemask[self.xmin:self.xmax] = True
        elif (self.Spectrum.plotter.xmin is not None
              and self.Spectrum.plotter.xmax is not None and fit_plotted_area):
            if use_window_limits or (use_window_limits is None
                                     and self.use_window_limits):
                if debug or self._debug:
                    print(
                        "Resetting plotter xmin,xmax and ymin,ymax to the currently visible region"
                    )
                self.Spectrum.plotter.set_limits_from_visible_window(
                    debug=debug)
            self.xmin = numpy.floor(
                self.Spectrum.xarr.x_to_pix(self.Spectrum.plotter.xmin))
            self.xmax = numpy.ceil(
                self.Spectrum.xarr.x_to_pix(self.Spectrum.plotter.xmax))
            if self.xmin > self.xmax:
                self.xmin, self.xmax = self.xmax, self.xmin
            # End-inclusive!  Note that this must be done after the min/max swap!
            # this feels sketchy to me, but if you don't do this the plot will not be edge-inclusive
            # that means you could do this reset operation N times to continuously shrink the plot
            self.xmax += 1
            if debug or self._debug:
                log.debug("Including all plotted area (as defined by "
                          "[plotter.xmin={0}, plotter.xmax={1}]) for "
                          "fit".format(self.Spectrum.plotter.xmin,
                                       self.Spectrum.plotter.xmax))
                log.debug("Including self.xmin:self.xmax = {0}:{1}"
                          " (and excluding the rest)".format(
                              self.xmin, self.xmax))
            self.includemask[self.xmin:self.xmax] = True
        else:
            if verbose:
                log.info("Left region selection unchanged."
                         "  xminpix, xmaxpix: %i,%i" % (self.xmin, self.xmax))

        if self.xmin == self.xmax:
            # Reset if there is no fitting region
            self.xmin = 0
            # End-inclusive
            self.xmax = self.Spectrum.data.shape[0]
            log.debug("Reset to full range because the endpoints were equal")
        elif self.xmin > self.xmax:
            # Swap endpoints if the axis has a negative delta-X
            self.xmin, self.xmax = self.xmax, self.xmin
            log.debug(
                "Swapped endpoints because the left end was greater than the right"
            )

        self.includemask[:self.xmin] = False
        self.includemask[self.xmax:] = False

        # Exclude keyword-specified excludes.  Assumes exclusion in current X array units
        log.debug("Exclude: {0}".format(exclude))
        if (isinstance(exclude, str) and (exclude == 'interactive')):
            self.start_interactive()
        elif exclude is not None and len(exclude) % 2 == 0:
            for x1, x2 in zip(exclude[::2], exclude[1::2]):
                if xtype.lower() in ('wcs', ) or xtype in units.xtype_dict:
                    x1 = self.Spectrum.xarr.x_to_pix(x1)
                    # WCS units should be end-inclusive
                    x2 = self.Spectrum.xarr.x_to_pix(x2) + 1
                    # correct for order if WCS units are used
                    # if pixel units are being used, we assume the user has
                    # done so intentionally
                    # TODO: if xarr, data go opposite directions, this swap
                    # doesn't work.
                    if x1 > x2:
                        x1, x2 = x2, x1
                log.debug("Exclusion pixels: {0} to {1}".format(x1, x2))
                self.includemask[x1:x2] = False
        elif exclude is not None:
            log.error("An 'exclude' keyword was specified with an odd number "
                      "of parameters, which is not permitted.")

        if highlight:
            self.highlight_fitregion()

        self._update_xminmax()

        if debug or self._debug:
            log.debug("At the end of selectregion, xmin, xmax = {0},{1}"
                      " and includemask.sum() == {2}".format(
                          self.xmin, self.xmax, self.includemask.sum()))
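
A hedged usage sketch of the region selection above, assuming a pyspeckit Spectrum with a fitter attached; the spectrum and limits are synthetic.

import numpy as np
import pyspeckit

xarr = np.linspace(-50, 50, 200)                      # spectral axis, km/s
data = np.exp(-xarr**2 / 50.0) + 0.05 * np.random.randn(200)
sp = pyspeckit.Spectrum(data=data, xarr=xarr, xarrkwargs={'unit': 'km/s'})

# Fit only -30..30 km/s and exclude two (start, end) windows near the line
sp.specfit.selectregion(xmin=-30, xmax=30, exclude=[-20, -10, 10, 20])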
Example #44
0
            }
            muid_configs.update({
                val: key
                for key, val in metadata[bandname][field]
                ['muid_configs'].items()
            })
            print(
                f"Loop info:  band={band}, field{field}, muid={muid_configs}, spw={spw}, spwn={spwn}"
            )

            for muid in muids:

                cdid = f'{field}B{band}{muid}'
                config = muid_configs[muid]
                if cdid not in contdatfiles:
                    log.error(f"Missing {cdid}  {field} B{band} {muid}")
                    if config is not None:
                        included_bw[band][spw][field][config] = np.nan
                    continue
                if 'notfound' in contdatfiles[cdid]:
                    log.error(
                        f"Missing {cdid}  {field} B{band} {muid}: {contdatfiles[cdid]}"
                    )
                    if config is not None:
                        included_bw[band][spw][field][config] = np.nan
                    continue

                contdat = parse_contdotdat(contdatfiles[cdid])
                if config is None:
                    log.error(
                        f"muid={muid} cdid={cdid}, contdat={contdatfiles[cdid]} but config={config}"
Example #45
0
def pulse_profile(ax, etable, args):
    if (args.orb is None) or (args.par is None):
        log.warning('You did not specify orbfile or parfile')
        log.info('Please input files for orb and par with --orb and --par')
        return
    import pint
    import astropy.io.fits as pyfits
    from astropy.time import TimeDelta
    import pint.toa, pint.models
    from pint.plot_utils import phaseogram_binned
    from pint.observatory.nicer_obs import NICERObs
    from pint.eventstats import hm

    ### Make arguments for parfile and orbfile and only do this if both are present
    log.info('Event file TELESCOPE = {0}, INSTRUMENT = {1}'.format(
        etable.meta['TELESCOP'], etable.meta['INSTRUME']))
    # Instantiate NICERObs once so it gets added to the observatory registry
    log.info('Setting up NICER observatory')
    NICERObs(name='NICER', FPorbname=args.orb)

    log.info('Reading model from PARFILE')
    # Load PINT model objects
    modelin = pint.models.get_model(args.par)
    log.info(str(modelin))

    # Read event file and return list of TOA objects
    log.info('doing the load_toas thing')
    #tl  = load_NICER_TOAs(pulsefilename[0])

    # Create TOA list
    tl = []
    for t in etable['T']:
        tl.append(pint.toa.TOA(t, obs='NICER'))

    planets = False
    if 'PLANET_SHAPIRO' in modelin.params:
        if modelin.PLANET_SHAPIRO.value:
            planets = True
    ts = pint.toa.get_TOAs_list(tl,
                                planets=planets,
                                include_bipm=False,
                                include_gps=False)
    # No longer needed, since Feb 28 reprocessing
    #    log.warning('Applying -1.0s time correction to event time TOAs for pulse phase plot')
    #    ts.adjust_TOAs(TimeDelta(np.ones(len(ts.table))*-1.0*u.s,scale='tt'))
    # Note: adjust_TOAs recomputes TDBs and posvels so no need to do again.
    #    ts.compute_TDBs()
    #    ts.compute_posvels(ephem='DE421',planets=True)

    # Compute phases
    phss = modelin.phase(ts, abs_phase=True)[1]
    # Strip the units, because PINT may return u.cycle
    phss = np.array(phss)
    # ensure all positive
    phases = np.where(phss < 0.0, phss + 1.0, phss)
    mjds = ts.get_mjds()

    h = hm(phases)
    if not np.isfinite(h):
        log.error("H not finite, using {0} phases!".format(len(phases)))
        print("Phases from {0} to {1}\n".format(h.min(), h.max()))
    else:
        log.info("H = {0} from {1} phases".format(h, len(phases)))
    ax.hist(phases, bins=32)
    ax.text(0.1, 0.1, 'H = {0:.2f}'.format(h), transform=ax.transAxes)

    #np.savetxt('{0}.phases'.format(args.basename),np.transpose([etable['MET'], etable['PI'],phases]))

    plot.ylabel('Counts')
    plot.xlabel('Pulse Phase')
    plot.title('Pulse Profile')
    return
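
A small sketch of the H-test and phase-histogram step used above, assuming pint.eventstats is available; the phases are synthetic.

import numpy as np
import matplotlib.pyplot as plt
from pint.eventstats import hm

# Synthetic phases: uniform background plus a narrow pulse near phase 0.3
phases = np.concatenate([np.random.uniform(0.0, 1.0, 5000),
                         np.random.normal(0.3, 0.02, 500) % 1.0])
h = hm(phases)
print("H = {0:.2f} from {1} phases".format(h, len(phases)))

fig, ax = plt.subplots()
ax.hist(phases, bins=32)
ax.text(0.1, 0.1, 'H = {0:.2f}'.format(h), transform=ax.transAxes)
ax.set_xlabel('Pulse Phase')
ax.set_ylabel('Counts')
plt.show()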
Example #46
0
    def _get_maps_for_mission(self, maps_table, mission, download_dir, cache):
        maps = []

        if (len(maps_table[self.__PRODUCT_URL_STRING]) > 0):
            mission_directory = self._create_mission_directory(
                mission, download_dir)
            log.info("Starting download of {} data. ({} files)".format(
                mission, len(maps_table[self.__PRODUCT_URL_STRING])))
            for index in range(len(maps_table)):
                product_url = maps_table[
                    self.__PRODUCT_URL_STRING][index].decode('utf-8')
                if (mission.lower() == self.__HERSCHEL_STRING):
                    observation_id = maps_table["observation_id"][
                        index].decode('utf-8')
                else:
                    observation_id = (maps_table[self._get_tap_observation_id(
                        mission)][index].decode('utf-8'))
                log.info("Downloading Observation ID: {} from {}".format(
                    observation_id, product_url))
                sys.stdout.flush()
                directory_path = mission_directory + "/"
                if (mission.lower() == self.__HERSCHEL_STRING):
                    try:
                        maps.append(
                            self._get_herschel_map(product_url, directory_path,
                                                   cache))
                    except HTTPError as err:
                        log.error("Download failed with {}.".format(err))
                        maps.append(None)

                else:
                    response = self._request('GET',
                                             product_url,
                                             cache=cache,
                                             headers=self._get_header())

                    try:
                        response.raise_for_status()

                        file_name = ""
                        if (product_url.endswith(self.__FITS_STRING)):
                            file_name = (
                                directory_path +
                                self._extract_file_name_from_url(product_url))
                        else:
                            file_name = (
                                directory_path +
                                self._extract_file_name_from_response_header(
                                    response.headers))

                        fits_data = response.content
                        with open(file_name, 'wb') as fits_file:
                            fits_file.write(fits_data)
                            fits_file.close()
                            maps.append(fits.open(file_name))
                    except HTTPError as err:
                        log.error("Download failed with {}.".format(err))
                        maps.append(None)

                if None in maps:
                    log.error("Some downloads were unsuccessful, please check "
                              "the warnings for more details")

                else:
                    log.info("[Done]")

            log.info("Downloading of {} data complete.".format(mission))

        return maps
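
A minimal sketch of the download-and-open pattern used in the loop above; the URL and file name are placeholders.

import requests
from astropy.io import fits

product_url = "https://example.org/maps/observation_0001.fits"   # placeholder
response = requests.get(product_url)
response.raise_for_status()

file_name = "observation_0001.fits"
with open(file_name, 'wb') as fits_file:
    fits_file.write(response.content)
hdulist = fits.open(file_name)
hdulist.info()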
Example #47
0
    def download_and_extract_files(self,
                                   urls,
                                   delete=True,
                                   regex=r'.*\.fits$',
                                   include_asdm=False,
                                   path='cache_path',
                                   verbose=True):
        """
        Given a list of tarball URLs:

            1. Download the tarball
            2. Extract all FITS files (or whatever matches the regex)
            3. Delete the downloaded tarball

        See ``Alma.get_files_from_tarballs`` for details

        Parameters
        ----------
        urls : str or list
            A single URL or a list of URLs
        include_asdm : bool
            Only affects cycle 1+ data.  If set, the ASDM files will be
            downloaded in addition to the script and log files.  By default,
            though, this file will be downloaded and deleted without extracting
            any information: you must change the regex if you want to extract
            data from an ASDM tarball
        """

        if isinstance(urls, six.string_types):
            urls = [urls]
        if not isinstance(urls, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        all_files = []
        for url in urls:
            if url[-4:] != '.tar':
                raise ValueError("URLs should be links to tarballs.")

            tarfile_name = os.path.split(url)[-1]
            if tarfile_name in self._cycle0_tarfile_content['ID']:
                # It is a cycle 0 file: need to check if it contains FITS
                match = (self._cycle0_tarfile_content['ID'] == tarfile_name)
                if not any(
                        re.match(regex, x)
                        for x in self._cycle0_tarfile_content['Files'][match]):
                    log.info("No FITS files found in {0}".format(tarfile_name))
                    continue
            else:
                if 'asdm' in tarfile_name and not include_asdm:
                    log.info("ASDM tarballs do not contain FITS files; "
                             "skipping.")
                    continue

            try:
                tarball_name = self._request('GET',
                                             url,
                                             save=True,
                                             timeout=self.TIMEOUT)
            except requests.ConnectionError as ex:
                self.partial_file_list = all_files
                log.error("There was an error downloading the file. "
                          "A partially completed download list is "
                          "in Alma.partial_file_list")
                raise ex
            except requests.HTTPError as ex:
                if ex.response.status_code == 401:
                    log.info("Access denied to {url}.  Skipping to"
                             " next file".format(url=url))
                    continue
                else:
                    raise ex

            fitsfilelist = self.get_files_from_tarballs([tarball_name],
                                                        regex=regex,
                                                        path=path,
                                                        verbose=verbose)

            if delete:
                log.info("Deleting {0}".format(tarball_name))
                os.remove(tarball_name)

            all_files += fitsfilelist
        return all_files
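
A hedged usage sketch of the method above; the tarball URL is a placeholder.

from astroquery.alma import Alma

alma = Alma()
tar_url = ('https://almascience.org/dataPortal/'
           '2012.1.00000.S_uid___A001_X000_X000.001_of_001.tar')
fits_files = alma.download_and_extract_files([tar_url], delete=True,
                                             regex=r'.*\.fits$')
print(fits_files)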
Example #48
0
def load_FPorbit(orbit_filename):
    """Load data from an (RXTE or NICER) FPorbit file

    Reads a FPorbit FITS file

    Parameters
    ----------
    orbit_filename : str
        Name of file to load

    Returns
    -------
    astropy Table containing Time, x, y, z, v_x, v_y, v_z data

    """
    # Load orbit FITS file
    hdulist = pyfits.open(orbit_filename)
    # log.info('orb file HDU name is {0}'.format(hdulist[1].name))
    if hdulist[1].name not in ("ORBIT", "XTE_PE"):
        log.error(
            "NICER orb file first extension is {0}. It should be ORBIT".format(
                hdulist[1].name))
    FPorbit_hdr = hdulist[1].header
    FPorbit_dat = hdulist[1].data

    log.info("Opened FPorbit FITS file {0}".format(orbit_filename))
    # TIMESYS should be 'TT'

    # TIMEREF should be 'LOCAL', since no delays are applied

    timesys = FPorbit_hdr["TIMESYS"]
    log.debug("FPorbit TIMESYS {0}".format(timesys))
    timeref = FPorbit_hdr["TIMEREF"]
    log.debug("FPorbit TIMEREF {0}".format(timeref))

    mjds_TT = read_fits_event_mjds(hdulist[1])
    mjds_TT = mjds_TT * u.d
    log.debug("FPorbit spacing is {0}".format(
        (mjds_TT[1] - mjds_TT[0]).to(u.s)))
    X = FPorbit_dat.field("X") * u.m
    Y = FPorbit_dat.field("Y") * u.m
    Z = FPorbit_dat.field("Z") * u.m
    Vx = FPorbit_dat.field("Vx") * u.m / u.s
    Vy = FPorbit_dat.field("Vy") * u.m / u.s
    Vz = FPorbit_dat.field("Vz") * u.m / u.s
    log.info("Building FPorbit table covering MJDs {0} to {1}".format(
        mjds_TT.min(), mjds_TT.max()))
    FPorbit_table = Table(
        [mjds_TT, X, Y, Z, Vx, Vy, Vz],
        names=("MJD_TT", "X", "Y", "Z", "Vx", "Vy", "Vz"),
        meta={"name": "FPorbit"},
    )
    # Make sure table is sorted by time
    log.debug("Sorting FPorbit table")
    FPorbit_table.sort("MJD_TT")
    # Now delete any bad entries where the positions are 0.0
    idx = np.where(
        np.logical_and(FPorbit_table["X"] != 0.0,
                       FPorbit_table["Y"] != 0.0))[0]
    if len(idx) != len(FPorbit_table):
        log.warning("Dropping {0} zero entries from FPorbit table".format(
            len(FPorbit_table) - len(idx)))
        FPorbit_table = FPorbit_table[idx]
    return FPorbit_table
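
A short usage sketch for the loader above; the orbit file name is a placeholder.

orbit_table = load_FPorbit("ni1234567890.orb")   # placeholder FPorbit file
print(orbit_table.colnames)
print("Covers MJD {0} to {1}".format(orbit_table['MJD_TT'].min(),
                                     orbit_table['MJD_TT'].max()))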
Example #49
0
File: fermi_toas.py Project: carolkng/PINT
def load_Fermi_TOAs(ft1name,weightcolumn=None,targetcoord=None,logeref=4.1,
                    logesig=0.5,minweight=0.0, minmjd=0.0, maxmjd=np.inf):
    '''
    TOAlist = load_Fermi_TOAs(ft1name)
      Read photon event times out of a Fermi FT1 file and return
      a list of PINT TOA objects.
      Correctly handles raw FT1 files, or ones processed with gtbary
      to have barycentered or geocentered TOAs.

      weightcolumn specifies the FITS column name to read the photon weights
      from.  The special value 'CALC' causes the weights to be computed empirically
      as in Philippe Bruel's SearchPulsation code.
      logeref and logesig are parameters for the weight computation and are only
      used when weightcolumn='CALC'.

      When weights are loaded, or computed, events are filtered by weight >= minweight
    '''
    import astropy.io.fits as pyfits
    # Load photon times from FT1 file
    hdulist = pyfits.open(ft1name)
    ft1hdr=hdulist[1].header
    ft1dat=hdulist[1].data

    # TIMESYS will be 'TT' for unmodified Fermi LAT events (or geocentered), and
    #                 'TDB' for events barycentered with gtbary
    # TIMEREF will be 'GEOCENTER' for geocentered events,
    #                 'SOLARSYSTEM' for barycentered,
    #             and 'LOCAL' for unmodified events

    timesys = ft1hdr['TIMESYS']
    log.info("TIMESYS {0}".format(timesys))
    timeref = ft1hdr['TIMEREF']
    log.info("TIMEREF {0}".format(timeref))

    # Read time column from FITS file
    mjds = read_fits_event_mjds_tuples(hdulist[1])
    if len(mjds) == 0:
        log.error('No MJDs read from file!')
        raise ValueError('No MJDs read from file!')

    energies = ft1dat.field('ENERGY')*u.MeV
    if weightcolumn is not None:
        if weightcolumn == 'CALC':
            photoncoords = SkyCoord(ft1dat.field('RA')*u.degree,ft1dat.field('DEC')*u.degree,frame='icrs')
            weights = calc_lat_weights(ft1dat.field('ENERGY'),
                photoncoords.separation(targetcoord), logeref=logeref,
                logesig=logesig)
        else:
            weights = ft1dat.field(weightcolumn)
        if minweight > 0.0:
            idx = np.where(weights>minweight)[0]
            mjds = mjds[idx]
            energies = energies[idx]
            weights = weights[idx]

    # limit the TOAs to ones in selected MJD range
    mjds_float = np.array([r[0] + r[1] for r in mjds])
    idx = np.logical_and((mjds_float > minmjd),(mjds_float < maxmjd))
    mjds = mjds[idx]
    energies = energies[idx]
    if weightcolumn is not None:
        weights = weights[idx]

    if timesys == 'TDB':
        log.info("Building barycentered TOAs")
        if weightcolumn is None:
            toalist=[toa.TOA(m,obs='Barycenter',scale='tdb',energy=e) for m,e in zip(mjds,energies)]
        else:
            toalist=[toa.TOA(m,obs='Barycenter',scale='tdb',energy=e,weight=w) for m,e,w in zip(mjds,energies,weights)]
    else:
        if timeref == 'LOCAL':
            log.info('Building spacecraft local TOAs, with MJDs in range {0} to {1}'.format(mjds[0],mjds[-1]))
            assert timesys == 'TT'
            try:
                fermiobs = get_observatory('Fermi')
            except KeyError:
                log.error('Fermi observatory not defined. Make sure you have specified an FT2 file!')
                raise

            try:
                if weightcolumn is None:
                    toalist=[toa.TOA(m, obs='Fermi', scale='tt',energy=e)
                            for m,e in zip(mjds,energies)]
                else:
                    toalist=[toa.TOA(m, obs='Fermi', scale='tt',energy=e,weight=w)
                            for m,e,w in zip(mjds,energies,weights)]
            except KeyError:
                log.error('Error processing Fermi TOAs. You may have forgotten to specify an FT2 file with --ft2')
                raise
        else:
            log.info("Building geocentered TOAs")
            if weightcolumn is None:
                toalist=[toa.TOA(m, obs='Geocenter', scale='tt',energy=e) for m,e in zip(mjds,energies)]
            else:
                toalist=[toa.TOA(m, obs='Geocenter', scale='tt',energy=e,weight=w) for m,e,w in zip(mjds,energies,weights)]

    return toalist
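
A hedged end-to-end sketch mirroring the Fermi script earlier in this listing; file names and the weight column are placeholders, and the FermiObs import path may differ between PINT versions.

import pint.toa as toa
from pint.fermi_toas import load_Fermi_TOAs
from pint.observatory.fermi_obs import FermiObs   # import path varies with PINT version

# Register the Fermi observatory from an FT2 spacecraft file
# (needed for unbarycentered FT1 events)
FermiObs(name='Fermi', ft2name='FT2.fits')

tl = load_Fermi_TOAs('FT1.fits', weightcolumn='WEIGHT', minweight=0.05)
ts = toa.TOAs(toalist=tl)
ts.compute_TDBs()
ts.compute_posvels(ephem='DE421', planets=False)
print(ts.get_summary())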
Example #50
0
    def __init__(
        self,
        survey_name: str = None,
        survey_specs: dict = None,
    ):
        self.survey_name = survey_name
        self.load_specs(survey_specs)

        start_time = time.time()
        # parse as a HTML table
        try:
            self.page = requests.get(self.survey_url)
        except requests.exceptions.ConnectionError:
            log.error("Unable to read URL '{}'".format(self.survey_url))
            return
        self.update = Time.now()
        self.soup = BeautifulSoup(self.page.content, "html.parser")
        tables = self.soup.find_all(name="table")
        if self.survey_name == "SUPERB":
            # so far this works for SUPERB
            self.raw_table = tables[1].find(name="tr")
        elif self.survey_name == "HTRU-S Low-latitude":
            self.raw_table = tables[1]
        elif self.survey_name == "DMB" or self.survey_name == "GBT350":
            # this is very hacky
            # but the HTML seems to be missing /tr tags which breaks the parsing
            s = str(tables[0].findChildren("tr")[0])
            sout = s.replace("<tr>",
                             "</tr><tr>")[5:].replace("<td>", "</td><td>")
            soup2 = BeautifulSoup(sout, "html.parser")
            self.raw_table = soup2
        else:
            self.raw_table = tables[self.table_index]

        self.rows = self.raw_table.find_all(name="tr")
        pulsar = []
        period = []
        DM = []
        RA = []
        Dec = []
        for row in self.rows[self.start_row:]:
            # iterate over each row in the table
            # each row represents a pulsar (usually)
            cols = row.find_all(name="td")
            if ((len(cols) < 3)
                    or ("pulsar" in cols[self.pulsar_column].text.lower())
                    or ("name" in cols[self.pulsar_column].text.lower())):
                continue
            name = cols[self.pulsar_column].text
            # replace some dashes with minus signs
            name = name.replace(chr(8211), "-")
            name = name.replace(chr(8722), "-")
            name = re.sub(r"[^J\d\+-\.A-Za-z]", "", name)
            if name.startswith("FRB") or len(name) == 0:
                continue
            pulsar.append(name.strip())
            P = cols[self.period_column].text
            # special cases and unit conversion
            P = re.sub(r"[^\d\.]", "", P)
            if self.period_units == "ms":
                try:
                    period.append(float(P))
                except ValueError:
                    period.append(np.nan)
            elif self.period_units == "s":
                try:
                    period.append(float(P) * 1000)
                except ValueError:
                    period.append(np.nan)
            try:
                dm = re.sub(r"[^\d\.]", "", cols[self.DM_column].text)
                DM.append(float(dm))
            except ValueError as e:
                log.error("Error parsing DM value of '{}' for pulsar '{}': {}".
                          format(cols[self.DM_column].text, pulsar[-1], e))
                return
            if self.ra_column is None or self.dec_column is None:
                try:
                    coord = name_to_position(pulsar[-1])
                except:
                    log.warning(
                        "Unable to parse pulsar '{}' to determine coordiates; assuming (0,0)"
                        .format(pulsar[-1]))
                    coord = SkyCoord(0 * u.deg, 0 * u.deg)
            else:
                ra_text = re.sub(r"[^\d:\.]", "", cols[self.ra_column].text)
                # some of the HTML tables have a non-breaking hyphen (Unicode 8209)
                # instead of a hyphen
                # convert it
                dec_text = cols[self.dec_column].text
                if chr(8209) in dec_text:
                    dec_text = dec_text.replace(chr(8209), "-")
                dec_text = re.sub(r"[^\d:\.\+-]", "", dec_text)
                if len(ra_text) == 0 or len(dec_text) == 0:
                    try:
                        coord = name_to_position(pulsar[-1])
                    except Exception:
                        log.warning(
                            "No RA/Dec available and unable to parse pulsar '{}' to determine coordinates; assuming (0,0)"
                            .format(pulsar[-1]))
                        coord = SkyCoord(0 * u.deg, 0 * u.deg)
                else:
                    try:
                        coord = SkyCoord(
                            ra_text,
                            dec_text,
                            frame=self.coordinate_frame,
                            unit=(self.ra_unit, self.dec_unit),
                        ).icrs
                    except ValueError as e:
                        log.error(
                            "Error parsing position values of '{},{}' for pulsar '{}': {}"
                            .format(
                                cols[self.ra_column].text,
                                cols[self.dec_column].text,
                                pulsar[-1],
                                e,
                            ))
                        return
            if coord is None:
                log.warning(
                    "Unable to parse pulsar '{}'; assuming (0,0).".format(
                        pulsar[-1]))
                coord = SkyCoord(0 * u.deg, 0 * u.deg)
            RA.append(coord.ra.deg)
            Dec.append(coord.dec.deg)
            if (len(pulsar) >= 2 and pulsar[-1] == pulsar[-2]
                    and period[-1] == period[-2] and DM[-1] == DM[-2]):
                log.warning(
                    f"Identified apparent duplicate:\n\t{pulsar[-1]} {period[-1]} {DM[-1]}\n\t{pulsar[-2]} {period[-2]} {DM[-2]}\nDeleting..."
                )
                # it's a duplicate
                del pulsar[-1]
                del period[-1]
                del DM[-1]
                del RA[-1]
                del Dec[-1]

        self.data = Table([
            Column(pulsar, name="PSR"),
            Column(RA, name="RA", unit=u.deg, format="%.6f"),
            Column(Dec, name="Dec", unit=u.deg, format="%.6f"),
            Column(period, name="P", unit=u.ms, format="%.2f"),
            Column(DM, name="DM", unit=u.pc / u.cm**3, format="%.2f"),
        ])
        end_time = time.time()
        log.info(
            "Read data for {} pulsars for survey '{}' in {:.2f}s at {}".format(
                len(self.data),
                self.survey_name,
                end_time - start_time,
                self.update.to_value("iso", subfmt="date_hm"),
            ))
        self.data.meta["url"] = self.survey_url
        self.data.meta["survey"] = self.survey_name
        self.data.meta["date"] = self.update
Example #51
0
def plot_uvspectra(msname, **kwargs):

    msmd.open(msname)
    tb.open(msname)
    spwtb.open(msname + "/SPECTRAL_WINDOW")

    fields = msmd.fieldnames()

    for fieldnum, fieldname in enumerate(fields):
        if 'J' in fieldname:
            continue

        pl.clf()
        fig = pl.figure(1)

        #spws = msmd.spwsforfield(fieldnum)
        spws = np.unique(tb.getcol('DATA_DESC_ID'))

        spws = [spw for spw in spws if len(msmd.chanfreqs(spw)) > 400]

        if len(spws) == 4:
            subplots = fig.subplots(2, 2).ravel()
        elif len(spws) == 5:
            subplots = fig.subplots(2, 3).ravel()
        elif len(spws) < 4:
            log.error(f"TOO FEW SPWS IN {msname}")
        elif len(spws) == 8:
            subplots = fig.subplots(2, 4).ravel()
        else:
            log.error(f"TOO ??? SPWS IN {msname}")

        hdul = fits.HDUList()

        ind = 0
        for spw in spws:
            # does not work: frq = msmd.chanfreqs(spw)
            frq = (spwtb.getcol('CHAN_FREQ', startrow=spw, nrow=1)).squeeze()
            if len(frq) < 100:
                continue

            stb = tb.query(
                f'ANTENNA1 != ANTENNA2 && FIELD_ID == {fieldnum} && DATA_DESC_ID == {spw}'
            )
            dat = stb.getcol('DATA')
            if dat.ndim < 3:
                stb.close()
                continue
            # axis = 0 is poln
            # axis = 1 is spec
            # axis = 2 is baseline (?)
            avgspec = dat.mean(axis=(0, 2))  # axis=0 is polarization

            ax = subplots[ind]
            ax.set_xlabel("Frequency (GHz)")
            ax.set_ylabel("UV Spectrum")

            if frq.size != avgspec.size:
                frq = msmd.chanfreqs(spw)
                # default to Hz; fall back to channel index if the sizes still disagree
                unit = u.Hz
                if frq.size != avgspec.size:
                    #raise ValueError(f"spectrum shape = {dat.shape}, frq.shape = {frq.shape}")
                    #continue
                    frq = np.arange(dat.shape[1])
                    ax.set_xlabel("Index")
                    ax.set_ylabel("Who knows?!")
                    unit = u.dimensionless_unscaled
            else:
                unit = u.Hz

            band = 'B3' if frq.max() < 200e9 else 'B6'
            if fieldname not in metadata[band]:
                # probably not a target source
                stb.close()
                continue
            muid = metadata[band][fieldname]['muid_configs']['12Mshort']
            cdatfile = metadata[band][fieldname]['cont.dat'][muid]
            contfreqs = parse_contdotdat(cdatfile)

            ax.plot(frq / 1e9, avgspec)
            lims = ax.axis()

            sel = np.zeros(frq.size, dtype='int')

            if unit is not u.dimensionless_unscaled:
                for freqrange in contfreqs.split(";"):
                    low, high = freqrange.split("~")
                    high = u.Quantity(high)
                    low = u.Quantity(low, unit=high.unit)
                    sel += (frq * unit > low) & (frq * unit < high)
                    #print(f"{field}_{spw}: {low}-{high} count={sel.sum()}")

                usel = np.unique(sel)
                if set(usel) == {0, 1}:
                    sel = sel.astype('bool')

                    dat_to_plot = avgspec.copy()
                    dat_to_plot[~sel] = np.nan
                    # overplot only the selected continuum channels (NaN elsewhere)
                    ax.plot(frq / 1e9,
                            dat_to_plot,
                            linewidth=4,
                            zorder=-5,
                            alpha=0.75)
                elif len(usel) > 1:
                    dat_to_plot = np.empty(avgspec.shape)
                    dat_to_plot[:] = np.nan
                    # skip zero
                    for selval in usel[1:]:
                        dat_to_plot[sel == selval] = avgspec[sel == selval]
                    ax.plot(frq / 1e9,
                            dat_to_plot,
                            linewidth=4,
                            zorder=selval - 10,
                            alpha=0.75,
                            color='orange')

            ax.axis(lims)
            ax.set_title(f"SPW {spw}")

            spectable = Table(
                [
                    Column(data=frq, unit=unit, name='Frequency'),
                    Column(data=avgspec, name='Spectrum')
                ],
                meta={
                    'basename': os.path.basename(msname),
                    'fieldname': fieldname,
                    'fieldnum': fieldnum,
                    'spw': spw,
                    'msname': msname,
                })
            hdul.append(fits.BinTableHDU(spectable))

            ind += 1

        fig.suptitle(f"field={fieldname} {fieldnum}")

        basename = os.path.basename(msname)
        fig.savefig(f"{basename}_{fieldname}_{fieldnum}_uvspectra.png")
        hdul.writeto(f"{basename}_{fieldname}_{fieldnum}_uvspectra.fits",
                     overwrite=True)

    tb.close()
    msmd.close()
    stb.close()
    spwtb.close()
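
A minimal invocation sketch for plot_uvspectra, assuming a modular CASA 6 install where the msmd, tb, and spwtb tool instances are module-level globals and the surrounding pipeline module supplies metadata and parse_contdotdat; the measurement-set name below is a placeholder.

from casatools import msmetadata, table  # assumed: modular CASA 6 packaging

msmd = msmetadata()   # used by plot_uvspectra via msmd.open(msname)
tb = table()          # main table tool
spwtb = table()       # SPECTRAL_WINDOW table tool

plot_uvspectra("my_data.ms")  # placeholder measurement-set path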
Example #52
0
    def __init__(
        self,
        survey_name: str = None,
        survey_specs: dict = None,
    ):
        self.survey_name = survey_name
        self.load_specs(survey_specs)

        # read as JSON
        start_time = time.time()
        req = urllib.request.Request(self.survey_url)
        try:
            response = urllib.request.urlopen(req)
        except urllib.error.URLError as e:
            if hasattr(e, "reason"):
                log.error("Failed to reach server '{}': {}".format(
                    self.survey_url, e.reason))
            elif hasattr(e, "code"):
                log.error(
                    "Server '{}' could not fulfill request: code {}".format(
                        self.survey_url, e.code))
            return
        self.raw_table = json.loads(response.read().decode())
        self.update = Time.now()
        pulsar = []
        period = []
        DM = []
        RA = []
        Dec = []
        for key in self.raw_table.keys():
            pulsar.append(key)
            if self.ra_key is not None and self.dec_key is not None:
                coord = SkyCoord(
                    extract_from_json(self.raw_table[key], self.ra_key),
                    extract_from_json(self.raw_table[key], self.dec_key),
                    unit=("hour", "deg"),
                )
            else:
                try:
                    coord = name_to_position(pulsar[-1])
                except Exception:
                    log.warning(
                        "No RA/Dec available and unable to parse pulsar '{}' to determine coordinates; assuming (0,0)"
                        .format(pulsar[-1]))
                    coord = SkyCoord(0 * u.deg, 0 * u.deg)
            RA.append(coord.ra.deg)
            Dec.append(coord.dec.deg)
            if self.period_units == "ms":
                try:
                    period.append(
                        float(
                            extract_from_json(self.raw_table[key],
                                              self.period_key)))
                except TypeError:
                    period.append(np.nan)
            elif self.period_units == "s":
                try:
                    period.append(
                        float(
                            extract_from_json(self.raw_table[key],
                                              self.period_key)) * 1000)
                except TypeError:
                    period.append(np.nan)

            DM.append(extract_from_json(self.raw_table[key], self.dm_key))

        self.data = Table([
            Column(pulsar, name="PSR"),
            Column(RA, name="RA", unit=u.deg, format="%.6f"),
            Column(Dec, name="Dec", unit=u.deg, format="%.6f"),
            Column(period, name="P", unit=u.ms, format="%.2f"),
            Column(DM, name="DM", unit=u.pc / u.cm**3, format="%.2f"),
        ])
        end_time = time.time()
        log.info(
            "Read data for {} pulsars for survey '{}' in {:.2f}s at {}".format(
                len(self.data),
                self.survey_name,
                end_time - start_time,
                self.update.to_value("iso", subfmt="date_hm"),
            ))
        self.data.meta["url"] = self.survey_url
        self.data.meta["survey"] = self.survey_name
        self.data.meta["date"] = self.update
Example #53
0
def load_fits_TOAs(
    eventname,
    mission,
    weights=None,
    extension=None,
    timesys=None,
    timeref=None,
    minmjd=-np.inf,
    maxmjd=np.inf,
):
    """
    Read photon event times out of a FITS file as PINT TOA objects.

    Correctly handles raw event files, or ones processed with axBary to have
    barycentered TOAs. Different conditions may apply to different missions.

    The minmjd/maxmjd parameters can be used to avoid instantiation of TOAs
    we don't want, which can otherwise be very slow.

    Parameters
    ----------
    eventname : str
        File name of the FITS event list
    mission : str
        Name of the mission (e.g. RXTE, XMM)
    weights : array or None
        The array has to be of the same size as the event list. Overwrites
        possible weight lists from mission-specific FITS files
    extension : str
        FITS extension to read
    timesys : str, default None
        Force this time system
    timeref : str, default None
        Force this time reference
    minmjd : float, default "-infinity"
        minimum MJD timestamp to return
    maxmjd : float, default "infinity"
        maximum MJD timestamp to return

    Returns
    -------
    toalist : list of TOA objects
    """
    # Load photon times from event file
    hdulist = pyfits.open(eventname)
    if mission not in mission_config:
        log.warning("Mission not recognized. Using generic")
        mission = "generic"

    if (extension is not None and isinstance(extension, str)
            and hdulist[1].name not in extension.split(",")):
        raise RuntimeError(
            "First table in FITS file must be {}. Found {}".format(
                extension, hdulist[1].name))
    if isinstance(extension, int) and extension != 1:
        raise ValueError(
            "At the moment, only data in the first FITS extension is supported"
        )

    if timesys is None:
        timesys = _get_timesys(hdulist[1])
    if timeref is None:
        timeref = _get_timeref(hdulist[1])
    check_timesys(timesys)
    check_timeref(timeref)

    if not mission_config[mission]["allow_local"] and timesys != "TDB":
        log.error("Raw spacecraft TOAs not yet supported for " + mission)

    obs, scale = _default_obs_and_scale(mission, timesys, timeref)

    # Read time column from FITS file
    mjds = read_fits_event_mjds_tuples(hdulist[1])

    new_kwargs = _get_columns_from_fits(
        hdulist[1], mission_config[mission]["fits_columns"])

    hdulist.close()

    if weights is not None:
        new_kwargs["weights"] = weights

    # mask out times/columns outside of mjd range
    mjds_float = np.asarray([r[0] + r[1] for r in mjds])
    idx = (minmjd < mjds_float) & (mjds_float < maxmjd)
    mjds = mjds[idx]
    for key in new_kwargs.keys():
        new_kwargs[key] = new_kwargs[key][idx]

    toalist = [None] * len(mjds)
    kw = {}
    for i in range(len(mjds)):
        # Create TOA list
        for key in new_kwargs.keys():
            kw[key] = new_kwargs[key][i]
        toalist[i] = toa.TOA(mjds[i], obs=obs, scale=scale, **kw)

    return toalist
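
A minimal usage sketch for the loader above, assuming it is importable from PINT's pint.event_toas module; the event-file name and MJD range are placeholders.

from pint.event_toas import load_fits_TOAs  # assumed import path

# "events.fits" is a placeholder; mission should be a key of mission_config
toas = load_fits_TOAs("events.fits", mission="nicer",
                      minmjd=58000.0, maxmjd=58010.0)
print("Loaded {} TOAs".format(len(toas)))
if toas:
    print(toas[0].mjd, toas[0].obs)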
Example #54
0
                    else:
                        globblob = f"{field}_B{band}*_{config}_*{line}{suffix}"

                    fn = glob.glob(globblob)

                    if any(fn):
                        print(
                            f"Found some matches for fn {fn}, using {fn[0]}.")
                        fn = fn[0]
                    else:
                        print(f"Found no matches for glob {globblob}")
                        continue

                    modfn = fn.replace(".image", ".model")
                    if os.path.exists(fn) and not os.path.exists(modfn):
                        log.error(f"File {fn} is missing its model {modfn}")
                        continue

                    if line in default_lines:
                        spw = int(fn.split('spw')[1][0])

                    print(
                        f"Beginning field {field} band {band} config {config} line {line} spw {spw} suffix {suffix}"
                    )

                    ia.open(fn)
                    history = {
                        x.split(":")[0]: x.split(": ")[1]
                        for x in ia.history()
                    }
                    ia.close()
Example #55
0
def load_FPorbit(orbit_filename):
    '''Load data from an (RXTE or NICER) FPorbit file

        Reads a FPorbit FITS file

        Parameters
        ----------
        orbit_filename : str
            Name of file to load

        Returns
        -------
        astropy Table containing Time, x, y, z, v_x, v_y, v_z data

    '''
    # Load photon times from FT1 file
    hdulist = pyfits.open(orbit_filename)
    #log.info('orb file HDU name is {0}'.format(hdulist[1].name))
    if hdulist[1].name != 'ORBIT':
        log.error(
            'NICER orb file first extension is {0}. It should be ORBIT'.format(
                hdulist[1].name))
    FPorbit_hdr = hdulist[1].header
    FPorbit_dat = hdulist[1].data

    log.info('Opened FPorbit FITS file {0}'.format(orbit_filename))
    # TIMESYS should be 'TT'

    # TIMEREF should be 'LOCAL', since no delays are applied

    timesys = FPorbit_hdr['TIMESYS']
    log.debug("FPorbit TIMESYS {0}".format(timesys))
    timeref = FPorbit_hdr['TIMEREF']
    log.debug("FPorbit TIMEREF {0}".format(timeref))

    mjds_TT = read_fits_event_mjds(hdulist[1])
    mjds_TT = mjds_TT * u.d
    log.debug("FPorbit spacing is {0}".format(
        (mjds_TT[1] - mjds_TT[0]).to(u.s)))
    X = FPorbit_dat.field('X') * u.m
    Y = FPorbit_dat.field('Y') * u.m
    Z = FPorbit_dat.field('Z') * u.m
    Vx = FPorbit_dat.field('Vx') * u.m / u.s
    Vy = FPorbit_dat.field('Vy') * u.m / u.s
    Vz = FPorbit_dat.field('Vz') * u.m / u.s
    log.info('Building FPorbit table covering MJDs {0} to {1}'.format(
        mjds_TT.min(), mjds_TT.max()))
    FPorbit_table = Table([mjds_TT, X, Y, Z, Vx, Vy, Vz],
                          names=('MJD_TT', 'X', 'Y', 'Z', 'Vx', 'Vy', 'Vz'),
                          meta={'name': 'FPorbit'})
    # Make sure table is sorted by time
    log.debug('Sorting FPorbit table')
    FPorbit_table.sort('MJD_TT')
    # Now delete any bad entries where the positions are 0.0
    idx = np.where(
        np.logical_and(FPorbit_table['X'] != 0.0,
                       FPorbit_table['Y'] != 0.0))[0]
    if (len(idx) != len(FPorbit_table)):
        log.warning('Dropping {0} zero entries from FPorbit table'.format(
            len(FPorbit_table) - len(idx)))
        FPorbit_table = FPorbit_table[idx]
    return FPorbit_table
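
A quick usage sketch for load_FPorbit; the orbit-file name is a placeholder, and the check just prints the columns and time span of the returned table.

orb = load_FPorbit("FPorbit_day1.fits")  # placeholder orbit file name
print(orb.colnames)  # ['MJD_TT', 'X', 'Y', 'Z', 'Vx', 'Vy', 'Vz']
print("MJD_TT range: {} to {}".format(orb['MJD_TT'].min(), orb['MJD_TT'].max()))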
Example #56
0
def name_to_position(name: str) -> SkyCoord:
    """
    parses a pulsar name like J1234+5656 and returns an astropy SkyCoord object
    returns None if parsing fails

    Args:
        name (str): name to parse

    Returns:
        SkyCoord: the coordinates corresponding to the name (or None)

    formats:
    J1234+56
    J1234.5+56
    J123456+56
    J123456.7+56
    J1234+12.3
    J1234+1234
    J1234+1234.5
    J1234+123456
    J1234+123456.7

    """
    # remove any characters that are not a digit, decimal, or sign
    name = re.sub(r"[^\d\.\+-]", "", name)
    if "-" in name:
        try:
            ra, dec = name.split("-")
        except ValueError as e:
            log.error("Error converting pulsar name '{}' to RA/Dec: {}".format(
                name, e))
            return None
        sign = "-"
    else:
        try:
            ra, dec = name.split("+")
        except ValueError as e:
            log.error("Error converting pulsar name '{}' to RA/Dec: {}".format(
                name, e))
            return None
        sign = "+"
    match = re.match(
        r"J?(?P<hour>\d{2})(?P<minute>\d{2,4})(?P<decimal>\.?)(?P<frac>\d*)",
        ra)
    if match:
        if len(match.group("minute")) == 2:
            # HHMM
            ra_hms = "{}:{}{}{}".format(
                match.group("hour"),
                match.group("minute"),
                match.group("decimal"),
                match.group("frac"),
            )
        elif len(match.group("minute")) == 4:
            # HHMMSS
            ra_hms = "{}:{}:{}{}{}".format(
                match.group("hour"),
                match.group("minute")[:2],
                match.group("minute")[2:4],
                match.group("decimal"),
                match.group("frac"),
            )
        else:
            log.error("Cannot parse RA string '{}' from source '{}'".format(
                ra, name))
            return None
    else:
        log.error("Cannot parse RA string '{}' from source '{}'".format(
            ra, name))
        return None
    match = re.match(
        r"(?P<degree>\d{2})(?P<minute>\d{0,4})(?P<decimal>\.?)(?P<frac>\d*)",
        dec)
    if match:
        if len(match.group("minute")) == 0:
            # DD.D
            dec_dms = "{}{}{}".format(
                match.group("degree"),
                match.group("decimal"),
                match.group("frac"),
            )

        elif len(match.group("minute")) == 2:
            # DDMM
            dec_dms = "{}:{}{}{}".format(
                match.group("degree"),
                match.group("minute"),
                match.group("decimal"),
                match.group("frac"),
            )
        elif len(match.group("minute")) == 4:
            # DDMMSS
            dec_dms = "{}:{}:{}{}{}".format(
                match.group("degree"),
                match.group("minute")[:2],
                match.group("minute")[2:4],
                match.group("decimal"),
                match.group("frac"),
            )
        else:
            log.error("Cannot parse Dec string '{}' from source '{}'".format(
                dec, name))
            return None
    else:
        log.error("Cannot parse Dec string '{}' from source '{}'".format(
            dec, name))
        return None

    try:
        c = SkyCoord(ra_hms, sign + dec_dms, unit=("hour", "deg"))
    except ValueError as e:
        log.error("Cannot parse RA/Dec {},{}: {}".format(
            ra_hms, sign + dec_dms, e))
        return None
    return c
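
A short check of name_to_position on a few of the documented name formats; the names are arbitrary examples.

for test_name in ["J0534+2200", "J1713.5+0747", "J0437-4715", "J1234+56"]:
    c = name_to_position(test_name)
    if c is None:
        print("{}: could not parse".format(test_name))
    else:
        print("{}: {}".format(test_name, c.to_string("hmsdms")))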
Example #57
0
 def __init__(self, path=None):
     if path is None:
         self.path = os.environ.get("URAT1_PATH")
     else:
         self.path = path
     if self.path is None:
         log.error("URAT1 path is not set.")
         raise IOError("None type path.")
     if len(glob.glob(os.path.join(self.path, "z???"))) == 0:
         log.error("No URAT1 binary file found.")
         self.valid = 0
         raise IOError("Invalid path.")
     else:
         self.valid = 1
     self.indexfile = os.path.join(self.path, "v1index.asc")
     if not os.path.isfile(self.indexfile):
         log.warning("No URAT1 index file found.")
         self.indexfile = None
     self.data = Table()
     self.URAT1_RAW = np.dtype([('ra', np.int32), ('spd', np.int32),
                                ('coord_e_s', np.int16),
                                ('coord_e_m', np.int16), ('nst', np.int8),
                                ('nsu', np.int8), ('epoch', np.int16),
                                ('mag1', np.int16), ('mag_e', np.int16),
                                ('nsm', np.int8), ('ref', np.int8),
                                ('nit', np.int16), ('niu', np.int16),
                                ('ngt', np.int8), ('ngu', np.int8),
                                ('pm_ra', np.int16), ('pm_dec', np.int16),
                                ('pm_e', np.int16), ('mf2', np.int8),
                                ('mfa', np.int8), ('twomass_id', np.int32),
                                ('mag_j', np.int16), ('mag_h', np.int16),
                                ('mag_k', np.int16), ('mag_e_j', np.int16),
                                ('mag_e_h', np.int16), ('mag_e_k',
                                                        np.int16),
                                ('cc_flag_j', np.int8),
                                ('cc_flag_h', np.int8),
                                ('cc_flag_k', np.int8), ('phq_j', np.int8),
                                ('phq_h', np.int8), ('phq_k', np.int8),
                                ('apass_mag_b', np.int16),
                                ('apass_mag_v', np.int16),
                                ('apass_mag_g', np.int16),
                                ('apass_mag_r', np.int16),
                                ('apass_mag_i', np.int16),
                                ('apass_mag_e_b', np.int16),
                                ('apass_mag_e_v', np.int16),
                                ('apass_mag_e_g', np.int16),
                                ('apass_mag_e_r', np.int16),
                                ('apass_mag_e_i', np.int16),
                                ('ann', np.int8), ('ano', np.int8)])
     # np.float was removed from recent NumPy releases; the builtin float
     # (i.e. np.float64) gives the same unpacked record layout
     self.URAT1_STAR = np.dtype([
         ('ra', float), ('spd', float), ('coord_e_s', float),
         ('coord_e_m', float), ('nst', np.int8), ('nsu', np.int8),
         ('epoch', float), ('mag1', float), ('mag_e', float),
         ('nsm', np.int8), ('ref', np.int8), ('nit', np.int16),
         ('niu', np.int16), ('ngt', np.int8), ('ngu', np.int8),
         ('pm_ra', float), ('pm_dec', float), ('pm_e', float),
         ('mf2', np.int8), ('mfa', np.int8), ('twomass_id', np.int32),
         ('mag_j', float), ('mag_h', float), ('mag_k', float),
         ('mag_e_j', float), ('mag_e_h', float), ('mag_e_k', float),
         ('cc_flag_j', np.int8), ('cc_flag_h', np.int8),
         ('cc_flag_k', np.int8), ('phq_j', np.int8), ('phq_h', np.int8),
         ('phq_k', np.int8), ('apass_mag_b', float), ('apass_mag_v', float),
         ('apass_mag_g', float), ('apass_mag_r', float),
         ('apass_mag_i', float), ('apass_mag_e_b', float),
         ('apass_mag_e_v', float), ('apass_mag_e_g', float),
         ('apass_mag_e_r', float), ('apass_mag_e_i', float),
         ('ann', np.int8), ('ano', np.int8)])
Example #58
0
File: core.py Project: nstarman/astroquery
    def query_async(self, payload, cache=None, public=True, science=True,
                    legacy_columns=False, max_retries=None,
                    get_html_version=None,
                    get_query_payload=None, **kwargs):
        """
        Perform a generic query with user-specified payload

        Parameters
        ----------
        payload : dictionary
            Please consult the `help` method
        cache : deprecated
        public : bool
            Return only publicly available datasets?
        science : bool
            Return only data marked as "science" in the archive?
        legacy_columns : bool
            True to return the columns from the obsolete ALMA advanced query,
            otherwise return the current columns based on ObsCore model.

        Returns
        -------
        Table
            Table with results. Columns are those in the ALMA ObsCore model
            (see ``help_tap``) unless ``legacy_columns`` argument is set to True.
        """
        local_args = dict(locals().items())

        for arg, value in local_args.items():
            # check if any of the deprecated arguments have been used
            for deprecated in ['cache', 'max_retries', 'get_html_version',
                               'get_query_payload']:
                if arg == deprecated and value is not None:
                    warnings.warn(
                        ("Argument '{}' has been deprecated "
                         "since version 4.0.1 and will be ignored".format(arg)),
                        AstropyDeprecationWarning)
                    # deprecated names are explicit parameters, so drop them
                    # from kwargs only if present
                    kwargs.pop(arg, None)

        if payload is None:
            payload = {}
        for arg in kwargs:
            value = kwargs[arg]
            if 'band_list' == arg and isinstance(value, list):
                value = ' '.join([str(_) for _ in value])
            if arg in payload:
                payload[arg] = '{} {}'.format(payload[arg], value)
            else:
                payload[arg] = value

        if science:
            payload['science_observations'] = True

        if public is not None:
            if public:
                payload['public_data'] = True
            else:
                payload['public_data'] = False
        query = _gen_sql(payload)
        result = self.query_tap(query, **kwargs)
        if result:
            result = result.to_table()
        else:
            return result
        if legacy_columns:
            legacy_result = Table()
            # add 'Observation date' column

            for col_name in _OBSCORE_TO_ALMARESULT:
                if col_name in result.columns:
                    if col_name == 't_min':
                        legacy_result['Observation date'] = \
                            [Time(_['t_min'], format='mjd').strftime(
                                ALMA_DATE_FORMAT) for _ in result]
                    else:
                        legacy_result[_OBSCORE_TO_ALMARESULT[col_name]] = \
                            result[col_name]
                else:
                    log.error("Invalid column mapping in OBSCORE_TO_ALMARESULT: "
                              "{}:{}.  Please "
                              "report this as an Issue."
                              .format(col_name, _OBSCORE_TO_ALMARESULT[col_name]))
            return legacy_result
        return result
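
A hedged sketch of calling this method through astroquery's Alma class; the payload keyword used here is illustrative and should be checked against the keywords described by the help facilities referenced in the docstring.

from astroquery.alma import Alma

alma = Alma()
# 'source_name_alma' is an example constraint, not the only valid key
result = alma.query_async(payload={'source_name_alma': 'W51'},
                          public=True, science=True)
if result is not None and len(result) > 0:
    print(len(result), "rows; first columns:", result.colnames[:5])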
Example #59
0
tbl.add_column(Column(name='mad_7m12m', data=[np.nan] * len(tbl)))
tbl.add_column(Column(name='mad_cleanest', data=[np.nan] * len(tbl)))
tbl.add_column(Column(name='dr_improvement', data=[np.nan] * len(tbl)))

for field in "G008.67 G337.92 W43-MM3 G328.25 G351.77 G012.80 G327.29 W43-MM1 G010.62 W51-IRS2 W43-MM2 G333.60 G338.93 W51-E G353.41".split(
):
    for band in (3, 6):
        for config in ("7M12M", ):

            fns = glob.glob(
                f"{basepath}/{field}/B{band}/7m12m/*_{config}_robust0_*final*.image.tt0.pbcor.fits"
            )
            if len(fns) > 1:
                raise ValueError("Too many matches!")
            elif len(fns) == 0:
                log.error(
                    f"No matches to field={field} band={band} config={config}")
                continue
                raise ValueError("No matches!")
            fn = fns[0]

            pl.clf()
            f7m12m = fn
            cleanest = fn.replace("_7m12m", "").replace(
                "/7m12m/", "/cleanest/").replace("_7M12M_", "_12M_")
            #print(os.path.exists(7m12m), os.path.exists(clean))
            #field = fn.split("_uid")[0].split("/")[-1]

            filepath = fn.split("7m12m")[0]

            f7m12m_fh = fits.open(f7m12m)
            try:
Example #60
0
all_evfiles = []


def runcmd(cmd):
    # CMD should be a list of strings since it is not processed by a shell
    log.info('CMD: ' + " ".join(cmd))
    log.info(cmd)
    check_call(cmd, env=os.environ)


## Check if outdir contains 'None', 'NONE', or 'none' (causes bug in ni-extractevents)

names = ['none', 'None', 'NONE']
if any(st in args.outdir for st in names):
    log.error(
        "Due to a current bug in ni-extractevents, outdir cannot contain 'none', 'None', or 'NONE'.  Existing..."
    )
    exit()
if any(st in args.outroot for st in names):
    log.error(
        "Due to a current bug in ni-extractevents, outroot cannot contain 'none', 'None', or 'NONE'.  Existing..."
    )
    exit()

# Make directory for working files if it does not exist
pipedir = "{0}".format(args.outdir)
if not os.path.exists(pipedir):
    log.info("Creating merged directory: {}".format(args.outdir))
    os.makedirs(pipedir)

## The list of event files all_evfiles is created in the loop above