def generate_time_vector(epoch, value):
    """Generates a TimeDelta time vector.

    Parameters
    ----------
    epoch : ~astropy.time.Time
        Epoch of simulation
    value : ~astropy.units.Quantity, ~astropy.time.Time, ~astropy.time.TimeDelta
        Scalar time to propagate.

    Returns
    -------
    time_of_flight : ~astropy.time.TimeDelta
        Time vector
    """
    if isinstance(value, time.Time) and not isinstance(value, time.TimeDelta):
        time_of_flight = value - epoch
    else:
        # Works for both Quantity and TimeDelta objects
        time_of_flight = time.TimeDelta(value)

    # Use the highest precision we can afford
    # np.atleast_1d does not work directly on TimeDelta objects
    jd1 = np.atleast_1d(time_of_flight.jd1)
    jd2 = np.atleast_1d(time_of_flight.jd2)
    return time.TimeDelta(jd1, jd2, format="jd", scale=time_of_flight.scale)

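# --- Hedged usage sketch (not part of the original sources): how the helper above
# treats its two accepted input kinds. Only numpy and astropy are assumed; the
# epoch value below is arbitrary.
import numpy as np
from astropy import time, units as u

epoch = time.Time("2020-01-01T00:00:00", scale="utc")

# A Quantity (or TimeDelta) is wrapped directly into a TimeDelta...
tof_from_quantity = time.TimeDelta(3 * u.h)

# ...while an absolute Time is first referenced to the epoch.
tof_from_time = time.Time("2020-01-01T03:00:00", scale="utc") - epoch

# Both carry two-part Julian dates (jd1, jd2), which np.atleast_1d can vectorize
# without losing the extra precision.
jd1, jd2 = np.atleast_1d(tof_from_quantity.jd1), np.atleast_1d(tof_from_quantity.jd2)
print(time.TimeDelta(jd1, jd2, format="jd").sec)  # ~10800 s
print(tof_from_time.sec)                          # ~10800 s
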
def get_intervals():
    data = np.genfromtxt('observations.txt')
    times = np.genfromtxt('times.txt')[:, 0]
    years = data[:, 0]
    months = data[:, 1]
    days = data[:, 2]
    hours = data[:, 3]
    minutes = data[:, 4]
    seconds = data[:, 5]
    duration = data[:, 8]
    nFRB = data[:, 9]
    tel = data[:, 6]
    freq = data[:, 7]

    starts = []
    ends = []
    intlengths = []
    intminlengths = []
    FRBcount = 0
    totFRB = int(nFRB.sum())

    for i in range(len(years)):
        startstr = '%04i-%02i-%02iT%02i:%02i:%02i' % (
            years[i], months[i], days[i], hours[i], minutes[i], seconds[i])
        start = at.Time(startstr, format='isot', scale='utc',
                        location=locs[int(tel[i])])
        dur = at.TimeDelta(duration[i], format='sec')
        dmcorr = at.TimeDelta(k_DM * FRB_DM / freq[i]**2, format='sec')
        # print dmcorr
        ltt_bary = start.light_travel_time(FRB_loc)
        start_bary = (start.tdb - dmcorr + ltt_bary)
        end_bary = (start.tdb - dmcorr + dur + ltt_bary)
        starts.append(start_bary.mjd)
        ends.append(end_bary.mjd)

        if int(nFRB[i]) == 0:
            intmin = end_bary.mjd - start_bary.mjd
            if ((FRBcount == 0) or (FRBcount == totFRB)):
                intmax = np.inf
            else:
                intmax = times[FRBcount] - times[FRBcount - 1]
            intminlengths.append([intmin, intmax, 0])
        else:
            intmin = times[FRBcount] - start_bary.mjd
            if FRBcount > 0:
                intmax = times[FRBcount] - times[FRBcount - 1]
            else:
                intmax = np.inf
            intminlengths.append([intmin, intmax, 1])
            FRBcount += 1
            for j in range(1, int(nFRB[i])):
                intlengths.append(times[FRBcount] - times[FRBcount - 1])
                FRBcount += 1
            intmin = end_bary.mjd - times[FRBcount - 1]
            if FRBcount < totFRB:
                intmax = times[FRBcount] - times[FRBcount - 1]
            else:
                intmax = np.inf
            intminlengths.append([intmin, intmax, 2])

    return np.array(intlengths), np.array(intminlengths), np.array(starts), \
        np.array(ends), times, nFRB

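# Hedged sketch of the barycentring pattern used in get_intervals() above. The
# site, sky position, dispersion delay and duration are placeholders; k_DM,
# FRB_DM, FRB_loc and locs live elsewhere in the original script.
import astropy.time as at
import astropy.units as u
from astropy.coordinates import SkyCoord, EarthLocation

site = EarthLocation.from_geodetic(lon=-79.84 * u.deg, lat=38.43 * u.deg, height=800 * u.m)
target = SkyCoord(ra=83.63 * u.deg, dec=22.01 * u.deg)   # placeholder FRB position
start = at.Time("2017-08-27T06:00:00", format="isot", scale="utc", location=site)

dmcorr = at.TimeDelta(0.5, format="sec")                 # placeholder dispersion delay
dur = at.TimeDelta(300.0, format="sec")                  # placeholder scan duration
ltt_bary = start.light_travel_time(target)               # barycentric light travel time
start_bary = start.tdb - dmcorr + ltt_bary
end_bary = start.tdb - dmcorr + dur + ltt_bary
print(start_bary.mjd, end_bary.mjd)
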
def state_vystest(wait, catch, scantime=0, preffile=None, **kwargs):
    """ Create state to read vys data after wait seconds with nsegment segments.
    kwargs passed in as preferences via inpref argument to State.
    """

    try:
        from evla_mcast import scan_config
    except ImportError:
        logger.error('ImportError for evla_mcast. Need this library to consume multicast messages from CBE.')

    _install_dir = os.path.abspath(os.path.dirname(__file__))

    meta = {}
    prefs = {}

    # set start time (and fix antids)
    dt = time.TimeDelta(wait, format='sec')
    onesec = time.TimeDelta(1, format='sec')
    t0 = (time.Time.now() + dt).mjd
    meta['starttime_mjd'] = t0
    # express the catch interval in days so it can be added to the MJD float
    meta['stoptime_mjd'] = t0 + (onesec * catch).jd
    meta['antids'] = ['ea{0}'.format(i) for i in range(1, 26)]  # fixed for scan_config test docs

    # read example scan configuration
    config = scan_config.ScanConfig(vci=os.path.join(_install_dir, 'data/vci.xml'),
                                    obs=os.path.join(_install_dir, 'data/obs.xml'),
                                    ant=os.path.join(_install_dir, 'data/antprop.xml'),
                                    requires=['ant', 'vci', 'obs'])
    config.stopTime = config.startTime + 1 / (24 * 3600.)

    st = State(config=config, preffile=preffile, inmeta=meta, inprefs=prefs)

    return st

def _generate_time_vector(self, start, stop, delta):
    """Generates time vectors for simulation

    Parameters
    ----------
    start : ~astropy.time.Time
        Start time
    stop : ~astropy.time.Time
        End time.
    delta : ~astropy.units.Quantity
        Time step.
    """
    # Create time vector
    dt = stop - start
    self.t = np.arange(0, dt.to(u.s).value, delta.to(u.s).value)
    self._tof_vector = time.TimeDelta(self.t * u.s)

    # Use the highest precision we can afford
    # np.atleast_1d does not work directly on TimeDelta objects
    jd1 = np.atleast_1d(self._tof_vector.jd1)
    jd2 = np.atleast_1d(self._tof_vector.jd2)
    self._tof_vector = time.TimeDelta(jd1, jd2, format="jd",
                                      scale=self._tof_vector.scale)

def apply_clock_corrections(self):
    """Apply observatory clock corrections and TIME statements.

    Apply clock corrections to all the TOAs where corrections
    are available.  This routine actually changes
    the value of the TOA, although the correction is also listed
    as a new flag for the TOA called 'clkcorr' so that it can be
    reversed if necessary.  This routine also applies all 'TIME'
    commands and treats them exactly as if they were a part of the
    observatory clock corrections.

    # SUGGESTION(paulr): Somewhere in this docstring, or in a higher level
    # documentation, the assumptions about the timescales should be specified.
    # The docstring says apply "correction" but does not say what it is correcting.
    # Be more specific.
    """
    # First make sure that we haven't already applied clock corrections
    flags = self.table['flags']
    if any(['clkcorr' in f for f in flags]):
        log.warn("Some TOAs have 'clkcorr' flag. Not applying new clock corrections.")
        return
    # An array of all the time corrections, one for each TOA
    corr = numpy.zeros(self.ntoas) * u.s
    times = self.table['mjd']
    for ii, key in enumerate(self.table.groups.keys):
        grp = self.table.groups[ii]
        obs = self.table.groups.keys[ii]['obs']
        loind, hiind = self.table.groups.indices[ii:ii + 2]
        # First apply any TIME statements
        for jj in range(loind, hiind):
            if 'time' in flags[jj]:
                # TIME commands are in sec
                # SUGGESTION(paulr): These time correction units should
                # be applied in the parser, not here. In the table the time
                # correction should have units.
                corr[jj] = flags[jj]['time'] * u.s
                times[jj] += time.TimeDelta(corr[jj])
        # These are observatory clock corrections.  Do in groups.
        if (key['obs'] in observatories and key['obs'] != "Geocenter"):
            mjds, ccorr = obsmod.get_clock_corr_vals(key['obs'])
            tvals = numpy.array([t.mjd for t in grp['mjd']])
            if numpy.any((tvals < mjds[0]) | (tvals > mjds[-1])):
                # FIXME: check the user sees this! should it be an exception?
                log.error(
                    "Some TOAs are not covered by the %s clock correction" % key['obs']
                    + " file, treating clock corrections as constant"
                    + " past the ends.")
            gcorr = numpy.interp(tvals, mjds, ccorr) * u.us
            for jj, cc in enumerate(gcorr):
                grp['mjd'][jj] += time.TimeDelta(cc)
            corr[loind:hiind] += gcorr
        # Now update the flags with the clock correction used
        for jj in range(loind, hiind):
            if corr[jj]:
                flags[jj]['clkcorr'] = corr[jj]

def apply_clock_corrections(self, include_bipm=True,
                            bipm_version="BIPM2015",
                            include_gps=True):
    """Apply observatory clock corrections and TIME statements.

    Apply clock corrections to all the TOAs where corrections are
    available.  This routine actually changes the value of the TOA,
    although the correction is also listed as a new flag for the TOA
    called 'clkcorr' so that it can be reversed if necessary.  This
    routine also applies all 'TIME' commands and treats them exactly
    as if they were a part of the observatory clock corrections.

    Options to include GPS or BIPM clock corrections are set to True
    by default in order to give the most accurate clock corrections.

    A description of how PINT handles clock corrections and timescales is here:
    https://github.com/nanograv/PINT/wiki/Clock-Corrections-and-Timescales-in-PINT
    """
    # First make sure that we haven't already applied clock corrections
    flags = self.table['flags']
    if any(['clkcorr' in f for f in flags]):
        log.warn("Some TOAs have 'clkcorr' flag. Not applying new clock corrections.")
        return
    # An array of all the time corrections, one for each TOA
    log.info("Applying clock corrections (include_GPS = {0}, include_BIPM = {1}).".format(
        include_gps, include_bipm))
    corr = numpy.zeros(self.ntoas) * u.s
    times = self.table['mjd']
    for ii, key in enumerate(self.table.groups.keys):
        grp = self.table.groups[ii]
        obs = self.table.groups.keys[ii]['obs']
        site = get_observatory(obs, include_gps=include_gps,
                               include_bipm=include_bipm,
                               bipm_version=bipm_version)
        loind, hiind = self.table.groups.indices[ii:ii + 2]
        # First apply any TIME statements
        for jj in range(loind, hiind):
            if 'to' in flags[jj]:
                # TIME commands are in sec
                # SUGGESTION(@paulray): These time correction units should
                # be applied in the parser, not here. In the table the time
                # correction should have units.
                corr[jj] = flags[jj]['to'] * u.s
                times[jj] += time.TimeDelta(corr[jj])
        gcorr = site.clock_corrections(time.Time(grp['mjd']))
        for jj, cc in enumerate(gcorr):
            grp['mjd'][jj] += time.TimeDelta(cc)
        corr[loind:hiind] += gcorr
        # Now update the flags with the clock correction used
        for jj in range(loind, hiind):
            if corr[jj]:
                flags[jj]['clkcorr'] = corr[jj]
    # Update clock correction info
    self.clock_corr_info.update({'include_bipm': include_bipm,
                                 'bipm_version': bipm_version,
                                 'include_gps': include_gps})

def apply_clock_corrections(self):
    """Apply observatory clock corrections and TIME statements.

    Apply clock corrections to all the TOAs where corrections are
    available.  This routine actually changes the value of the TOA,
    although the correction is also listed as a new flag for the TOA
    called 'clkcorr' so that it can be reversed if necessary.  This
    routine also applies all 'TIME' commands and treats them exactly
    as if they were a part of the observatory clock corrections.

    # SUGGESTION(paulr): Somewhere in this docstring, or in a higher level
    # documentation, the assumptions about the timescales should be specified.
    # The docstring says apply "correction" but does not say what it is correcting.
    # Be more specific.
    """
    # First make sure that we haven't already applied clock corrections
    flags = self.table['flags']
    if any(['clkcorr' in f for f in flags]):
        log.warn("Some TOAs have 'clkcorr' flag. Not applying new clock corrections.")
        return
    # An array of all the time corrections, one for each TOA
    corr = numpy.zeros(self.ntoas) * u.s
    times = self.table['mjd']
    for ii, key in enumerate(self.table.groups.keys):
        grp = self.table.groups[ii]
        obs = self.table.groups.keys[ii]['obs']
        site = Observatory.get(obs)
        loind, hiind = self.table.groups.indices[ii:ii + 2]
        # First apply any TIME statements
        for jj in range(loind, hiind):
            if 'to' in flags[jj]:
                # TIME commands are in sec
                # SUGGESTION(paulr): These time correction units should
                # be applied in the parser, not here. In the table the time
                # correction should have units.
                corr[jj] = flags[jj]['to'] * u.s
                times[jj] += time.TimeDelta(corr[jj])
        gcorr = site.clock_corrections(time.Time(grp['mjd']))
        for jj, cc in enumerate(gcorr):
            grp['mjd'][jj] += time.TimeDelta(cc)
        corr[loind:hiind] += gcorr
        # Now update the flags with the clock correction used
        for jj in range(loind, hiind):
            if corr[jj]:
                flags[jj]['clkcorr'] = corr[jj]

def plot_tspan(self,
               time_maj_loc=mdates.HourLocator(),
               time_min_loc=mdates.MinuteLocator(byminute=np.arange(0, 60, 10)),
               time_maj_fmt='%H:%M',
               colors=None,
               alpha=0.2,
               **plotargs):
    '''
    Args:
        **plotargs:
            You can set parameters of matplotlib.pyplot.plot() or
            matplotlib.pyplot.errorbar(). Defaults are
            {'ls': "none", 'marker': "."}.
    '''
    if colors is None:
        colors = cm.jet(np.linspace(0, 1, self.Nt))

    utccen = self.get_utc()
    tint = at.TimeDelta(self.timetable.tint.values, format="sec") / 2
    utcst = utccen - tint
    utced = utccen + tint

    # Plotting
    ax = plt.gca()
    for i in range(self.Nt):
        ax.axvspan(xmin=utcst[i].datetime, xmax=utced[i].datetime,
                   alpha=alpha, color=colors[i])

def d_phase_d_toa(self, toas, sample_step=None):
    """Return the derivative of phase wrt TOA.

    Parameters
    ----------
    toas : PINT TOAs class
        The toas when the derivative of phase will be evaluated at.
    sample_step : float, optional
        Finite difference step size. If not specified, a step of
        1000 times the spin period is used.
    """
    copy_toas = copy.deepcopy(toas)
    if sample_step is None:
        pulse_period = 1.0 / (self.F0.quantity)
        sample_step = pulse_period * 1000
    sample_dt = [-sample_step, 2 * sample_step]

    sample_phase = []
    for dt in sample_dt:
        dt_array = ([dt.value] * copy_toas.ntoas * dt._unit)
        deltaT = time.TimeDelta(dt_array)
        copy_toas.adjust_TOAs(deltaT)
        phase = self.phase(copy_toas)
        sample_phase.append(phase)
    # Use the finite difference method:
    # phase'(t) = (phase(t+h) - phase(t-h)) / (2h) + 1/6 * F2 * h^2 + ...
    # The error should be near 1/6 * F2 * h^2
    dp = sample_phase[1] - sample_phase[0]
    d_phase_d_toa = dp.int / (2 * sample_step) + dp.frac / (2 * sample_step)
    del copy_toas
    with u.set_enabled_equivalencies(dimensionless_cycles):
        return d_phase_d_toa.to(u.Hz)

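# Hedged numerical sketch of the central-difference scheme described in the
# comments above, using a toy quadratic phase model instead of a PINT timing
# model; f0 and f1 are made-up values.
import numpy as np

f0, f1 = 29.946923, -3.77e-10               # toy spin frequency (Hz) and its derivative (Hz/s)
phase = lambda t: f0 * t + 0.5 * f1 * t**2  # pulse phase in turns

t = np.linspace(0.0, 1000.0, 5)             # evaluation times (s)
h = 1000.0 / f0                             # step of 1000 pulse periods, as in the code above
d_phase_d_t = (phase(t + h) - phase(t - h)) / (2 * h)
print(d_phase_d_t)                          # approximately f0 + f1 * t
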
def make_gyro_relativistic_correction(attdata):
    if attdata.times.size == 0:
        return attdata
    print("inverse relativistic correction required")
    vec = attdata(attdata.times).apply(OPAX)
    ra, dec = vec_to_pol(vec)
    ra, dec = np.rad2deg(ra), np.rad2deg(dec)
    sc = SkyCoord(ra, dec, unit=("deg", "deg"), frame="fk5",
                  obstime=atime.Time(51543.875, format="mjd") +
                  atime.TimeDelta(attdata.times, format="sec"))
    vec2 = np.asarray(sc.gcrs.cartesian.xyz.T)
    vrot = np.cross(vec2, vec)
    vrot = vrot / np.sqrt(np.sum(vrot**2, axis=1))[:, np.newaxis]
    calpha = np.sum(vec * vec2, axis=1)
    calphap2 = np.sqrt((calpha + 1.) / 2.)
    salphap2 = np.sqrt((1. - calpha) / 2.)
    # alpha = np.arccos(np.sum(vec*vec2, axis=1))
    qcorr = np.empty((calphap2.size, 4), np.double)
    qcorr[:, :3] = vrot * salphap2[:, np.newaxis]
    qcorr[:, 3] = calphap2
    return AttDATA(attdata.times,
                   Rotation(qcorr).inv() * attdata(attdata.times),
                   gti=attdata.gti)

def zero_residuals(ts, model, maxiter=10, tolerance=1 * u.ns):
    """Use a model to adjust a TOAs object, setting residuals to 0 iteratively.

    Parameters
    ----------
    ts : pint.toa.TOAs
        Input TOAs (modified in-place)
    model : pint.models.timing_model.TimingModel
        current model
    maxiter : int, optional
        maximum number of iterations allowed
    tolerance : astropy.units.Quantity
        maximum allowed absolute deviation of residuals from 0
    """
    ts.compute_pulse_numbers(model)
    maxresid = None
    for i in range(maxiter):
        r = pint.residuals.Residuals(ts, model, track_mode="use_pulse_numbers")
        resids = r.calc_time_resids(calctype="taylor")
        if maxresid is not None and (np.abs(resids).max() > maxresid):
            log.warning(
                f"Residual increasing at iteration {i} while attempting to simulate TOAs"
            )
        maxresid = np.abs(resids).max()
        if abs(resids).max() < tolerance:
            break
        ts.adjust_TOAs(-time.TimeDelta(resids))
    else:
        raise ValueError(
            "Unable to make fake residuals - left over errors are {}".format(
                abs(resids).max()
            )
        )

def propagate(self, value, method=mean_motion, rtol=1e-10):
    """Propagates the orbit.

    If value is a true anomaly, propagate the orbit to this anomaly and
    return the result. If a time is provided, propagate this `Orbit` by
    that `time` and return the result.

    Parameters
    ----------
    value : Multiple options
        True anomaly values or time values.
    rtol : float, optional
        Relative tolerance for the propagation algorithm, default to 1e-10.
    """
    if hasattr(value, "unit") and value.unit in ('rad', 'deg'):
        p, ecc, inc, raan, argp, _ = rv.rv2coe(
            self.attractor.k.to(u.km ** 3 / u.s ** 2).value,
            self.r.to(u.km).value,
            self.v.to(u.km / u.s).value)
        return self.from_classical(self.attractor,
                                   p / (1.0 - ecc ** 2) * u.km,
                                   ecc * u.one,
                                   inc * u.rad,
                                   raan * u.rad,
                                   argp * u.rad,
                                   value)
    else:
        if isinstance(value, time.Time) and not isinstance(value, time.TimeDelta):
            time_of_flight = value - self.epoch
        else:
            time_of_flight = time.TimeDelta(value)

        return propagate(self, time_of_flight, method=method, rtol=rtol)

def eci2ecef(self, eci, sec):
    """Convert eci position to ecef position.

    Args:
        eci: eci position {x, y, z} (m), type: list.
        sec: gps seconds (gpsweek * 7 * 86400 + time of week).

    Returns:
        ecef: ecef position {x, y, z} (m), type: list.
    """
    tutc = GPST0 + Time.TimeDelta(sec, format='sec')
    if (tutc - self.__tutc).value < 0.01:
        ecef = self.__eci2ecft * np.array([eci]).T
        ecef = np.asarray(ecef).ravel().tolist()
        return ecef

    self.__tutc = tutc
    t = (tutc - J2K).value / 36525.0
    P = self.precession(t)
    N = self.nutation(t)
    R = self.gastr(tutc)
    W = self.poler(tutc)
    U = W * R * N * P
    ecef = U.T * np.array([eci]).T
    ecef = np.asarray(ecef).ravel().tolist()
    self.__eci2ecft = U.T
    return ecef

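# Hedged sketch of the "GPS epoch + TimeDelta(seconds)" construction used at the
# top of eci2ecef(). GPST0 is assumed here to be the GPS epoch, 1980-01-06
# 00:00:00; the seconds value below is arbitrary.
from astropy import time as atime

GPS_EPOCH = atime.Time("1980-01-06 00:00:00", scale="utc")   # assumed definition of GPST0
gps_seconds = 2100 * 7 * 86400 + 345600.0                    # gpsweek * 7 * 86400 + time of week
t = GPS_EPOCH + atime.TimeDelta(gps_seconds, format="sec")
print(t.isot)
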
def apply_tsys(tsysdata, year=2017):
    import copy
    import astropy.time as at

    outdata = copy.deepcopy(tsysdata)

    # Get index
    indexes = outdata["INDEX"]
    cols = ["DOY", "TIME"] + indexes

    if tsysdata["FT"] is not None:
        for index in indexes:
            outdata["DATA"].loc[:, index] *= tsysdata["FT"]
        outdata["FT"] = None

    if tsysdata["TIMEOFF"] is not None:
        timetags = get_datetime(outdata["DATA"], year)
        timetags = at.Time(timetags, scale="utc")
        timetags += at.TimeDelta(tsysdata["TIMEOFF"], format="sec")
        timetags = timetags.yday
        doy = []
        hms = []
        for timetag in timetags:
            dhms = timetag.split(":")
            doy.append(np.int64(dhms[1]))
            hms.append(":".join(dhms[2:]))
        outdata["TIMEOFF"] = None
        outdata["DATA"].loc[:, "DOY"] = np.asarray(doy)
        outdata["DATA"].loc[:, "TIME"] = np.asarray(hms)

    return outdata

def localSiderealTimeToDate(self, lst, date=None, format=None):
    """Returns an `astropy.time.Time` instance for a LST at a given date.

    Parameters
    ----------
    lst : float or iterable
        A LST value or list of them to be converted to dates.
    date : optional
        An `astropy.time.Time` instance or the argument to create one.
        If None, the current time will be used.
    format : string, optional
        If date is not None or a Time instance, the value to be passed to
        the `astropy.time.Time` `format` keyword.

    Returns
    -------
    result : `astropy.time.Time`
        An `astropy.time.Time` instance of the same size of the lst
        input list, with each element being the date of the
        corresponding LST.
    """
    if date is None:
        date = time.Time.now()

    lst = np.atleast_1d(lst)

    if isinstance(date, time.Time):
        pass
    else:
        try:
            date = time.Time(date, format=format, scale='tai')
        except:
            raise ValueError('date format not recognised.')

    LST0 = self.localSiderealTime(date)

    testPoint = lst[0]
    diffs = (lst - testPoint) % 24

    if np.abs(testPoint - LST0) > 12.:
        delta = (testPoint - LST0) % 24
    else:
        delta = testPoint - LST0

    lstDelta = diffs + delta

    UTDates = date + time.TimeDelta(lstDelta * 3600, format='sec', scale='tai')

    if len(UTDates) == 1:
        return UTDates[0]
    else:
        return UTDates

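# Minimal sketch of the final step above: an array of hour offsets is turned
# into a TimeDelta in seconds and added to the reference date. The
# sidereal-to-UT handling of the full method is omitted; values are arbitrary.
import numpy as np
from astropy import time

date = time.Time("2021-03-01T04:00:00", format="isot", scale="tai")
lstDelta = np.array([0.5, 1.0, 2.25])   # offsets in hours
UTDates = date + time.TimeDelta(lstDelta * 3600, format="sec", scale="tai")
print(UTDates.isot)
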
def propagate(orbit, time_of_flight, *, method=mean_motion, rtol=1e-10, **kwargs):
    """Propagate an orbit some time and return the result.

    Parameters
    ----------
    orbit : ~poliastro.twobody.Orbit
        Orbit object to propagate.
    time_of_flight : ~astropy.time.TimeDelta
        Time of propagation.
    method : callable, optional
        Propagation method, default to mean_motion.
    rtol : float, optional
        Relative tolerance, default to 1e-10.

    Returns
    -------
    astropy.coordinates.CartesianRepresentation
        Propagation coordinates.
    """
    # Check if propagator fulfills orbit requirements
    if orbit.ecc < 1.0 and method not in ELLIPTIC_PROPAGATORS:
        raise ValueError(
            "Can not use a parabolic/hyperbolic propagator for elliptical orbits."
        )
    elif orbit.ecc == 1.0 and method not in PARABOLIC_PROPAGATORS:
        raise ValueError(
            "Can not use an elliptic/hyperbolic propagator for parabolic orbits."
        )
    elif orbit.ecc > 1.0 and method not in HYPERBOLIC_PROPAGATORS:
        raise ValueError(
            "Can not use an elliptic/parabolic propagator for hyperbolic orbits."
        )
    else:
        pass

    # Use the highest precision we can afford
    # np.atleast_1d does not work directly on TimeDelta objects
    jd1 = np.atleast_1d(time_of_flight.jd1)
    jd2 = np.atleast_1d(time_of_flight.jd2)
    time_of_flight = time.TimeDelta(jd1, jd2, format="jd", scale=time_of_flight.scale)

    rr, vv = method(
        orbit.attractor.k, orbit.r, orbit.v, time_of_flight.to(u.s), rtol=rtol, **kwargs
    )

    # TODO: Turn these into unit tests
    assert rr.ndim == 2
    assert vv.ndim == 2

    cartesian = CartesianRepresentation(
        rr, differentials=CartesianDifferential(vv, xyz_axis=1), xyz_axis=1
    )

    return cartesian

def propagate(self, value, method=mean_motion, rtol=1e-10, **kwargs):
    """Propagates an orbit a specified time.

    If value is true anomaly, propagate orbit to this anomaly and return the result.
    Otherwise, if time is provided, propagate this `Orbit` some `time` and return the result.

    Parameters
    ----------
    value : ~astropy.units.Quantity, ~astropy.time.Time, ~astropy.time.TimeDelta
        Scalar time to propagate.
    rtol : float, optional
        Relative tolerance for the propagation algorithm, default to 1e-10.
    method : function, optional
        Method used for propagation
    **kwargs
        parameters used in perturbation models

    Returns
    -------
    Orbit
        New orbit after propagation.
    """
    if isinstance(value, time.Time) and not isinstance(value, time.TimeDelta):
        time_of_flight = value - self.epoch
    else:
        # Works for both Quantity and TimeDelta objects
        time_of_flight = time.TimeDelta(value)

    cartesian = propagate(self, time_of_flight, method=method, rtol=rtol, **kwargs)
    new_epoch = self.epoch + time_of_flight

    # TODO: Unify with sample
    # If the frame supports obstime, set the time values
    try:
        kwargs = {}
        if "obstime" in self.frame.frame_attributes:
            kwargs["obstime"] = new_epoch

        # Use of a protected method instead of frame.realize_frame
        # because the latter does not let the user choose the representation type
        # in one line despite its parameter names, see
        # https://github.com/astropy/astropy/issues/7784
        coords = self.frame._replicate(
            cartesian, representation_type="cartesian", **kwargs
        )

        return self.from_coords(self.attractor, coords, plane=self.plane)

    except NotImplementedError:
        return self.from_vectors(
            self.attractor,
            cartesian[0].xyz,
            cartesian[0].differentials["s"].d_xyz,
            new_epoch,
        )

def mkfitsloader(fitsdir, outdir, filename="loader.fits", skipna=True, skip31if=True):
    import astropy.io.fits as pf
    import astropy.time as at
    from tqdm import tqdm

    # Check data in FITS files
    datetimes = []
    refdates = []
    fitsnames = []
    list1 = os.listdir(fitsdir)
    for comp in tqdm(list1, bar_format="Reading FITS directory: " + r'{l_bar}{bar}{r_bar}'):
        comppath = os.path.join(fitsdir, comp)
        if "na-" in comp and skipna:
            continue
        if not os.path.isfile(comppath):
            continue
        try:
            hdulist = pf.open(comppath)
        except IOError:
            continue

        # Get UV Data
        uvdata = hdulist["UV_DATA"]

        # Check number of IFs
        if uvdata.header["MAXIS4"] != 32:
            continue

        # FITS Files
        fitsnames.append(comppath)

        # Get Time Stamp
        times = at.Time(uvdata.data["DATE"], format="jd", scale="utc")
        times += at.TimeDelta(uvdata.data["TIME"], format="jd")
        hdulist.close()
        datetimes.append(times.min().datetime)

    fitsfiles = {'datetime': datetimes, 'fitsfile': fitsnames}
    fitsfiles = pd.DataFrame(fitsfiles, columns=["datetime", "fitsfile"])
    fitsfiles = fitsfiles.sort_values(by="datetime").reset_index(drop=True)
    Nfile = len(fitsfiles.fitsfile)
    print(" - %d FITS files are found" % (Nfile))
    print(fitsfiles)

    os.system("mkdir -p %s" % (outdir))
    os.system("rm -rf %s*" % (os.path.join(outdir, filename)))
    for i in tqdm(range(Nfile), bar_format="Creating symbolic links: " + r'{l_bar}{bar}{r_bar}'):
        orgfile = os.path.relpath(fitsfiles.loc[i, "fitsfile"], start=outdir)
        lnfile = "%s%d" % (filename, i + 1)
        os.system("cd %s; ln -s %s %s" % (outdir, orgfile, lnfile))

    refdate = fitsfiles.loc[0, "datetime"]
    refdate = "%04d%02d%02d" % (refdate.year, refdate.month, refdate.day)
    return refdate

def get_timestamp(fitsfile):
    hdulist = pf.open(fitsfile)
    uvdata = hdulist["UV_DATA"]
    dates = at.Time(uvdata.data["DATE"], format="jd", scale="utc")
    times = at.TimeDelta(uvdata.data["TIME"], format="jd")
    datetimes = dates + times
    starttime = datetimes.min()
    hdulist.close()
    year, doy, h, m, s = starttime.yday.split(":")
    return "%04s-%03s-%02s%02s%02d" % (year, doy, h, m,
                                       np.int64(np.around(np.float64(s))))

def bcor(self, coord):
    mod, spos = self._vect(coord)
    # get helio/bary-centric position and velocity of observatory, in AU, AU/d
    h_pos, h_vel, b_pos, b_vel = self._obs_pos()
    # barycentric light travel time, s
    tcor_bar = const.au.value * np.array(
        [np.dot(spos, bpos) for bpos in b_pos]) / const.c.value
    # print 'Correction to add to get time at barycentre = %.7f s' % tcor_bar
    dt = time.TimeDelta(tcor_bar, format='sec', scale='tdb')
    return self.tdb + dt

def largest_dt(group):
    """Takes a group [pandas dataframe] and returns a list of tuples
    containing paired elements of the group. Each tuple is a
    (science, template, dt) triplet of pointing visit ids and time-stamp
    difference, matched such that their time-stamp difference is the
    largest among all the pointings in the group.

    Except for groups of length 1, pairing should never fail.
    """
    # pandas dataframes are not awkward to use at all...
    pairs = []
    for (idx, science) in group.iterrows():
        sci_obsdate = science["date_obs"]
        sci_id = science["visit_id"]
        sci_ra = science["ra"]
        sci_dec = science["dec"]
        sci_filename = science["filename"]
        sci_date = time.Time(sci_obsdate.decode("utf-8"))

        maxdt = 0
        for (idx, template) in group.iterrows():
            tmplt_obsdate = template["date_obs"]
            tmplt_id = template["visit_id"]
            tmplt_ra = template["ra"]
            tmplt_dec = template["dec"]
            tmplt_filename = template["filename"]
            tmplt_date = time.Time(tmplt_obsdate.decode("utf-8"))

            # dt is an astropy.time.TimeDelta object and can not be compared
            # to non-TimeDelta objects; it recognizes positive and negative
            # time deltas, so we check its absolute value
            dt = sci_date - tmplt_date
            if abs(dt) > time.TimeDelta(maxdt, format="sec"):
                maxdt = dt
                if sci_id != tmplt_id:
                    pair = (sci_id, tmplt_id, abs(dt))

        try:
            pairs.append(pair)
        except:
            print("Pair not found. Check the group.")
        maxdt = time.TimeDelta(0, format="sec")

    return pairs

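# Hedged sketch of the TimeDelta bookkeeping in the inner loop above: a
# difference of two Time objects compares only against other TimeDelta objects,
# so the running maximum is kept as (or wrapped into) a TimeDelta. Timestamps
# are arbitrary examples.
from astropy import time

t_sci = time.Time("2019-05-03T01:30:00")
t_tmplt = time.Time("2019-05-01T03:00:00")

maxdt = time.TimeDelta(0, format="sec")
dt = t_sci - t_tmplt                      # TimeDelta; may be negative in general
if abs(dt) > maxdt:
    maxdt = abs(dt)
print(maxdt.sec)                          # ~167400 s
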
def write_TOA_file(self, filename, name='pint', format='Princeton'):
    """Dump current TOA table out as a TOA file

    Parameters
    ----------
    filename : str
        File name to write to; can be an open file handle.
    format : str
        Format specifier for file ('TEMPO' or 'Princeton') or
        ('Tempo2' or '1')

    Bugs
    ----
    Currently does not undo any clock corrections that were applied,
    so TOA file won't match the input TOA file if any were applied.
    """
    try:
        outf = open(filename, 'w')
        handle = False
    except TypeError:
        outf = filename
        handle = True
    if format.upper() in ('TEMPO2', '1'):
        outf.write('FORMAT 1\n')

    # NOTE(@paulray): This really should REMOVE any(?) clock corrections
    # that have been applied!
    # NOTE clock corrections has been removed.

    # Add pulse numbers to flags temporarily if there is a pulse number column
    pnChange = False
    if 'pn' in self.table.colnames:
        pnChange = True
        for i in range(len(self.table['flags'])):
            self.table['flags'][i]['pn'] = self.table['pn'][i]

    for toatime, toaerr, freq, obs, flags in zip(self.table['mjd'],
                                                 self.table['error'].quantity,
                                                 self.table['freq'].quantity,
                                                 self.table['obs'],
                                                 self.table['flags']):
        obs_obj = Observatory.get(obs)
        if 'clkcorr' in flags.keys():
            toatime_out = toatime - time.TimeDelta(flags['clkcorr'])
        else:
            toatime_out = toatime
        out_str = format_toa_line(toatime_out, toaerr, freq, obs_obj,
                                  name=name, flags=flags, format=format)
        outf.write(out_str)

    # If pulse numbers were added to flags, remove them again
    if pnChange:
        for flags in self.table['flags']:
            del flags['pn']

    if not handle:
        outf.close()

def propagate(self, value, method=mean_motion, rtol=1e-10, **kwargs):
    """Propagates an orbit.

    If value is true anomaly, propagate orbit to this anomaly and return the result.
    Otherwise, if time is provided, propagate this `Orbit` some `time` and return the result.

    Parameters
    ----------
    value : Multiple options
        True anomaly values or time values. If given an angle, it will always propagate forward.
    rtol : float, optional
        Relative tolerance for the propagation algorithm, default to 1e-10.
    method : function, optional
        Method used for propagation
    **kwargs
        parameters used in perturbation models
    """
    if hasattr(value, "unit") and value.unit in ("rad", "deg"):
        p, ecc, inc, raan, argp, _ = rv2coe(
            self.attractor.k.to(u.km**3 / u.s**2).value,
            self.r.to(u.km).value,
            self.v.to(u.km / u.s).value,
        )

        # Compute time of flight for correct epoch
        M = nu_to_M(self.nu, self.ecc)
        new_M = nu_to_M(value, self.ecc)
        time_of_flight = Angle(new_M - M).wrap_at(360 * u.deg) / self.n

        return self.from_classical(
            self.attractor,
            p / (1.0 - ecc**2) * u.km,
            ecc * u.one,
            inc * u.rad,
            raan * u.rad,
            argp * u.rad,
            value,
            epoch=self.epoch + time_of_flight,
            plane=self._plane,
        )
    else:
        if isinstance(value, time.Time) and not isinstance(value, time.TimeDelta):
            time_of_flight = value - self.epoch
        else:
            time_of_flight = time.TimeDelta(value)

        return propagate(self, time_of_flight, method=method, rtol=rtol, **kwargs)

def setMax(self, maxTime, format=__defFormat):
    # print("{0}".format(maxTime))
    if maxTime is not None:
        hh = int(maxTime[11:13])
        if hh >= 24:
            maxTime = maxTime[:11] + "{0:02d}".format(hh - 24) + maxTime[13:]
            self.max = Time.Time(maxTime, format=format) + Time.TimeDelta(1.0, format="jd")
        else:
            self.max = Time.Time(maxTime, format=format)
    else:
        self.max = None

def setEnd(self, endTime, format=__defFormat):
    # print("{0}".format(endTime))
    if endTime is not None:
        hh = int(endTime[11:13])
        if hh >= 24:
            endTime = endTime[:11] + "{0:02d}".format(hh - 24) + endTime[13:]
            self.end = Time.Time(endTime, format=format) + Time.TimeDelta(1.0, format="jd")
        else:
            self.end = Time.Time(endTime, format=format)
    else:
        self.end = None

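# Hedged sketch of the 24h-rollover handling shared by setMax() and setEnd():
# an "hour 24" timestamp is folded back into 00-23 and one Julian day is added
# instead. The input string is a made-up example.
from astropy import time as Time

raw = "2018-07-14T24:15:00"               # hour field >= 24 is not valid ISOT
hh = int(raw[11:13])
if hh >= 24:
    fixed = raw[:11] + "{0:02d}".format(hh - 24) + raw[13:]
    t = Time.Time(fixed, format="isot") + Time.TimeDelta(1.0, format="jd")
else:
    t = Time.Time(raw, format="isot")
print(t.isot)                             # 2018-07-15T00:15:00.000
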
def ephemeris(self, time):
    if self.observer is None:
        return {}

    sunrise = self.next_sunrise(time=time)
    sunset = self.next_sunset(time=time)

    if sunset is not None and sunset > sunrise:
        sunset = self.observer.sun_set_time(time, which='previous')
        time = sunset - ap_time.TimeDelta(30, format='sec')

    twilight_morning_astronomical = self.next_twilight_morning_astronomical(time=time)
    twilight_evening_astronomical = self.next_twilight_evening_astronomical(time=time)
    twilight_morning_nautical = self.next_twilight_morning_nautical(time=time)
    twilight_evening_nautical = self.next_twilight_evening_nautical(time=time)

    return {
        'sunset_utc': sunset.isot,
        'sunrise_utc': sunrise.isot,
        'twilight_morning_astronomical_utc': twilight_morning_astronomical.isot,
        'twilight_evening_astronomical_utc': twilight_evening_astronomical.isot,
        'twilight_morning_nautical_utc': twilight_morning_nautical.isot,
        'twilight_evening_nautical_utc': twilight_evening_nautical.isot,
        'utc_offset_hours': self.observer.timezone.utcoffset(time.datetime) / timedelta(hours=1),
        'sunset_unix_ms': sunset.unix * 1000,
        'sunrise_unix_ms': sunrise.unix * 1000,
        'twilight_morning_astronomical_unix_ms': twilight_morning_astronomical.unix * 1000,
        'twilight_evening_astronomical_unix_ms': twilight_evening_astronomical.unix * 1000,
        'twilight_morning_nautical_unix_ms': twilight_morning_nautical.unix * 1000,
        'twilight_evening_nautical_unix_ms': twilight_evening_nautical.unix * 1000,
    }

def sample(self, values=100, *, min_anomaly=None, max_anomaly=None, method=mean_motion):
    r"""Samples an orbit to some specified time values.

    .. versionadded:: 0.8.0

    Parameters
    ----------
    values : int
        Number of interval points (default to 100).
    min_anomaly, max_anomaly : ~astropy.units.Quantity, optional
        Anomaly limits to sample the orbit.
        For elliptic orbits the default will be :math:`E \in \left[0, 2 \pi \right]`,
        and for hyperbolic orbits it will be :math:`\nu \in \left[-\nu_c, \nu_c \right]`,
        where :math:`\nu_c` is either the current true anomaly
        or a value that corresponds to :math:`r = 3p`.
    method : function, optional
        Method used for propagation

    Returns
    -------
    positions: ~astropy.coordinates.BaseCoordinateFrame
        Array of x, y, z positions, with proper times as the frame attributes if supported.

    Notes
    -----
    When specifying a number of points, the initial and final
    position is present twice inside the result (first and
    last row). This is more useful for plotting.

    Examples
    --------
    >>> from astropy import units as u
    >>> from poliastro.examples import iss
    >>> iss.sample()  # doctest: +ELLIPSIS
    <GCRS Coordinate ...>
    >>> iss.sample(10)  # doctest: +ELLIPSIS
    <GCRS Coordinate ...>

    """
    if self.ecc < 1:
        nu_values = self._sample_closed(values, min_anomaly, max_anomaly)
    else:
        nu_values = self._sample_open(values, min_anomaly, max_anomaly)

    time_values = self._generate_time_values(nu_values)
    return propagate(self, time.TimeDelta(time_values), method=method)

def ztf_obs(start_time=None, end_time=None):
    if start_time is None:
        start_time = time.Time.now() - time.TimeDelta(1.0 * u.day)
    if end_time is None:
        end_time = time.Time.now()

    obstable = client.search("""
    SELECT field, rcid, fid, expid, obsjd, exptime, seeing, airmass, maglimit
    FROM ztf.ztf_current_meta_sci
    WHERE (obsjd BETWEEN {0} AND {1}) AND (field < 2000)
    """.format(start_time.jd, end_time.jd)).to_table()
    obstable = obstable.filled()

    if len(obstable) == 0:
        log.info('No observations in time window to ingest.')
        return

    obs_grouped_by_exp = obstable.group_by('expid').groups
    for expid, rows in zip(obs_grouped_by_exp.keys, obs_grouped_by_exp):
        for row in rows:
            obstime = time.Time(row['obsjd'], format='jd').datetime
            models.db.session.merge(
                models.Observation(telescope='ZTF',
                                   field_id=int(row['field']),
                                   observation_id=int(row['expid']),
                                   obstime=obstime,
                                   exposure_time=int(row['exptime']),
                                   filter_id=int(row['fid']),
                                   airmass=float(row['airmass']),
                                   seeing=float(row['seeing']),
                                   limmag=float(row['maglimit']),
                                   subfield_id=int(row['rcid']),
                                   successful=1))
        subfield_ids = rows['rcid'].tolist()
        quadrantIDs = np.arange(64)
        missing_quadrants = np.setdiff1d(quadrantIDs, subfield_ids)
        for missing_quadrant in missing_quadrants:
            obstime = time.Time(rows['obsjd'][0], format='jd').datetime
            models.db.session.merge(
                models.Observation(telescope='ZTF',
                                   field_id=int(rows['field'][0]),
                                   observation_id=int(rows['expid'][0]),
                                   obstime=obstime,
                                   exposure_time=int(rows['exptime'][0]),
                                   filter_id=int(rows['fid'][0]),
                                   airmass=float(rows['airmass'][0]),
                                   subfield_id=int(missing_quadrant),
                                   successful=0))
    models.db.session.commit()

def __init__(self, *args, **kwargs):
    inputFormat = kwargs.setdefault('format', 'TAI')
    self.longitude = kwargs.setdefault('longitude', 254.179722)

    if inputFormat.upper() == 'TAI':
        self.time = time.Time(0, format='mjd', scale='tai') + \
            time.TimeDelta(args[0], format='sec', scale='tai')
    elif inputFormat.upper() == 'ISOT':
        self.time = time.Time(args[0], format='isot', scale='tai')
    elif inputFormat.upper() == 'JD':
        self.time = time.Time(args[0], format='jd', scale='tai')
    else:
        raise ValueError('format value not valid.')

def test_propagate_accepts_timedelta():
    # Data from Vallado, example 2.4
    r0 = [1131.340, -2282.343, 6672.423] * u.km
    v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
    expected_r = [-4219.7527, 4363.0292, -3958.7666] * u.km
    expected_v = [3.689866, -1.916735, -6.112511] * u.km / u.s

    ss0 = Orbit.from_vectors(Earth, r0, v0)
    tof = time.TimeDelta(40 * u.min)
    ss1 = ss0.propagate(tof)
    r, v = ss1.rv()

    assert_quantity_allclose(r, expected_r, rtol=1e-5)
    assert_quantity_allclose(v, expected_v, rtol=1e-4)

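# Small sketch of the TimeDelta the test above feeds to propagate(): built
# straight from an astropy Quantity, 40 min comes out as 2400 s.
from astropy import time, units as u

tof = time.TimeDelta(40 * u.min)
print(tof.sec)   # 2400.0
print(tof.jd)    # ~0.02778 days
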