Example #1
    def compute_eccentric_anomaly(self, eccentricity, mean_anomaly):
        """compute eccentric anomaly, solve for Kepler Equation
            Parameter
            ----------
            eccentricity : array_like
                Eccentricity of binary system
            mean_anomaly : array_like
                Mean anomaly of the binary system
            Returns
            -------
            array_like
                The eccentric anomaly in radians, given a set of mean_anomalies
                in radians.
        """
        if hasattr(eccentricity, 'unit'):
            e = np.longdouble(eccentricity.value)
        else:
            e = eccentricity

        if np.any(e < 0) or np.any(e >= 1):
            raise ValueError('Eccentricity should be in the range of [0,1).')

        if hasattr(mean_anomaly, 'unit'):
            ma = np.longdouble(mean_anomaly.value)
        else:
            ma = mean_anomaly
        k = lambda E: E - e * np.sin(E) - ma   # Kepler's equation
        dk = lambda E: 1 - e * np.cos(E)       # derivative of Kepler's equation
        U = ma
        while np.max(np.abs(k(U))) > 5e-15:  # Newton-Raphson iteration
            U = U - k(U) / dk(U)
        return U * u.rad
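For quick experimentation, here is a minimal standalone sketch of the same Newton-Raphson solve; the free function solve_kepler and the literal inputs are illustrative additions, not part of the original class:

import numpy as np
import astropy.units as u

def solve_kepler(e, ma, tol=5e-15):
    """Solve E - e*sin(E) = M for the eccentric anomaly E (radians)."""
    E = np.asarray(ma, dtype=np.longdouble)
    while np.max(np.abs(E - e * np.sin(E) - ma)) > tol:
        E = E - (E - e * np.sin(E) - ma) / (1 - e * np.cos(E))
    return E * u.rad

print(solve_kepler(0.1, np.array([0.5, 1.0])))  # eccentric anomalies in rad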
Example #2
    def __init__( self ):

        self.s = longdouble( 0 )
        self.m = longdouble( 0 )
        self.last_m = longdouble( 0 )
        self.n = ulonglong( 0 )
        self.is_started = False
Example #3
 def TestD_phase_d_toa(self):
     pint_d_phase_d_toa = self.modelB1855.d_phase_d_toa(self.toasB1855)
     mjd = np.array([np.longdouble(t.jd1 - ut.DJM0)+np.longdouble(t.jd2) for t in self.toasB1855.table['mjd']])
     tempo_d_phase_d_toa = self.plc.eval_spin_freq(mjd)
     diff = pint_d_phase_d_toa.value - tempo_d_phase_d_toa
     relative_diff = diff/tempo_d_phase_d_toa
     assert np.all(relative_diff < 1e-8), 'd_phase_d_toa test failed.'
Example #4
 def test_ldouble_mapping(self):
     """ Test mapping for extended-precision """
     self.assertEqual(h5t.NATIVE_LDOUBLE.dtype, np.longdouble(1).dtype)
     if hasattr(np, 'float96'):
         self.assertEqual(h5t.py_create(np.dtype('float96')).dtype, np.longdouble(1).dtype)
     if hasattr(np, 'float128'):
         self.assertEqual(h5t.py_create(np.dtype('float128')).dtype, np.longdouble(1).dtype)
Example #5
def enn(x):
    # w, d, a0 and a1 are assumed to be defined at module scope
    enn = np.zeros_like(x, dtype=np.longdouble)
    enn[0] = np.longdouble(4.0 * w(0, d) * a0 ** 2)
    enn[1] = np.longdouble(4.0 * w(1, d) * a1 ** 2)
    for i in range(2, len(x)):
        enn[i] = np.longdouble(4.0 * w(1, d) * x[i] ** 2)
    return np.sum(enn, dtype=np.longdouble)
Example #6
def ref_mjd(fits_file, hdu=1):
    """Read MJDREFF+ MJDREFI or, if failed, MJDREF, from the FITS header.

    Parameters
    ----------
    fits_file : str

    Returns
    -------
    mjdref : numpy.longdouble
        the reference MJD

    Other Parameters
    ----------------
    hdu : int
    """
    import collections.abc

    if isinstance(fits_file, collections.abc.Iterable) and\
            not is_string(fits_file):  # pragma: no cover
        fits_file = fits_file[0]
        logging.info("opening %s" % fits_file)

    try:
        ref_mjd_int = int(read_header_key(fits_file, 'MJDREFI'))
        ref_mjd_float = np.longdouble(read_header_key(fits_file, 'MJDREFF'))
        ref_mjd_val = ref_mjd_int + ref_mjd_float
    except Exception:  # pragma: no cover
        ref_mjd_val = np.longdouble(read_header_key(fits_file, 'MJDREF'))
    return ref_mjd_val
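A short, hedged illustration of why FITS headers split the reference epoch into an integer part (MJDREFI) and a fractional part (MJDREFF): summing the parts in longdouble preserves precision that a single float64 keyword would round away. The epoch below is illustrative:

import numpy as np

mjd_i = 55197                    # illustrative integer part (MJDREFI)
mjd_f = "0.00076601852"          # illustrative fractional part (MJDREFF)
as_double = np.float64(mjd_i) + np.float64(mjd_f)
as_longdouble = np.longdouble(mjd_i) + np.longdouble(mjd_f)
print(as_longdouble - np.longdouble(as_double))  # residual the split preserves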
Example #7
def test_as_int():
    # Integer representation of number
    assert_equal(as_int(2.0), 2)
    assert_equal(as_int(-2.0), -2)
    assert_raises(FloatingError, as_int, 2.1)
    assert_raises(FloatingError, as_int, -2.1)
    assert_equal(as_int(2.1, False), 2)
    assert_equal(as_int(-2.1, False), -2)
    v = np.longdouble(2**64)
    assert_equal(as_int(v), 2**64)
    # Have all long doubles got 63+1 binary bits of precision?  Windows 32-bit
    # longdouble appears to have 52 bit precision, but we avoid that by checking
    # for known precisions that are less than that required
    try:
        nmant = type_info(np.longdouble)['nmant']
    except FloatingError:
        nmant = 63  # Unknown precision, let's hope it's at least 63
    v = np.longdouble(2) ** (nmant + 1) - 1
    assert_equal(as_int(v), 2 ** (nmant + 1) - 1)
    # Check for predictable overflow
    nexp64 = floor_log2(type_info(np.float64)['max'])
    with np.errstate(over='ignore'):
        val = np.longdouble(2**nexp64) * 2  # outside float64 range
    assert_raises(OverflowError, as_int, val)
    assert_raises(OverflowError, as_int, -val)
Example #8
	def __init__(self, delt_t, x=0.0, y=0.0, z=0.0,
				v_x=0.0, v_y=0.0, v_z=0.0, mass=0):
		"""Units:
		Position: km
		Volume: km/s
		Mass: kg
		Delta_t: days
		"""
		self.pos = np.array([np.longdouble(x),
							np.longdouble(y),
							np.longdouble(z)])

		# velocity vector (km/s); attribute name 'vol' kept from the original
		self.vol = np.array([np.longdouble(v_x),
							np.longdouble(v_y),
							np.longdouble(v_z)])

		self.mass = mass

		self.del_t = np.longdouble(delt_t) * 86400

		# Initialize non-input vars
		# Initialize history of positions
		self.hist = [[np.longdouble(x)],
					[np.longdouble(y)],
					[np.longdouble(z)]]
		# Initialize force array
		self.force = np.zeros(3, dtype=np.longdouble)
		# Initialize constants
		self.VELOCITY_FACTOR = self.del_t / self.mass / 1000
		self.FORCE_FACTOR = self.GRAVITATIONAL_CONSTANT * self.mass / 1000000
Example #9
    def generate_affine_backtransformation(self):
        """ Generate synthetic examples and test them to determine transformation

        This is the key method!
        """
        if type(self.example) == FeatureVector:
            testsample = FeatureVector.replace_data(
                self.example, numpy.zeros(self.example.shape))
            self.offset = numpy.longdouble(self._execute(testsample))
            self.trafo = FeatureVector.replace_data(
                self.example, numpy.zeros(self.example.shape))
            for j in range(len(self.example.feature_names)):
                testsample = FeatureVector.replace_data(
                    self.example,
                    numpy.zeros(self.example.shape))
                testsample[0][j] = 1.0
                self.trafo[0][j] = \
                    numpy.longdouble(self._execute(testsample) - self.offset)
        elif type(self.example) == TimeSeries:
            testsample = TimeSeries.replace_data(
                self.example, numpy.zeros(self.example.shape))
            self.offset = numpy.longdouble(numpy.squeeze(
                self._execute(testsample)))
            self.trafo = TimeSeries.replace_data(
                self.example, numpy.zeros(self.example.shape))
            for i in range(self.example.shape[0]):
                for j in range(self.example.shape[1]):
                    testsample = TimeSeries.replace_data(
                        self.example, numpy.zeros_like(self.example))
                    testsample[i][j] = 1.0
                    self.trafo[i][j] = \
                        numpy.longdouble(numpy.squeeze(self._execute(testsample))
                                       - self.offset)
Example #10
    def d_phase_d_param(self, toas, delay, param):
        """ Return the derivative of phase with respect to the parameter.
        """
        # TODO need to do correct chain rule stuff wrt delay derivs, etc
        # Is it safe to assume that any param affecting delay only affects
        # phase indirectly (and vice-versa)??
        par = getattr(self, param)
        result = np.longdouble(np.zeros(len(toas))) * u.cycle/par.units
        param_phase_derivs = []
        phase_derivs = self.phase_deriv_funcs
        delay_derivs = self.delay_deriv_funcs
        if param in list(phase_derivs.keys()):
            for df in phase_derivs[param]:
                result += df(toas, param, delay).to(result.unit,
                            equivalencies=u.dimensionless_angles())
        else:
            # Apply chain rule for the parameters in the delay.
            # total_phase = Phase1(delay(param)) + Phase2(delay(param))
            # d_total_phase_d_param = d_Phase1/d_delay*d_delay/d_param +
            #                         d_Phase2/d_delay*d_delay/d_param
            #                       = (d_Phase1/d_delay + d_Phase2/d_delay) *
            #                         d_delay_d_param

            d_delay_d_p = self.d_delay_d_param(toas, param)
            dpdd_result = np.longdouble(np.zeros(len(toas))) * u.cycle/u.second
            for dpddf in self.d_phase_d_delay_funcs:
                dpdd_result += dpddf(toas, delay)
            result = dpdd_result * d_delay_d_p
        return result.to(result.unit, equivalencies=u.dimensionless_angles())
Example #11
    def compute_sentence_vector(self, sentence, model, w, vocab, zeros, word_list_to_exclude, settings):

        if len(sentence) == 1:
            tokens = sentence.split()  # tokenize by whitespace
        else:
            tokens = sentence  # sentence is already tokenized

        sentence_vector = np.longdouble(zeros)
        oov_count = 0

        for token in tokens:
            token = token.strip()

            try:
                if (settings['excludestopwords'] == '1') and token in word_list_to_exclude:
                    continue  # token somehow is not eligible to be used in our representation

                token_vector = self.get_word_vector(token, settings['method'], model, w, vocab)

            except Exception as e:
                # print(str(e))
                oov_count += 1
            else:
                token_vector = np.longdouble(np.asarray(token_vector))
                sentence_vector = sentence_vector + token_vector  # make vector summation for each token
Example #12
 def __init__(self):
     self.number = 10
     self.str = 'Test'
     self.list = [1,2,3]
     self.array = np.array([1,2,3])
     self.long_number = np.longdouble(1)
     self.long_array = np.longdouble([1,2,3])
Example #13
 def d_delay_d_DMs(self, toas, param_name, acc_delay=None): # NOTE we should have a better name for this.
     """Derivatives for constant DM
     """
     tbl = toas.table
     try:
         bfreq = self.barycentric_radio_freq(toas)
     except AttributeError:
         warn("Using topocentric frequency for dedispersion!")
         bfreq = tbl['freq']
     par = getattr(self, param_name)
     unit = par.units
     if param_name == 'DM':
         order = 0
     else:
         pn, idxf, idxv = split_prefixed_name(param_name)
         order = idxv
     dms = self.get_DM_terms()
     dm_terms = np.longdouble(np.zeros(len(dms)))
     dm_terms[order] = np.longdouble(1.0)
     if self.DMEPOCH.value is None:
         DMEPOCH = tbl['tdbld'][0]
     else:
         DMEPOCH = self.DMEPOCH.value
     dt = (tbl['tdbld'] - DMEPOCH) * u.day
     dt_value = (dt.to(u.yr)).value
     d_dm_d_dm_param = taylor_horner(dt_value, dm_terms) * (self.DM.units / par.units)
     return DMconst * d_dm_d_dm_param / bfreq**2.0
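For context, a sketch of what a taylor_horner helper like the one called above plausibly computes: sum_i coeffs[i] * x**i / i!, evaluated with Horner's scheme. This is an assumption about the helper's behavior, not its actual source:

import numpy as np

def taylor_horner_sketch(x, coeffs):
    """Evaluate sum_i coeffs[i] * x**i / i! with Horner's scheme."""
    result = np.longdouble(0.0)
    fact = np.longdouble(len(coeffs))
    for coeff in coeffs[::-1]:
        result = coeff + result * x / fact
        fact -= 1.0
    return result

print(taylor_horner_sketch(2.0, [10.0, 3.0, 4.0, 12.0]))  # 10 + 6 + 8 + 16 = 40.0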
Example #14
 def __init__(self,):
     # Necessary parameters for all binary models
     self.binary_name = None
     self.param_default_value = {'PB': np.longdouble(10.0)*u.day,
                        'PBDOT': 0.0*u.day/u.day,
                        'ECC': 0.9*u.Unit(''),
                        'EDOT': 0.0/u.second,
                        'A1': 10.0*ls, 'A1DOT': 0.0*ls/u.second,
                        'T0': np.longdouble(54000.0)*u.day,
                        'OM': 10.0*u.deg,
                        'OMDOT': 0.0*u.deg/u.year,
                        'XPBDOT': 0.0*u.day/u.day,
                        'M2': 0.0*u.M_sun,
                        'SINI': 0*u.Unit(''),
                        'GAMMA': 0*u.second}
     # For Binary phase calculation
     self.param_default_value.update({'P0': 1.0*u.second,
                                      'P1': 0.0*u.second/u.second,
                                      'PEPOCH': np.longdouble(54000.0)*u.day
                                     })
     self.param_aliases = {'ECC':['E'],'EDOT':['ECCDOT'],
                           'A1DOT':['XDOT']}
     self.binary_params = list(self.param_default_value.keys())
     self.inter_vars = ['E','M','nu','ecc','omega','a1','TM2']
     self.binary_delay_funcs = []
     self.d_binarydelay_d_par_funcs = []
Example #15
def poincare_section_sets(datafile, colx, coly, colvx):
    datf = open(datafile, 'r')
    data = np.array([0, 0, 0], dtype=np.longdouble)
    data1 = np.array([0, 0, 0], dtype=np.longdouble)
    firstline = datf.readline()
    firstline = firstline.strip()
    columns = firstline.split()
    cols = [colx, coly, colvx]
    secpoint = [[], []]
    for i in range(3):
        data[i] = np.longdouble(columns[cols[i]])
    for line in datf:
        for i in range(3):
            data1[i] = data[i]
        line = line.strip()
        columns = line.split()
        for i in range(3):
            data[i] = np.longdouble(columns[cols[i]])
        if (data[1] > 0 and data1[1] < 0):
            secpoint[0].append((data1[0] + data[0]) / 2)
            secpoint[1].append((data1[2] + data[2]) / 2)
        elif (data[1] == 0 and data1[1] < 0):
            secpoint[0].append(data[0])
            secpoint[1].append(data[2])
    datf.close()
    return secpoint
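A hedged vectorized alternative: if the whole file fits in memory, the same upward y = 0 crossings can be found with numpy array operations. np.loadtxt and the column layout are assumptions, and the exact y == 0 case from the original is omitted for brevity:

import numpy as np

def poincare_section_vectorized(datafile, colx, coly, colvx):
    d = np.loadtxt(datafile, dtype=np.longdouble)
    x, y, vx = d[:, colx], d[:, coly], d[:, colvx]
    # indices i where y crosses zero from below between rows i and i+1
    up = np.where((y[:-1] < 0) & (y[1:] > 0))[0]
    return [((x[up] + x[up + 1]) / 2).tolist(),
            ((vx[up] + vx[up + 1]) / 2).tolist()]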
Example #16
 def __init__(self, toa, freq, error, site, flags=dict(), name="X"):
     # split the decimal string so both parts survive longdouble conversion
     e = toa.split(".")
     self.toa = (np.longdouble(e[0]) + np.longdouble("0." + e[1])) * u.day
     self.freq = freq.to(u.megahertz)
     self.site = site
     self.flags = flags
     self.error = error.to(u.microsecond)
     self.name = name
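A hedged illustration of the string-splitting trick in __init__: the integer and fractional parts are converted separately so that no precision is lost even if direct string parsing were to round through float64 (behavior varies by platform and numpy version). The TOA string below is illustrative:

import numpy as np
import astropy.units as u

toa_str = "56000.000000000001234"   # illustrative MJD string
i_part, f_part = toa_str.split(".")
split_val = (np.longdouble(i_part) + np.longdouble("0." + f_part)) * u.day
direct_val = np.longdouble(toa_str) * u.day
print(split_val - direct_val)  # zero wherever direct parsing is already exact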
Example #17
 def evalfreq(self, t):
     '''Return the freq at time t, computed with this polyco entry'''
     dt = (np.longdouble(t) - self.tmid.value) * np.longdouble(1440.0)
     s = np.longdouble(0.0)
     for i in range(1, self.ncoeff):
         s += np.longdouble(i) * self.coeffs[i] * dt**(i - 1)
     freq = self.f0 + s/60.0
     return freq
Example #18
def gibb_roll(sides: int, biases: list):
    assert len(biases) == sides
    number = np.longdouble(random.uniform(0.0, np.sum(biases)))
    current = np.longdouble(0.0)
    for i, bias in enumerate(biases):
        current += bias
        if current >= number:
            return i
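A self-contained usage sketch (the imports and the empirical check are additions; the bias values are illustrative):

import random
import numpy as np

def gibb_roll(sides, biases):
    assert len(biases) == sides
    number = np.longdouble(random.uniform(0.0, np.sum(biases)))
    current = np.longdouble(0.0)
    for i, bias in enumerate(biases):
        current += bias
        if current >= number:
            return i

rolls = [gibb_roll(3, [0.7, 0.2, 0.1]) for _ in range(10000)]
print([rolls.count(i) / 10000 for i in range(3)])  # roughly [0.7, 0.2, 0.1]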
Example #19
 def evalfreqderiv(self, t):
     """ Return the frequency derivative at time t."""
     dt = (np.longdouble(t) - self.tmid.value) * np.longdouble(1440.0)
     s = np.longdouble(0.0)
     for i in range(2, self.ncoeff):
         # Change to long double
         s += np.longdouble(i) * np.longdouble(i - 1) * self.coeffs[i] * dt**(i - 2)
     freqd = s/(60.0*60.0)
     return freqd
Example #20
def _retrieve_hdf5_object(filename):
    """
    Retrieves an hdf5 format class object.

    Parameters
    ----------
    filename: str
        The name of file with which object was saved

    Returns
    -------
    data: dictionary
        Loads the data from an hdf5 object file and returns
        in dictionary format.
    """

    with h5py.File(filename, 'r') as hf:
        dset_keys = list(hf.keys())
        attr_keys = list(hf.attrs.keys())
        data = {}

        dset_copy = dset_keys[:]
        for key in dset_keys:

            # Make sure key hasn't been removed
            if key in dset_copy:
                # Longdouble case
                if key[-2:] in ['_I', '_F']:
                    m_key = key[:-2]
                    # Add integer and float parts
                    data[m_key] = np.longdouble(hf[m_key+'_I'][()])
                    data[m_key] += np.longdouble(hf[m_key+'_F'][()])
                    # Remove integer and float parts from attributes
                    dset_copy.remove(m_key+'_I')
                    dset_copy.remove(m_key+'_F')
                else:
                    data[key] = hf[key][()]

        attr_copy = attr_keys[:]
        for key in attr_keys:
            
            # Make sure key hasn't been removed
            if key in attr_copy:
                # Longdouble case
                if key[-2:] in ['_I', '_F']:
                    m_key = key[:-2]
                    # Add integer and float parts
                    data[m_key] = np.longdouble(hf.attrs[m_key+'_I'])
                    data[m_key] += np.longdouble(hf.attrs[m_key+'_F'])
                    # Remove integer and float parts from attributes
                    attr_copy.remove(m_key+'_I')
                    attr_copy.remove(m_key+'_F')
                else:
                    data[key] = hf.attrs[key]

    return data
Example #21
    def __init__(self, timfile):

        self.TOAs = []
        self.jump_parameters = []
        self.jump_labels = []
        self.TOA_uncertainties = []

        jump_count = 0
 
        for line in open(timfile):

            if ("MODE" in line):
                pass

            elif ("EFAC" in line):
                pass

            elif ("EQUAD" in line):
                pass

            elif ("FORMAT" in line):
                pass

            elif ("INCLUDE" in line):
                pass

            elif ("JUMP" in line):

                jump_count += 1

                if (jump_count % 2 != 0):
                    if (jump_count < 10):
                        self.jump_parameters.append('JUMP_000' + str(jump_count - (jump_count - 1) // 2))
                    if (jump_count >= 10 and jump_count < 100):
                        self.jump_parameters.append('JUMP_00' + str(jump_count - (jump_count - 1) // 2))

            elif ("INFO" in line):
                pass

            elif ("MODE" in line):
                pass

            else:
                
                elems = line.split()
                self.TOAs.append(np.longdouble(elems[0]))
                self.TOA_uncertainties.append(np.longdouble(elems[1]))
                
                if (jump_count % 2 == 0):
                    self.jump_labels.append('base')

                else:
                    self.jump_labels.append('JUMP_000' + str(jump_count - (jump_count - 1) // 2))

        self.TOAs = np.array(self.TOAs, dtype=np.longdouble)
        self.TOA_uncertainties = np.array(self.TOA_uncertainties, dtype=np.longdouble)
Example #22
def datread(fname):
    datfile = open(fname)
    lines = datfile.readlines()
    # NOTE: np.empty defaults to float64, so the longdouble values below
    # are downcast on assignment
    wave = np.empty(len(lines))
    flux = np.empty(len(lines))
    for i in range(0, len(lines)):
        fields = lines[i].strip().split()
        wave[i] = np.longdouble(fields[0])
        flux[i] = np.longdouble(fields[1])
    datfile.close()
    return wave, flux
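A hedged alternative: if the file really is two whitespace-separated numeric columns, np.loadtxt can do the same read in one call, and arrays created with dtype=np.longdouble avoid the float64 downcast noted above (whether full extended precision survives text parsing depends on the numpy version):

import numpy as np

# "spectrum.dat" is an illustrative filename with two numeric columns
wave, flux = np.loadtxt("spectrum.dat", dtype=np.longdouble, unpack=True)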
Example #23
    def compute_visual_similarity(distance, minimum_distance, maximum_distance):

        dist = np.divide(np.longdouble(distance - minimum_distance), np.longdouble(maximum_distance - minimum_distance))
        similarity = 1 - dist

        if similarity == 0:
            similarity = 0.000001  # prevent division issues
        elif similarity > 1:
            similarity = 1  # prevent floating point issues

        return similarity
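A brief worked sketch of the normalization above, outside the class: similarity is 1 minus the min-max-normalized distance (the bounds below are illustrative):

import numpy as np

d_min, d_max = np.longdouble(0.0), np.longdouble(10.0)   # illustrative bounds
for d in (0.0, 2.5, 10.0):
    dist = np.divide(np.longdouble(d) - d_min, d_max - d_min)
    print(1 - dist)  # 1.0, 0.75, 0.0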
Example #24
 def test_high_precision_keyword(self):
     """Test high precision FITS keyword read."""
     from maltpynt.io import high_precision_keyword_read
     hdr = {"MJDTESTI": 100, "MJDTESTF": np.longdouble(0.5),
            "CIAO": np.longdouble(0.)}
     assert \
         high_precision_keyword_read(hdr,
                                     "MJDTEST") == np.longdouble(100.5), \
         "Keyword MJDTEST read incorrectly"
     assert \
         high_precision_keyword_read(hdr, "CIAO") == np.longdouble(0.), \
         "Keyword CIAO read incorrectly"
Example #25
def waterplot(file1, file2, atm_temp): 
    """receives two am output files, as well as the atmospheric temperature. It then 
    plots the difference between the atmospheric power loading of the two output files
    and plots that difference as a function of frequency. Also plots the difference
    in Rayleigh-Jeans temperature as a function of frequency."""
    f = open(file1, 'r')
    lines = f.readlines()
    f.close()
    nu1 = []
    T_RJ1 = []
    I1 = []
    for line in lines: 
        p = line.split()
        nu1.append(float(p[0]))
        T_RJ1.append(float(p[1]))
        I1.append(float(p[2]))
    nu1 = numpy.array(nu1)
    T_RJ1 = numpy.array(T_RJ1)
    I1 = numpy.array(I1)
    fi = open(file2, 'r')
    lines = fi.readlines()
    fi.close()
    #nu2 = []
    T_RJ2 = []
    I2 = []
    bump = nu1[1] - nu1[0]
    for line in lines: 
        p = line.split()
        #nu2.append(float(p[0]))
        T_RJ2.append(float(p[1]))
        I2.append(float(p[2]))
    graph_nu = []
    T_RJ2 = numpy.array(T_RJ2)
    I2 = numpy.array(I2)
    j = len(nu1)
    P_opt1 = []
    P_opt2 = []
    for i in range(int(j*bump)): #P_opt = eta*k*bandwidth*T_rj_mean. This creates unit bandwidth
        P_opt1.append(numpy.longdouble(k*T_RJ1[int(i/bump)]**2/atm_temp))
        P_opt2.append(numpy.longdouble(k*T_RJ2[int(i/bump)]**2/atm_temp))
        graph_nu.append(nu1[int(i/bump)])
    P_opt1 = numpy.array(P_opt1)
    P_opt2 = numpy.array(P_opt2)
    pylab.subplot(2,1,1)    
    pylab.plot(graph_nu, P_opt1-P_opt2)
    pylab.ylabel('power difference in watts')    
    pylab.xlabel('frequency, in GHz')
    pylab.title(file1+' - '+ file2 + ' atmospheric loading vs. frequency')
    pylab.subplot(2,1,2)
    pylab.plot(nu1, T_RJ1-T_RJ2)
    pylab.ylabel('T_RJ difference')    
    pylab.xlabel('frequency, in GHz')
    pylab.title('T_RJ difference by frequency')
Example #26
 def add( self, x ):
     """Add a value, and update the running stats"""
     self.n += 1
     x = longdouble( x )
     if not self.is_started:
         self.m = x
         self.s = longdouble( 0 )
         self.is_started = True
     else:
         self.last_m = self.m
         self.m += ( x - self.m ) / longdouble( self.n )
         self.s += ( x - self.last_m ) * ( x - self.m )
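Examples #2 and #26 together implement Welford's online algorithm. A hedged, self-contained sketch of how such an accumulator is typically used; the variance accessor is an assumed addition, since only __init__ and add() appear above:

from numpy import longdouble, ulonglong

class RunningStats:
    def __init__(self):
        self.s = longdouble(0)
        self.m = longdouble(0)
        self.last_m = longdouble(0)
        self.n = ulonglong(0)
        self.is_started = False

    def add(self, x):
        self.n += 1
        x = longdouble(x)
        if not self.is_started:
            self.m = x
            self.s = longdouble(0)
            self.is_started = True
        else:
            self.last_m = self.m
            self.m += (x - self.m) / longdouble(self.n)
            self.s += (x - self.last_m) * (x - self.m)

    def variance(self):  # sample variance; an assumed accessor
        return self.s / longdouble(self.n - 1)

stats = RunningStats()
for v in [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]:
    stats.add(v)
print(stats.m, stats.variance())  # mean 5.0, sample variance 32/7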
Example #27
def time_to_longdouble(t):
    """ Return an astropy Time value as MJD in longdouble

    ## SUGGESTION(@paulray): This function is at least partly redundant with
    ## ddouble2ldouble() below...

    ## Also, is it certain that this calculation retains the full precision?

    """
    try:
        return np.longdouble(t.jd1 - DJM0) + np.longdouble(t.jd2)
    except Exception:
        return np.longdouble(t)
Example #28
def constants(test):
    if test == 1:
        a = .1
        c = 25.
        b = 25./2.
        beta = .2/(18.6*10**(-6))

    if test == 0:
        a = np.longdouble(50.*10**(-3))
        b = np.longdouble(50.*10**(-3))
        c = np.longdouble(100.*10**(-3))
        beta = np.longdouble(55.)
    return a, b, c, beta
Example #29
    def buildMatricesFromObservations(self, emissionString, path):
        emissions = {state: { obs: np.longdouble(0.) for obs in self.emits} for state in self.states}
        A = {fm: { to: np.longdouble(0.) for to in self.states} for fm in self.states}

        emissions[path[0:1]][emissionString[0:1]] += 1.
        fm = path[0:1]
        for obs, state in zip(emissionString[1:], path[1:]):
            emissions[state][obs] += 1.
            A[fm][state] += 1.
            fm = state

        self.emission = {state: self._normalize(emissions[state]) for state in self.states}
        self.transition = {fm: self._normalize(A[fm]) for fm in self.states}
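The _normalize helper is not shown; presumably it converts a row of counts into probabilities. A hedged sketch under that assumption, with a uniform fallback for all-zero rows:

def _normalize(counts):
    # Assumed behavior of the missing helper: counts -> probabilities,
    # falling back to a uniform distribution when all counts are zero.
    total = sum(counts.values())
    if total == 0:
        return {k: 1.0 / len(counts) for k in counts}
    return {k: v / total for k, v in counts.items()}

print(_normalize({'A': 3.0, 'B': 1.0}))  # {'A': 0.75, 'B': 0.25}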
Example #30
 def d_phase_d_F(self, toas, param, delay):
     """Calculate the derivative wrt to an spin term."""
     par = getattr(self, param)
     unit = par.units
     pn, idxf, idxv = split_prefixed_name(param)
     order = idxv + 1
     fterms = [0.0 * u.Unit("")] + self.get_spin_terms()
     # make the chosen fterm 1 and the others 0
     fterms = [ft * numpy.longdouble(0.0)/unit for ft in fterms]
     fterms[order] += numpy.longdouble(1.0)
     dt = self.get_dt(toas, delay)
     with u.set_enabled_equivalencies(dimensionless_cycles):
         d_pphs_d_f = taylor_horner(dt.to(u.second), fterms)
         return d_pphs_d_f.to(u.cycle/unit)
Example #31
        ("pulsar_mjd_string", (str, bytes)),
    ],
)
def test_singleton_type(format_, type_):
    t = Time.now()
    assert isinstance(getattr(t, format_), type_)
    t.format = format_
    assert isinstance(t.value, type_)


@pytest.mark.parametrize(
    "format_, val, val2",
    [
        ("mjd", 40000, 1e-10),
        ("pulsar_mjd", 40000, 1e-10),
        ("mjd_long", np.longdouble(40000) + np.longdouble(1e-10), None),
        ("mjd_long", np.longdouble(40000), np.longdouble(1e-10)),
        ("pulsar_mjd_long", np.longdouble(40000) + np.longdouble(1e-10), None),
        ("pulsar_mjd_long", np.longdouble(40000), np.longdouble(1e-10)),
        ("mjd_string", "40000.0000000001", None),
        ("pulsar_mjd_string", "40000.0000000001", None),
    ],
)
def test_singleton_import(format_, val, val2):
    Time(val=val, val2=val2, format=format_, scale="utc")


# time_to


@pytest.mark.parametrize("format_", ["mjd", "pulsar_mjd"])
Example #32
 def value(self):
     mjd1, mjd2 = jds_to_mjds(self.jd1, self.jd2)
     return np.longdouble(mjd1) + np.longdouble(mjd2)
Example #33
                           maxRadius=150)
coordinates = []
distance = []
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
    #print i[0]-150,i[1]-150
    coordinates.append([float(i[0] / 3), float(i[1] / 3)])

for i in range(len(coordinates)):
    coordCopy = coordinates[:]
    del coordCopy[i]
    for c in coordCopy:
        distance.append(
            np.longdouble(
                sqrt((coordinates[i][0] - c[0])**2 +
                     (coordinates[i][1] - c[1])**2)))

s.sendline(str(min(distance)))
# s.interactive()
print(min(distance), j)
# print s.recv()
# cimg = cv2.resize(cimg, (image.width-150, image.height-150))
cv2.imwrite('test4.jpg', cimg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# break
Example #34
def lcurve_from_fits(fits_file,
                     gtistring='GTI',
                     timecolumn='TIME',
                     ratecolumn=None,
                     ratehdu=1,
                     fracexp_limit=0.9):
    """
    Load a lightcurve from a fits file and returns dict.
    .. note ::
        FITS light curve handling is still under testing.
        Absolute times might be incorrect depending on the light curve format.
    Parameters
    ----------
    fits_file : str
        File name of the input light curve in FITS format
    Returns
    -------
    dict :
        Returned dict with the light curve parameters
    Other Parameters
    ----------------
    gtistring : str
        Name of the GTI extension in the FITS file
    timecolumn : str
        Name of the column containing times in the FITS file
    ratecolumn : str
        Name of the column containing rates in the FITS file
    ratehdu : str or int
        Name or index of the FITS extension containing the light curve
    fracexp_limit : float
        Minimum exposure fraction allowed
    """
    logging.warn("""WARNING! FITS light curve handling is still under testing.
        Absolute times might be incorrect.""")
    # TODO:
    # treat consistently TDB, UTC, TAI, etc. This requires some documentation
    # reading. For now, we assume TDB
    from astropy.io import fits as pf
    from astropy.time import Time
    import numpy as np

    lchdulist = pf.open(fits_file)
    lctable = lchdulist[ratehdu].data

    # Units of header keywords
    tunit = lchdulist[ratehdu].header['TIMEUNIT']

    try:
        mjdref = high_precision_keyword_read(lchdulist[ratehdu].header,
                                             'MJDREF')
        mjdref = Time(mjdref, scale='tdb', format='mjd')
    except Exception:
        mjdref = None

    try:
        instr = lchdulist[ratehdu].header['INSTRUME']
    except Exception:
        instr = 'EXTERN'

    # ----------------------------------------------------------------
    # Trying to comply with all different formats of fits light curves.
    # It's a madness...
    try:
        tstart = high_precision_keyword_read(lchdulist[ratehdu].header,
                                             'TSTART')
        tstop = high_precision_keyword_read(lchdulist[ratehdu].header, 'TSTOP')
    except Exception:
        raise Exception('TSTART and TSTOP need to be specified')

    # For nulccorr lcs this should work
    try:
        timezero = high_precision_keyword_read(lchdulist[ratehdu].header,
                                               'TIMEZERO')
        if not timezero:
            timezero = 0
        # Sometimes timezero is "from tstart", sometimes it's an absolute time.
        # This tries to detect which case is this, and always consider it
        # referred to tstart
    except Exception:
        timezero = 0

    # for lcurve light curves this should instead work
    if tunit == 'd':
        # TODO:
        # Check this. For now, I assume TD (JD - 2440000.5).
        # This is likely wrong
        timezero = Time(2440000.5 + timezero, scale='tdb', format='jd')
        tstart = Time(2440000.5 + tstart, scale='tdb', format='jd')
        tstop = Time(2440000.5 + tstop, scale='tdb', format='jd')
        # if None, use NuSTAR default MJDREF
        mjdref = _assign_value_if_none(
            mjdref,
            Time(np.longdouble('55197.00076601852'), scale='tdb',
                 format='mjd'))

        timezero = (timezero - mjdref).to('s').value
        tstart = (tstart - mjdref).to('s').value
        tstop = (tstop - mjdref).to('s').value

    if timezero > tstart:
        timezero -= tstart

    time = np.array(lctable.field(timecolumn), dtype=np.longdouble)
    if time[-1] < tstart:
        time += timezero + tstart
    else:
        time += timezero

    try:
        dt = high_precision_keyword_read(lchdulist[ratehdu].header, 'TIMEDEL')
        if tunit == 'd':
            dt *= 86400
    except Exception:
        logging.warning('Assuming that TIMEDEL is the difference between the'
                        ' first two times of the light curve')
        dt = time[1] - time[0]

    # ----------------------------------------------------------------
    ratecolumn = _assign_value_if_none(
        ratecolumn,
        _look_for_array_in_array(['RATE', 'RATE1', 'COUNTS'], lctable.names))

    rate = np.array(lctable.field(ratecolumn), dtype=float)

    try:
        rate_e = np.array(lctable.field('ERROR'), dtype=np.longdouble)
    except Exception:
        rate_e = np.zeros_like(rate)

    if 'RATE' in ratecolumn:
        rate *= dt
        rate_e *= dt

    try:
        fracexp = np.array(lctable.field('FRACEXP'), dtype=np.longdouble)
    except Exception:
        fracexp = np.ones_like(rate)

    good_intervals = (rate == rate) * (fracexp >= fracexp_limit) * \
        (fracexp <= 1)

    rate[good_intervals] /= fracexp[good_intervals]
    rate_e[good_intervals] /= fracexp[good_intervals]

    rate[np.logical_not(good_intervals)] = 0

    try:
        gtitable = lchdulist[gtistring].data
        gti_list = np.array(
            [[a, b]
             for a, b in zip(gtitable.field('START'), gtitable.field('STOP'))],
            dtype=np.longdouble)
    except Exception:
        gti_list = create_gti_from_condition(time, good_intervals)

    lchdulist.close()

    out = {}
    out['lc'] = rate
    out['elc'] = rate_e
    out['time'] = time
    out['dt'] = dt
    out['GTI'] = gti_list
    out['Tstart'] = tstart
    out['Tstop'] = tstop
    out['Instr'] = instr

    out['MJDref'] = mjdref.value

    out['total_ctrate'] = calc_countrate(time, rate, gtis=gti_list, bintime=dt)
    out['source_ctrate'] = calc_countrate(time,
                                          rate,
                                          gtis=gti_list,
                                          bintime=dt)

    return out
Example #35
 def Factorial(self, start, end):
     # NOTE: `start` is unused; the product always runs from 1 to `end`
     result = np.longdouble(1)
     for i in range(1, end + 1):
         result = result * i
     return result
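A hedged aside on why longdouble helps here: float64 overflows to inf at 171!, while x86 80-bit longdouble reaches roughly 1754! (thresholds are platform-dependent; where longdouble is just double, both lines print True):

import numpy as np

def factorial_ld(n):
    result = np.longdouble(1)
    for i in range(1, n + 1):
        result = result * i
    return result

print(np.isinf(np.float64(factorial_ld(171))))  # True: 171! exceeds float64
print(np.isinf(factorial_ld(171)))              # False with 80-bit longdouble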
Example #36
    def makeConstantRoundTrip(self, file_ending):
        # write
        series = api.Series("unittest_py_constant_API." + file_ending,
                            api.Access_Type.create)

        ms = series.iterations[0].meshes
        SCALAR = api.Mesh_Record_Component.SCALAR
        DS = api.Dataset
        DT = api.Datatype

        extent = [42, 24, 11]

        # write one of each supported types
        ms["char"][SCALAR].reset_dataset(DS(DT.CHAR, extent))
        ms["char"][SCALAR].make_constant("c")
        ms["pyint"][SCALAR].reset_dataset(DS(DT.INT, extent))
        ms["pyint"][SCALAR].make_constant(13)
        ms["pyfloat"][SCALAR].reset_dataset(DS(DT.DOUBLE, extent))
        ms["pyfloat"][SCALAR].make_constant(3.1416)
        ms["pybool"][SCALAR].reset_dataset(DS(DT.BOOL, extent))
        ms["pybool"][SCALAR].make_constant(False)

        if found_numpy:
            ms["int16"][SCALAR].reset_dataset(DS(np.dtype("int16"), extent))
            ms["int16"][SCALAR].make_constant(np.int16(234))
            ms["int32"][SCALAR].reset_dataset(DS(np.dtype("int32"), extent))
            ms["int32"][SCALAR].make_constant(np.int32(43))
            ms["int64"][SCALAR].reset_dataset(DS(np.dtype("int64"), extent))
            ms["int64"][SCALAR].make_constant(np.int64(987654321))

            ms["uint16"][SCALAR].reset_dataset(DS(np.dtype("uint16"), extent))
            ms["uint16"][SCALAR].make_constant(np.uint16(134))
            ms["uint32"][SCALAR].reset_dataset(DS(np.dtype("uint32"), extent))
            ms["uint32"][SCALAR].make_constant(np.uint32(32))
            ms["uint64"][SCALAR].reset_dataset(DS(np.dtype("uint64"), extent))
            ms["uint64"][SCALAR].make_constant(np.uint64(9876543210))

            ms["single"][SCALAR].reset_dataset(DS(np.dtype("single"), extent))
            ms["single"][SCALAR].make_constant(np.single(1.234))
            ms["double"][SCALAR].reset_dataset(DS(np.dtype("double"), extent))
            ms["double"][SCALAR].make_constant(np.double(1.234567))
            ms["longdouble"][SCALAR].reset_dataset(
                DS(np.dtype("longdouble"), extent))
            ms["longdouble"][SCALAR].make_constant(np.longdouble(1.23456789))

        # flush and close file
        del series

        # read back
        series = api.Series("unittest_py_constant_API." + file_ending,
                            api.Access_Type.read_only)

        ms = series.iterations[0].meshes
        o = [1, 2, 3]
        e = [1, 1, 1]

        self.assertEqual(ms["char"][SCALAR].load_chunk(o, e), ord('c'))
        self.assertEqual(ms["pyint"][SCALAR].load_chunk(o, e), 13)
        self.assertEqual(ms["pyfloat"][SCALAR].load_chunk(o, e), 3.1416)
        self.assertEqual(ms["pybool"][SCALAR].load_chunk(o, e), False)

        if found_numpy:
            self.assertTrue(ms["int16"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('int16'))
            self.assertTrue(ms["int32"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('int32'))
            self.assertTrue(ms["int64"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('int64'))
            self.assertTrue(ms["uint16"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('uint16'))
            self.assertTrue(ms["uint32"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('uint32'))
            self.assertTrue(ms["uint64"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('uint64'))
            self.assertTrue(ms["single"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('single'))
            self.assertTrue(ms["double"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('double'))
            self.assertTrue(ms["longdouble"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('longdouble'))

            self.assertEqual(ms["int16"][SCALAR].load_chunk(o, e),
                             np.int16(234))
            self.assertEqual(ms["int32"][SCALAR].load_chunk(o, e),
                             np.int32(43))
            self.assertEqual(ms["int64"][SCALAR].load_chunk(o, e),
                             np.int64(987654321))
            self.assertEqual(ms["uint16"][SCALAR].load_chunk(o, e),
                             np.uint16(134))
            self.assertEqual(ms["uint32"][SCALAR].load_chunk(o, e),
                             np.uint32(32))
            self.assertEqual(ms["uint64"][SCALAR].load_chunk(o, e),
                             np.uint64(9876543210))
            self.assertEqual(ms["single"][SCALAR].load_chunk(o, e),
                             np.single(1.234))
            self.assertEqual(ms["longdouble"][SCALAR].load_chunk(o, e),
                             np.longdouble(1.23456789))
            self.assertEqual(ms["double"][SCALAR].load_chunk(o, e),
                             np.double(1.234567))
Example #37
fmax = 1.0 / (dt * 2.0)
print(' old fmax=', fmax)
n1 = 1576800  ### original time axis length
print('n1=', n1)
freq = np.fft.rfftfreq(n1, d=dt)

nfreq = int((n1 + 1) / 2.0)
print(' old number of frequencies=', nfreq)

nfreq = freq.size
fmax = freq[nfreq - 1]
print(' new fmax=', fmax)
print(' number of frequencies=', nfreq)

#df = fmax/(nfreq-1)
df = np.longdouble(fmax) / (nfreq - 1)
print(' old frequency increment=', df)
df = (freq[nfreq - 1] - freq[0]) / nfreq
print(' frequency increment=', df)

operiods = operiod * 60.0  # orbital period, seconds
period = operiods / harmonic  # divide by the harmonic to get signal period
print(' orbital period=', operiod, ' minutes')
print(' orbital period=', operiods, ' seconds')
print(' signal period=', period)

finter = 1.0 / period
print(' finter=', finter)

ifnum = int(np.round(finter / df))
print(' df=', df)
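A hedged sanity check of the frequency grid logic above: np.fft.rfftfreq spaces its bins exactly 1/(n*d) apart, so the two increments printed above should agree up to the (nfreq - 1) versus nfreq divisor. The dt value is illustrative:

import numpy as np

n1, dt = 1576800, 1.0   # dt is illustrative
freq = np.fft.rfftfreq(n1, d=dt)
print(np.allclose(np.diff(freq), 1.0 / (n1 * dt)))  # True: uniform spacing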
Example #38
 def test_longdouble_int(self):
     # gh-627
     x = np.longdouble(np.inf)
     assert_raises(OverflowError, x.__int__)
     x = np.clongdouble(np.inf)
     assert_raises(OverflowError, x.__int__)
Example #39
    def attributeRoundTrip(self, file_ending):
        # write
        series = api.Series("unittest_py_API." + file_ending,
                            api.Access_Type.create)

        # write one of each supported types
        series.set_attribute("char", 'c')  # string
        series.set_attribute("pyint", 13)
        series.set_attribute("pyfloat", 3.1416)
        series.set_attribute("pystring", "howdy!")
        series.set_attribute("pystring2", str("howdy, too!"))
        series.set_attribute("pystring3", b"howdy, again!")
        series.set_attribute("pybool", False)

        # array of ...
        series.set_attribute("arr_pyint", (
            13,
            26,
            39,
            52,
        ))
        series.set_attribute("arr_pyfloat", (
            1.2,
            3.4,
            4.5,
            5.6,
        ))
        series.set_attribute("arr_pystring", (
            "x",
            "y",
            "z",
            "www",
        ))
        series.set_attribute("arr_pybool", (
            False,
            True,
            True,
            False,
        ))
        # list of ...
        series.set_attribute("l_pyint", [13, 26, 39, 52])
        series.set_attribute("l_pyfloat", [1.2, 3.4, 4.5, 5.6])
        series.set_attribute("l_pystring", ["x", "y", "z", "www"])
        series.set_attribute("l_pybool", [False, True, True, False])

        if found_numpy:
            series.set_attribute("int16", np.int16(234))
            series.set_attribute("int32", np.int32(43))
            series.set_attribute("int64", np.int64(987654321))
            series.set_attribute("uint16", np.uint16(134))
            series.set_attribute("uint32", np.uint32(32))
            series.set_attribute("uint64", np.int64(9876543210))
            series.set_attribute("single", np.single(1.234))
            series.set_attribute("double", np.double(1.234567))
            series.set_attribute("longdouble", np.longdouble(1.23456789))
            # array of ...
            series.set_attribute("arr_int16", (
                np.int16(23),
                np.int16(26),
            ))
            series.set_attribute("arr_int32", (
                np.int32(34),
                np.int32(37),
            ))
            series.set_attribute("arr_int64", (
                np.int64(45),
                np.int64(48),
            ))
            series.set_attribute("arr_uint16", (
                np.uint16(23),
                np.uint16(26),
            ))
            series.set_attribute("arr_uint32", (
                np.uint32(34),
                np.uint32(37),
            ))
            series.set_attribute("arr_uint64", (
                np.uint64(45),
                np.uint64(48),
            ))
            series.set_attribute("arr_single", (
                np.single(5.6),
                np.single(5.9),
            ))
            series.set_attribute("arr_double", (
                np.double(6.7),
                np.double(7.1),
            ))
            # list of ...
            series.set_attribute("l_int16", [np.int16(23), np.int16(26)])
            series.set_attribute("l_int32", [np.int32(34), np.int32(37)])
            series.set_attribute("l_int64", [np.int64(45), np.int64(48)])
            series.set_attribute("l_uint16", [np.uint16(23), np.uint16(26)])
            series.set_attribute("l_uint32", [np.uint32(34), np.uint32(37)])
            series.set_attribute("l_uint64", [np.uint64(45), np.uint64(48)])
            series.set_attribute("l_single", [np.single(5.6), np.single(5.9)])
            series.set_attribute("l_double", [np.double(6.7), np.double(7.1)])
            series.set_attribute("l_longdouble",
                                 [np.longdouble(7.8e9),
                                  np.longdouble(8.2e3)])
            # numpy.array of ...
            series.set_attribute("nparr_int16",
                                 np.array([234, 567], dtype=np.int16))
            series.set_attribute("nparr_int32",
                                 np.array([456, 789], dtype=np.int32))
            series.set_attribute("nparr_int64",
                                 np.array([678, 901], dtype=np.int64))
            series.set_attribute("nparr_single",
                                 np.array([1.2, 2.3], dtype=np.single))
            series.set_attribute("nparr_double",
                                 np.array([4.5, 6.7], dtype=np.double))
            series.set_attribute("nparr_longdouble",
                                 np.array([8.9, 7.6], dtype=np.longdouble))

        # c_types
        # TODO remove the .value and handle types directly?
        series.set_attribute("byte_c", ctypes.c_byte(30).value)
        series.set_attribute("ubyte_c", ctypes.c_ubyte(50).value)
        series.set_attribute("char_c", ctypes.c_char(100).value)  # 'd'
        series.set_attribute("int16_c", ctypes.c_int16(2).value)
        series.set_attribute("int32_c", ctypes.c_int32(3).value)
        series.set_attribute("int64_c", ctypes.c_int64(4).value)
        series.set_attribute("uint16_c", ctypes.c_uint16(5).value)
        series.set_attribute("uint32_c", ctypes.c_uint32(6).value)
        series.set_attribute("uint64_c", ctypes.c_uint64(7).value)
        series.set_attribute("float_c", ctypes.c_float(8.e9).value)
        series.set_attribute("double_c", ctypes.c_double(7.e289).value)
        # TODO init of > e304 ?
        series.set_attribute("longdouble_c", ctypes.c_longdouble(6.e200).value)

        del series

        # read back
        series = api.Series("unittest_py_API." + file_ending,
                            api.Access_Type.read_only)

        self.assertEqual(series.get_attribute("char"), "c")
        self.assertEqual(series.get_attribute("pystring"), "howdy!")
        self.assertEqual(series.get_attribute("pystring2"), "howdy, too!")
        self.assertEqual(bytes(series.get_attribute("pystring3")),
                         b"howdy, again!")
        self.assertEqual(series.get_attribute("pyint"), 13)
        self.assertAlmostEqual(series.get_attribute("pyfloat"), 3.1416)
        self.assertEqual(series.get_attribute("pybool"), False)

        if found_numpy:
            self.assertEqual(series.get_attribute("int16"), 234)
            self.assertEqual(series.get_attribute("int32"), 43)
            self.assertEqual(series.get_attribute("int64"), 987654321)
            self.assertAlmostEqual(series.get_attribute("single"), 1.234)
            self.assertAlmostEqual(series.get_attribute("double"), 1.234567)
            self.assertAlmostEqual(series.get_attribute("longdouble"),
                                   1.23456789)
            # array of ... (will be returned as list)
            self.assertListEqual(series.get_attribute("arr_int16"), [
                np.int16(23),
                np.int16(26),
            ])
            # list of ...
            self.assertListEqual(series.get_attribute("l_int16"),
                                 [np.int16(23), np.int16(26)])
            self.assertListEqual(series.get_attribute("l_int32"),
                                 [np.int32(34), np.int32(37)])
            self.assertListEqual(series.get_attribute("l_int64"),
                                 [np.int64(45), np.int64(48)])
            self.assertListEqual(series.get_attribute("l_uint16"),
                                 [np.uint16(23), np.uint16(26)])
            self.assertListEqual(series.get_attribute("l_uint32"),
                                 [np.uint32(34), np.uint32(37)])
            self.assertListEqual(series.get_attribute("l_uint64"),
                                 [np.uint64(45), np.uint64(48)])
            # self.assertListEqual(series.get_attribute("l_single"),
            #     [np.single(5.6), np.single(5.9)])
            self.assertListEqual(
                series.get_attribute("l_double"),
                [np.double(6.7), np.double(7.1)])
            self.assertListEqual(series.get_attribute("l_longdouble"),
                                 [np.longdouble(7.8e9),
                                  np.longdouble(8.2e3)])

            # numpy.array of ...
            self.assertListEqual(series.get_attribute("nparr_int16"),
                                 [234, 567])
            self.assertListEqual(series.get_attribute("nparr_int32"),
                                 [456, 789])
            self.assertListEqual(series.get_attribute("nparr_int64"),
                                 [678, 901])
            np.testing.assert_almost_equal(
                series.get_attribute("nparr_single"), [1.2, 2.3])
            np.testing.assert_almost_equal(
                series.get_attribute("nparr_double"), [4.5, 6.7])
            np.testing.assert_almost_equal(
                series.get_attribute("nparr_longdouble"), [8.9, 7.6])
            # TODO instead of returning lists, return all arrays as np.array?
            # self.assertEqual(
            #     series.get_attribute("nparr_int16").dtype, np.int16)
            # self.assertEqual(
            #     series.get_attribute("nparr_int32").dtype, np.int32)
            # self.assertEqual(
            #     series.get_attribute("nparr_int64").dtype, np.int64)
            # self.assertEqual(
            #     series.get_attribute("nparr_single").dtype, np.single)
            # self.assertEqual(
            #     series.get_attribute("nparr_double").dtype, np.double)
            # self.assertEqual(
            #    series.get_attribute("nparr_longdouble").dtype, np.longdouble)

        # c_types
        self.assertEqual(series.get_attribute("byte_c"), 30)
        self.assertEqual(series.get_attribute("ubyte_c"), 50)
        self.assertEqual(chr(series.get_attribute("char_c")), 'd')
        self.assertEqual(series.get_attribute("int16_c"), 2)
        self.assertEqual(series.get_attribute("int32_c"), 3)
        self.assertEqual(series.get_attribute("int64_c"), 4)
        self.assertEqual(series.get_attribute("uint16_c"), 5)
        self.assertEqual(series.get_attribute("uint32_c"), 6)
        self.assertEqual(series.get_attribute("uint64_c"), 7)
        self.assertAlmostEqual(series.get_attribute("float_c"), 8.e9)
        self.assertAlmostEqual(series.get_attribute("double_c"), 7.e289)
        self.assertAlmostEqual(series.get_attribute("longdouble_c"),
                               ctypes.c_longdouble(6.e200).value)
Example #40
 def d_omega_d_OM(self):
     """dOmega/dOM = 1 """
     return np.longdouble(np.ones((len(self.tt0)))) * u.Unit("")
Example #41
 def d_a1_d_A1(self):
     return np.longdouble(np.ones(len(self.tt0))) * u.Unit("")
Example #42
 def d_eps2_d_EPS2(self):
     return np.longdouble(np.ones(len(self.t))) * u.Unit('')
Example #43
def main(argv=None):
    import argparse
    parser = argparse.ArgumentParser(
        description="PINT tool for simulating TOAs")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("timfile", help="Output TOA file name")
    parser.add_argument("--inputtim",
                        help="Input tim file for fake TOA sampling",
                        type=str,
                        default=None)
    parser.add_argument("--startMJD",
                        help="MJD of first fake TOA (default=56000.0)",
                        type=float,
                        default=56000.0)
    parser.add_argument("--ntoa",
                        help="Number of fake TOAs to generate",
                        type=int,
                        default=100)
    parser.add_argument("--duration",
                        help="Span of TOAs to generate (days)",
                        type=float,
                        default=400.0)
    parser.add_argument("--obs",
                        help="Observatory code (default: GBT)",
                        default="GBT")
    parser.add_argument("--freq",
                        help="Frequency for TOAs (MHz) (default: 1400)",
                        nargs='+',
                        type=float,
                        default=1400.0)
    parser.add_argument(
        "--error",
        help="Random error to apply to each TOA (us, default=1.0)",
        type=float,
        default=1.0)
    parser.add_argument(
        "--fuzzdays",
        help="Standard deviation of 'fuzz' distribution (jd) (default: 0.0)",
        type=float,
        default=0.0)
    parser.add_argument("--plot",
                        help="Plot residuals",
                        action="store_true",
                        default=False)
    parser.add_argument("--ephem", help="Ephemeris to use", default="DE421")
    parser.add_argument("--planets",
                        help="Use planetary Shapiro delay",
                        action="store_true",
                        default=False)
    parser.add_argument("--format",
                        help="The format of out put .tim file.",
                        default='TEMPO2')
    args = parser.parse_args(argv)

    log.info("Reading model from {0}".format(args.parfile))
    m = pint.models.get_model(args.parfile)

    out_format = args.format
    error = args.error * u.microsecond

    if args.inputtim is None:
        log.info('Generating uniformly spaced TOAs')
        duration = args.duration * u.day
        #start = Time(args.startMJD,scale='utc',format='pulsar_mjd',precision=9)
        start = np.longdouble(args.startMJD) * u.day
        freq = np.atleast_1d(args.freq) * u.MHz
        site = get_observatory(args.obs)
        scale = site.timescale

        times = np.linspace(0,
                            duration.to(u.day).value,
                            args.ntoa) * u.day + start

        # 'Fuzz' out times
        fuzz = np.random.normal(scale=args.fuzzdays, size=len(times)) * u.day
        times += fuzz

        # Add multiple frequencies
        freq_array = get_freq_array(freq, len(times))
        tl = [
            toa.TOA(t.value, error=error, obs=args.obs, freq=f, scale=scale)
            for t, f in zip(times, freq_array)
        ]
        ts = toa.TOAs(toalist=tl)
    else:
        log.info('Reading initial TOAs from {0}'.format(args.inputtim))
        ts = toa.TOAs(toafile=args.inputtim)
        ts.table['error'][:] = error

    # WARNING! I'm not sure how clock corrections should be handled here!
    # Do we apply them, or not?
    if not any(['clkcorr' in f for f in ts.table['flags']]):
        log.info("Applying clock corrections.")
        ts.apply_clock_corrections()
    if 'tdb' not in ts.table.colnames:
        log.info("Getting IERS params and computing TDBs.")
        ts.compute_TDBs()
    if 'ssb_obs_pos' not in ts.table.colnames:
        log.info("Computing observatory positions and velocities.")
        ts.compute_posvels(args.ephem, args.planets)

    # F_local has units of Hz; discard cycles unit in phase to get a unit
    # that TimeDelta understands
    log.info("Creating TOAs")
    F_local = m.d_phase_d_toa(ts)
    rs = m.phase(ts).frac.value / F_local

    # Adjust the TOA times to put them where their residuals will be 0.0

    ts.adjust_TOAs(TimeDelta(-1.0 * rs))
    rspost = m.phase(ts).frac.value / F_local

    log.info("Second iteration")
    # Do a second iteration
    ts.adjust_TOAs(TimeDelta(-1.0 * rspost))

    err = np.random.randn(len(ts.table)) * error
    # Add the actual error fuzzing
    ts.adjust_TOAs(TimeDelta(err))

    # Write TOAs to a file
    ts.write_TOA_file(args.timfile, name='fake', format=out_format)

    if args.plot:
        # This should be a very boring plot with all residuals flat at 0.0!
        import matplotlib.pyplot as plt
        rspost2 = m.phase(ts).frac / F_local
        plt.errorbar(ts.get_mjds().value,
                     rspost2.to(u.us).value,
                     yerr=ts.get_errors().to(u.us).value)
        newts = pint.toa.get_TOAs(args.timfile,
                                  ephem=args.ephem,
                                  planets=args.planets)
        rsnew = m.phase(newts).frac / F_local
        plt.errorbar(newts.get_mjds().value,
                     rsnew.to(u.us).value,
                     yerr=newts.get_errors().to(u.us).value)
        #plt.plot(ts.get_mjds(),rspost.to(u.us),'x')
        plt.xlabel('MJD')
        plt.ylabel('Residual (us)')
        plt.show()
Example #44
 def __init__(self, me: Agent) -> None:
     self.me = me
     self.amount = np.longdouble(0.0)
Example #45
    def __init__(
        self,
        channel_dir,
        dtype,
        subdir_cadence_secs,
        file_cadence_millisecs,
        sample_rate_numerator,
        sample_rate_denominator,
        start=None,
        ignore_tags=False,
        is_complex=True,
        num_subchannels=1,
        uuid_str=None,
        center_frequencies=None,
        metadata={},
        is_continuous=True,
        compression_level=0,
        checksum=False,
        marching_periods=True,
        stop_on_skipped=True,
        debug=False,
        min_chunksize=None,
    ):
        """Write a channel of data in Digital RF format.

        In addition to storing the input samples in Digital RF format, this
        block also populates the channel's accompanying Digital Metadata
        at the sample indices when the metadata changes or a data skip occurs.
        See the Notes section for details on what metadata is stored.

        Parameters
        ----------

        channel_dir : string
            The directory where this channel is to be written. It will be
            created if it does not exist. The basename (last component) of the
            path is considered the channel's name for reading purposes.

        dtype : numpy.dtype | object to be cast by numpy.dtype()
            Object that gives the numpy dtype of the data to be written. This
            value is passed into ``numpy.dtype`` to get the actual dtype
            (e.g. ``numpy.dtype('>i4')``). Scalar types, complex types, and
            structured complex types with 'r' and 'i' fields of scalar types
            are valid.

        subdir_cadence_secs : int
            The number of seconds of data to store in one subdirectory. The
            timestamp of any subdirectory will be an integer multiple of this
            value.

        file_cadence_millisecs : int
            The number of milliseconds of data to store in each file. Note that
            an integer number of files must exactly span a subdirectory,
            implying::

                (subdir_cadence_secs*1000 % file_cadence_millisecs) == 0

        sample_rate_numerator : int
            Numerator of sample rate in Hz.

        sample_rate_denominator : int
            Denominator of sample rate in Hz.


        Other Parameters
        ----------------

        start : None | int | float | string, optional
            A value giving the time/index of the channel's first sample. When
            `ignore_tags` is False, 'rx_time' tags will be used to identify
            data gaps and skip the sample index forward appropriately (tags
            that refer to an earlier time will be ignored).
            If None or '' and `ignore_tags` is False, drop data until an
            'rx_time' tag arrives and sets the start time (a ValueError is
            raised if `ignore_tags` is True).
            If an integer, it is interpreted as a sample index given in the
            number of samples since the epoch (time_since_epoch*sample_rate).
            If a float, it is interpreted as a UTC timestamp (seconds since
            epoch).
            If a string, two forms are permitted:
                1) a string which can be evaluated to an integer/float and
                    interpreted as above,
                2) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z'

        ignore_tags : bool, optional
            If True, do not use 'rx_time' tags to set the sample index and do
            not write other tags as Digital Metadata.

        is_complex : bool, optional
            This parameter is only used when `dtype` is not complex.
            If True (the default), interpret supplied data as interleaved
            complex I/Q samples. If False, each sample has a single value.

        num_subchannels : int, optional
            Number of subchannels to write simultaneously. Default is 1.

        uuid_str : None | string, optional
            UUID string that will act as a unique identifier for the data and
            can be used to tie the data files to metadata. If None, a random
            UUID will be generated.

        center_frequencies : None | array_like of floats, optional
            List of subchannel center frequencies to include in initial
            metadata. If None, ``[0.0]*num_subchannels`` will be used.
            Subsequent center frequency metadata samples can be written using
            'rx_freq' stream tags.

        metadata : dict, optional
            Dictionary of additional metadata to include in initial Digital
            Metadata sample. Subsequent metadata samples can be written
            using 'metadata' stream tags, but all keys intended to be included
            should be set here first even if their values are empty.

        is_continuous : bool, optional
            If True, data will be written in continuous blocks. If False,
            data will be written with gapped blocks. Fastest write/read speed is
            achieved with `is_continuous` True, `checksum` False, and
            `compression_level` 0 (all defaults).

        compression_level : int, optional
            0 for no compression (default), 1-9 for varying levels of gzip
            compression (1 == least compression, least CPU; 9 == most
            compression, most CPU).

        checksum : bool, optional
            If True, use HDF5 checksum capability. If False (default), no
            checksum.

        marching_periods : bool, optional
            If True, write a period to stdout for every subdirectory when
            writing.

        stop_on_skipped : bool, optional
            If True, stop writing when a sample would be skipped (such as from
            a dropped packet).

        debug : bool, optional
            If True, print debugging information.

        min_chunksize : None | int, optional
            Minimum number of samples to consume at once. This value can be
            used to adjust the sink's performance to reduce processing time.
            If None, a sensible default will be used.


        Notes
        -----

        By convention, this block sets the following Digital Metadata fields:

            uuid_str : string
                Value provided by the `uuid_str` argument.

            sample_rate_numerator : int
                Value provided by the `sample_rate_numerator` argument.

            sample_rate_denominator : int
                Value provided by the `sample_rate_denominator` argument.

            center_frequencies : list of floats with length `num_subchannels`
                Subchannel center frequencies as specified by
                `center_frequencies` argument and 'rx_freq' stream tags.

        Additional metadata fields can be set using the `metadata` argument and
        stream tags. Nested dictionaries are permitted and are helpful for
        grouping properties. For example, receiver-specific metadata is
        typically specified with a sub-dictionary using the 'receiver' field.


        This block acts on the following stream tags when `ignore_tags` is
        False:

            rx_time : (int secs, float frac) tuple
                Used to set the sample index from the given time since epoch.

            rx_freq : float
                Used to set the 'center_frequencies' value in the channel's
                Digital Metadata as described above.

            metadata : dict
                Used to populate additional (key, value) pairs in the channel's
                Digital Metadata. Any keys passed in 'metadata' tags should be
                included in the `metadata` argument at initialization to ensure
                that they always exist in the Digital Metadata.

        """
        dtype = np.dtype(dtype)
        # create structured dtype for interleaved samples if necessary
        if is_complex and (not np.issubdtype(dtype, np.complexfloating)
                           and not dtype.names):
            realdtype = dtype
            dtype = np.dtype([('r', realdtype), ('i', realdtype)])

        if num_subchannels == 1:
            in_sig = [dtype]
        else:
            in_sig = [(dtype, num_subchannels)]

        gr.sync_block.__init__(
            self,
            name="digital_rf_channel_sink",
            in_sig=in_sig,
            out_sig=None,
        )

        self._channel_dir = channel_dir
        self._channel_name = os.path.basename(channel_dir)
        self._dtype = dtype
        self._subdir_cadence_secs = subdir_cadence_secs
        self._file_cadence_millisecs = file_cadence_millisecs
        self._sample_rate_numerator = sample_rate_numerator
        self._sample_rate_denominator = sample_rate_denominator
        self._uuid_str = uuid_str
        self._ignore_tags = ignore_tags
        self._is_complex = is_complex
        self._num_subchannels = num_subchannels
        self._is_continuous = is_continuous
        self._compression_level = compression_level
        self._checksum = checksum
        self._marching_periods = marching_periods
        self._stop_on_skipped = stop_on_skipped
        self._debug = debug

        self._samples_per_second = (
            np.longdouble(np.uint64(sample_rate_numerator)) /
            np.longdouble(np.uint64(sample_rate_denominator)))

        if min_chunksize is None:
            self._min_chunksize = int(self._samples_per_second // 1000)
        else:
            self._min_chunksize = min_chunksize

        # reduce CPU usage by setting a minimum number of samples to handle
        # at once
        # (really want to set_min_noutput_items, but no way to do that from
        #  Python)
        self.set_output_multiple(self._min_chunksize)

        # will be None if start is None or ''
        self._start_sample = util.parse_identifier_to_sample(
            start,
            self._samples_per_second,
            None,
        )
        if self._start_sample is None:
            if self._ignore_tags:
                raise ValueError('Must specify start if ignore_tags is True.')
            # data without a time tag will be written starting at global index
            # of 0, i.e. the Unix epoch
            # we don't want to guess the start time because the user would
            # know better and it could obscure bugs by setting approximately
            # the correct time (samples in 1970 are immediately obvious)
            self._start_sample = 0
        self._next_rel_sample = 0

        # stream tags to read (in addition to rx_time, handled specially)
        if LooseVersion(gr.version()) >= LooseVersion('3.7.12'):
            self._stream_tag_translators = {
                pmt.intern('rx_freq'): translate_rx_freq,
                pmt.intern('metadata'): translate_metadata,
            }
        else:
            # USRP source in gnuradio < 3.7.12 has bad rx_freq tags, so avoid
            # trouble by ignoring rx_freq tags for those gnuradio versions
            self._stream_tag_translators = {
                pmt.intern('metadata'): translate_metadata,
            }

        # create metadata dictionary that will be updated and written whenever
        # new metadata is received in stream tags
        self._metadata = metadata.copy()
        if center_frequencies is None:
            center_frequencies = np.array([0.0] * self._num_subchannels)
        else:
            center_frequencies = np.ascontiguousarray(center_frequencies)
        self._metadata.update(
            # standard metadata by convention
            uuid_str='',
            sample_rate_numerator=self._sample_rate_numerator,
            sample_rate_denominator=self._sample_rate_denominator,
            center_frequencies=center_frequencies,
        )

        # create directories for RF data channel and metadata
        self._metadata_dir = os.path.join(self._channel_dir, 'metadata')
        if not os.path.exists(self._metadata_dir):
            os.makedirs(self._metadata_dir)

        # sets self._Writer, self._DMDWriter, and adds to self._metadata
        self._create_writer()

        # dict of metadata samples to be written, add for first sample
        # keys: absolute sample index for metadata
        # values: metadata dictionary to update self._metadata and then write
        self._md_queue = defaultdict(dict)
        self._md_queue[self._start_sample] = {}
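
A quick way to exercise the cadence rule quoted in the docstring above, plus the
sink's exact-rational sample-rate trick, is sketched below. The helper names
(validate_cadence, exact_sample_rate) are hypothetical, not part of Digital RF:

import numpy as np

def validate_cadence(subdir_cadence_secs, file_cadence_millisecs):
    # An integer number of files must exactly span one subdirectory:
    # (subdir_cadence_secs * 1000 % file_cadence_millisecs) == 0
    if (subdir_cadence_secs * 1000) % file_cadence_millisecs != 0:
        raise ValueError('file cadence must evenly divide the subdir cadence')

def exact_sample_rate(numerator, denominator):
    # Mirror the sink: route through uint64 and longdouble so the rational
    # sample rate keeps extended precision instead of collapsing to a double.
    return (np.longdouble(np.uint64(numerator)) /
            np.longdouble(np.uint64(denominator)))

validate_cadence(3600, 1000)      # one-hour subdirs, one-second files
print(exact_sample_rate(200, 3))  # 66.666... Hz at long-double precision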
Ejemplo n.º 46
0
 def Combinations(self, n, r):
     # C(n, r) = n! / ((n - r)! * r!)
     C_n_r = self.Factorial(1, (n - r))
     C_r = self.Factorial(1, (r))
     C_n = self.Factorial(1, (n))
     result = np.longdouble(C_n / (C_n_r * C_r))
     return result
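
Because the factorial-ratio route above overflows long before the final
quotient does, it is worth cross-checking against exact integer arithmetic.
A minimal sketch that stands in for the method without the class's Factorial
helper:

import math
import numpy as np

def combinations(n, r):
    # Same quantity, C(n, r) = n! / ((n - r)! * r!), computed exactly by
    # math.comb and only then cast to extended precision.
    return np.longdouble(math.comb(n, r))

assert combinations(10, 3) == 120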
Ejemplo n.º 47
0
        A[B[i]] = B[j]
        if X[B[j]] < 1:  # If x_(b_j) now has too much probability mass,
            B[i], B[j] = B[j], B[i]  # Swap, b_i, b_j, it becomes a donor.
            j += 1
        else:  # Otherwise, leave it as an acceptor
            i -= 1

    new_pmf = np.copy(X[:-1])
    for a_i, pmf_i in zip(A, X[:-1]):
        new_pmf[a_i] += 1 - pmf_i
    check_equal(new_pmf, pmf)
    return X[:-1], A
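
Assuming the fragment above is the table-building half of the alias method,
with X holding the scaled probabilities and A the alias indices, drawing a
sample from the finished tables takes two uniform draws. A sketch under that
assumption:

import numpy as np

def alias_draw(X, A, rng=np.random.default_rng()):
    # Pick a column uniformly, keep it with probability X[i],
    # otherwise return its alias A[i].
    i = rng.integers(len(X))
    return i if rng.random() < X[i] else A[i]

# e.g. np.bincount([alias_draw(X, A) for _ in range(100000)]) ~ pmf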


if args.type == 'exponential':
    volume = 1 / np.longdouble(args.size)
    f = lambda x: exp(-x)
    CDF = lambda x: 1 - f(x)

elif args.type == 'normal':
    oneHalf = 1 / np.longdouble(2)
    pi = sum([
        np.longdouble(pi_digit) * power(10, -i)
        for i, pi_digit in enumerate('314159265358979323846')
    ])
    alpha = sqrt(oneHalf * pi)

    volume = sqrt(2 * pi) / np.longdouble(2 * args.size)
    f = lambda x: exp(-oneHalf * x * x)
    CDF = np.vectorize(lambda x: alpha * erf(sqrt(oneHalf) * x))
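
The digit-by-digit construction of pi above seeds a long-double constant
without routing it through a 64-bit literal; it should agree with numpy's
double-precision pi to well within one double ulp. A quick check:

import numpy as np

power = lambda x, y: np.power(x, y, dtype=np.longdouble)
pi = sum(np.longdouble(d) * power(10, -i)
         for i, d in enumerate('314159265358979323846'))
assert abs(pi - np.pi) < 1e-15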
Ejemplo n.º 48
0
 def exponential(self, x, a):
     y = np.longdouble((-1 * a / (np.exp(-1 * a * np.max(self.data)) -
                                  np.exp(-1 * a * np.min(self.data)))) *
                       np.exp(-1 * a * x))
     return y
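
The prefactor above is the normalization of an exponential density truncated
to [min(data), max(data)], so the returned curve should integrate to 1 over
that range. A standalone check with the data range passed in explicitly (the
function name is hypothetical):

import numpy as np

def truncated_exp_pdf(x, a, lo, hi):
    # a * exp(-a * x), renormalized to integrate to 1 on [lo, hi]
    norm = -a / (np.exp(-a * hi) - np.exp(-a * lo))
    return np.longdouble(norm * np.exp(-a * x))

x = np.linspace(0.0, 5.0, 10001)
area = np.trapz(truncated_exp_pdf(x, 1.3, x[0], x[-1]), x)
assert abs(area - 1.0) < 1e-6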
Ejemplo n.º 49
0
    def __init__(self, *args: Any, **kwargs: Any):
        """Constructor (see TrajectorySH constructor)"""
        TrajectorySH.__init__(self, *args, **kwargs)

        self.prob_cum = np.longdouble(0.0)
        self.zeta = self.random()
Ejemplo n.º 50
0
 def test_int_from_longdouble(self):
     x = np.longdouble(1.5)
     assert_equal(int(x), 1)
     x = np.longdouble(-10.5)
     assert_equal(int(x), -10)
Ejemplo n.º 51
0
 def test_xindex_dtype(self):
     x0 = numpy.longdouble(100)
     dx = numpy.float32(1e-4)
     series = self.create(x0=x0, dx=dx)
     assert series.xindex.dtype is x0.dtype
Ejemplo n.º 52
0
def tempo_polyco_table_reader(filename):
    """Read tempo style polyco file to an astropy table.

    Tempo style: The polynomial ephemerides are written to file 'polyco.dat'.  Entries
    are listed sequentially within the file.  The file format is::

        ====  =======   ============================================
        Line  Columns     Item
        ====  =======   ============================================
         1       1-10   Pulsar Name
                11-19   Date (dd-mmm-yy)
                20-31   UTC (hhmmss.ss)
                32-51   TMID (MJD)
                52-72   DM
                74-79   Doppler shift due to earth motion (10^-4)
                80-86   Log_10 of fit rms residual in periods
         2       1-20   Reference Phase (RPHASE)
                21-38   Reference rotation frequency (F0)
                39-43   Observatory number
                44-49   Data span (minutes)
                50-54   Number of coefficients
                55-75   Observing frequency (MHz)
                76-80   Binary phase
         3*      1-25   Coefficient 1 (COEFF(1))
                26-50   Coefficient 2 (COEFF(2))
                51-75   Coefficient 3 (COEFF(3))
        ====  =======   ============================================
        * Subsequent lines have three coefficients each, up to NCOEFF

    One polyco file may include more than one entry.

    The pulse phase and frequency at time T are then calculated as::

        DT = (T-TMID)*1440
        PHASE = RPHASE + DT*60*F0 + COEFF(1) + DT*COEFF(2) + DT^2*COEFF(3) + ....
        FREQ(Hz) = F0 + (1/60)*(COEFF(2) + 2*DT*COEFF(3) + 3*DT^2*COEFF(4) + ....)

    Parameters
    ----------
    filename : str
        Name of the input polyco file.

    References
    ----------
    http://tempo.sourceforge.net/ref_man_sections/tz-polyco.txt
    """
    f = open(filename, "r")
    # Read entries to the end of file
    entries = []
    while True:
        # Read first line
        line1 = f.readline()
        if len(line1) == 0:
            break

        fields = line1.split()
        psrname = fields[0].strip()
        date = fields[1].strip()
        utc = fields[2]
        tmid = np.longdouble(fields[3])
        dm = float(fields[4])
        doppler = float(fields[5])
        logrms = float(fields[6])
        # Read second line
        line2 = f.readline()
        fields = line2.split()
        refPhaseInt, refPhaseFrac = fields[0].split(".")
        refPhaseInt = data2longdouble(refPhaseInt)
        refPhaseFrac = data2longdouble("." + refPhaseFrac)
        if refPhaseInt < 0:
            refPhaseFrac = -refPhaseFrac

        refF0 = data2longdouble(fields[1])
        obs = fields[2]
        mjdSpan = data2longdouble(
            fields[3]) / MIN_PER_DAY  # convert span from minutes to days
        nCoeff = int(fields[4])
        obsfreq = float(fields[5].strip())

        try:
            binaryPhase = data2longdouble(fields[6])
        except ValueError:
            binaryPhase = data2longdouble(0.0)

        # Read coefficients
        nCoeffLines = int(np.ceil(nCoeff / 3))

        coeffs = []

        for i in range(nCoeffLines):
            line = f.readline()
            for c in line.split():
                coeffs.append(data2longdouble(c))
        coeffs = np.array(coeffs)

        tmid = tmid * u.day
        mjdspan = mjdSpan * u.day
        tstart = data2longdouble(tmid) - data2longdouble(mjdspan) / 2.0
        tstop = data2longdouble(tmid) + data2longdouble(mjdspan) / 2.0
        rphase = Phase(refPhaseInt, refPhaseFrac)
        refF0 = data2longdouble(refF0)
        coeffs = data2longdouble(coeffs)
        entry = PolycoEntry(tmid, mjdspan, refPhaseInt, refPhaseFrac, refF0,
                            nCoeff, coeffs, obs)

        entries.append((
            psrname,
            date,
            utc,
            tmid.value,
            dm,
            doppler,
            logrms,
            binaryPhase,
            mjdspan,
            tstart,
            tstop,
            obs,
            obsfreq,
            entry,
        ))
    f.close()
    entry_list = []
    for ii in range(len(entries[0])):
        entry_list.append([t[ii] for t in entries])

    # Construct the polyco data table
    pTable = table.Table(
        entry_list,
        names=(
            "psr",
            "date",
            "utc",
            "tmid",
            "dm",
            "doppler",
            "logrms",
            "binary_phase",
            "mjd_span",
            "t_start",
            "t_stop",
            "obs",
            "obsfreq",
            "entry",
        ),
        meta={"name": "Polyco Data Table"},
    )

    pTable["index"] = np.arange(len(entries))
    return pTable
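
For reference, the PHASE/FREQ recipe quoted in the docstring translates
directly into code. A minimal long-double sketch (PINT's Polycos class does
this properly with Phase objects; here coeffs is assumed to be the COEFF(1..n)
array read above):

import numpy as np

def polyco_phase_freq(t_mjd, tmid_mjd, rphase, f0, coeffs):
    # DT is in minutes, per the tempo polyco convention.
    dt = (np.longdouble(t_mjd) - np.longdouble(tmid_mjd)) * 1440.0
    powers = dt ** np.arange(len(coeffs))
    # PHASE = RPHASE + DT*60*F0 + COEFF(1) + DT*COEFF(2) + DT^2*COEFF(3) + ...
    phase = rphase + dt * 60.0 * f0 + np.sum(coeffs * powers)
    # FREQ(Hz) = F0 + (1/60)*(COEFF(2) + 2*DT*COEFF(3) + 3*DT^2*COEFF(4) + ...)
    freq = f0 + np.sum(np.arange(1, len(coeffs)) * coeffs[1:] *
                       powers[:-1]) / 60.0
    return phase, freq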
Ejemplo n.º 53
0
)
def test_singleton_type(format_, type_):
    t = Time.now()
    assert isinstance(getattr(t, format_), type_)
    t.format = format_
    assert isinstance(t.value, type_)


@pytest.mark.parametrize(
    "format_, val, val2",
    [
        ("mjd", 40000, 1e-10),
        ("pulsar_mjd", 40000, 1e-10),
        pytest.param(
            "mjd_long",
            np.longdouble(40000) + np.longdouble(1e-10),
            None,
            marks=pytest.mark.xfail(reason="astropy limitations"),
        ),
        pytest.param(
            "mjd_long",
            np.longdouble(40000),
            np.longdouble(1e-10),
            marks=pytest.mark.xfail(reason="astropy limitations"),
        ),
        pytest.param(
            "pulsar_mjd_long",
            np.longdouble(40000) + np.longdouble(1e-10),
            None,
            marks=pytest.mark.xfail(reason="astropy limitations"),
        ),
Ejemplo n.º 54
0
    F = list(map(f, x))
    for n in range(2, 100):
        x.append(x[n - 1] - F[n - 1] * (x[n - 1] - x[n - 2]) /
                 (F[n - 1] - F[n - 2]))
        if abs(x[n] - x[n - 1]) < xtol:
            return x[n]
        F.append(f(x[n]))
    return 0


# redefine Transcendental functions for proper precision
exp = lambda x: np.exp(x, dtype=np.longdouble)
sqrt = lambda x: np.sqrt(x, dtype=np.longdouble)
power = lambda x, y: np.power(x, y, dtype=np.longdouble)

oneHalf = 1 / np.longdouble(2)

# Define parameters
imax = longdouble(8)
area = 1 / longdouble(imax)

# Wanted distribution
P = lambda x: (2 / sqrt(2 * pi)) * exp(-x * x * oneHalf)

# Solve for X and Y
X = [fsolve(lambda x: x * P(x) - area, 1, 4)]
Y = [P(X[0])]
A = [1 - erf(X[0] / sqrt(2))]

while X[-1] > 0:
    X.append(
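
The fragment above is truncated, but its root finder is the secant update
shown at the start of the example. A self-contained version, assuming the
fsolve(f, x0, x1) being called is this custom solver rather than scipy's:

import numpy as np

def fsolve(f, x0, x1, xtol=np.longdouble(1e-15)):
    # Secant iteration:
    # x_n = x_{n-1} - F_{n-1}*(x_{n-1} - x_{n-2})/(F_{n-1} - F_{n-2})
    x = [np.longdouble(x0), np.longdouble(x1)]
    F = [f(x[0]), f(x[1])]
    for n in range(2, 100):
        if F[n - 1] == F[n - 2]:  # flat secant: cannot improve further
            return x[n - 1]
        x.append(x[n - 1] - F[n - 1] * (x[n - 1] - x[n - 2]) /
                 (F[n - 1] - F[n - 2]))
        if abs(x[n] - x[n - 1]) < xtol:
            return x[n]
        F.append(f(x[n]))
    return x[-1]

root = fsolve(lambda t: t * t - np.longdouble(2), 1, 2)  # ~sqrt(2)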
Ejemplo n.º 55
0
def test_locale_longdouble():
    assert_equal(str(np.longdouble(1.2)), str(float(1.2)))
Ejemplo n.º 56
0
import astropy.constants as c
import astropy.time as time
import astropy.units as u
import numpy as np
from astropy.units import si
from . import utils

# light-second unit
ls = u.def_unit('ls', c.c * 1.0 * u.s)

# DM unit (pc cm^-3)
dmu = u.def_unit('dmu', u.pc*u.cm**-3)

# define equivalency for astropy units
light_second_equivalency = [(ls, si.second, lambda x: x, lambda x: x)]
dimensionless_cycles = [(u.cycle, None)]
# hourangle_second unit
hourangle_second = u.def_unit('hourangle_second', u.hourangle/np.longdouble(3600.0))

# Following are from here:
# http://ssd.jpl.nasa.gov/?constants (grabbed on 30 Dec 2013)
GMsun = 1.32712440018e20 * u.m**3/u.s**2

# Solar mass in time units (sec)
Tsun = (GMsun / c.c**3).to(u.s)

# Planet system(!) masses in time units
Tmercury = Tsun / 6023600.
Tvenus = Tsun / 408523.71
Tearth = Tsun / 328900.56 # Includes Moon!
Tmars = Tsun / 3098708.
Tjupiter = Tsun / 1047.3486
Tsaturn = Tsun / 3497.898
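
The solar mass in time units defined above comes out near 4.92549
microseconds, which makes a cheap sanity check when the module loads.
A sketch:

import astropy.constants as c
import astropy.units as u

GMsun = 1.32712440018e20 * u.m**3 / u.s**2
Tsun = (GMsun / c.c**3).to(u.s)
assert abs(Tsun.value - 4.925490947e-6) < 1e-13
print(Tsun.to(u.us))  # ~4.92549 us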
Ejemplo n.º 57
0
def time_from_longdouble(t, scale="utc", format="pulsar_mjd"):
    t = np.longdouble(t)
    i = float(np.floor(t))
    f = float(t - i)
    return astropy.time.Time(val=i, val2=f, format=format, scale=scale)
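
Splitting the long double into an integer day plus a float remainder carries
its extended precision (nanosecond level for an MJD) into astropy's two-double
time representation. A usage sketch, assuming the function above is in scope
(format="mjd" is used here since "pulsar_mjd" needs PINT's custom Time format
registered):

import numpy as np

t = time_from_longdouble(np.longdouble("58000.5"), scale="tdb", format="mjd")
print(t.jd1, t.jd2)  # the two doubles astropy carries internally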
Ejemplo n.º 58
0
 def __init__(self):
     self.position = np.array(
         [r.uniform(490, 510), r.uniform(0.4, 0.6), r.uniform(0.4, 0.6),
          r.uniform(4900, 5100), r.uniform(0.4, 0.6),
          r.uniform(44000, 46000), r.uniform(0.4, 0.6)],
         dtype=np.longdouble)
     self.pbest_position = self.position
     self.pbest_value = np.longdouble('inf')
     self.velocity = np.zeros((7), dtype=np.longdouble)
Ejemplo n.º 59
0
    def prtl_der(self, y, x):
        """Find the partial derivatives in binary model pdy/pdx

        Parameters
        ----------
        y : str
           Name of variable to be differentiated
        x : str
           Name of variable the derivative respect to

        Returns
        -------
        np.array
           The derivatives pdy/pdx
        """
        if y not in self.binary_params + self.inter_vars:
            errorMesg = y + " is not in binary parameter and variables list."
            raise ValueError(errorMesg)

        if x not in self.inter_vars + self.binary_params:
            errorMesg = x + " is not in binary parameters and variables list."
            raise ValueError(errorMesg)
        # derivative to itself
        if x == y:
            return np.longdouble(np.ones(len(self.tt0))) * u.Unit("")
        # Get the unit right

        yAttr = getattr(self, y)
        xAttr = getattr(self, x)
        U = [None, None]
        for i, attr in enumerate([yAttr, xAttr]):
            if hasattr(attr,
                       "units"):  # If attr is a PINT Parameter class type
                U[i] = attr.units
            elif hasattr(attr, "unit"):  # If attr is a Quantity type
                U[i] = attr.unit
            elif hasattr(attr, "__call__"):  # If attr is a method
                U[i] = attr().unit
            else:
                raise TypeError("cannot get unit for {}".format(type(attr)))

        yU = U[0]
        xU = U[1]
        # Unit of the returned derivative
        derU = yU / xU

        # Call the specific derivative function if one is defined
        if hasattr(self, "d_" + y + "_d_" + x):
            dername = "d_" + y + "_d_" + x
            result = getattr(self, dername)()

        elif hasattr(self, "d_" + y + "_d_par"):
            dername = "d_" + y + "_d_par"
            result = getattr(self, dername)(x)

        else:
            result = np.longdouble(np.zeros(len(self.tt0)))

        if hasattr(result, "unit"):
            return result.to(derU, equivalencies=u.dimensionless_angles())
        else:
            return result * derU
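
The upshot of the dispatch above is that a binary model only needs methods
named d_<y>_d_<x> (or a catch-all d_<y>_d_par) and prtl_der will find them by
name. A stripped-down illustration of that pattern, without the unit handling:

import numpy as np

class Model:
    def d_ecc_d_ECC(self):
        return np.ones(3)

    def prtl_der(self, y, x):
        name = "d_" + y + "_d_" + x
        if hasattr(self, name):        # specific derivative defined?
            return getattr(self, name)()
        return np.zeros(3)             # default: zero derivative

print(Model().prtl_der("ecc", "ECC"))  # dispatches to d_ecc_d_ECC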
Ejemplo n.º 60
0
 def d_ecc_d_ECC(self):
     return np.longdouble(np.ones(len(self.tt0))) * u.Unit("")