Exemplo n.º 1
0
def test_malloc():
    '''Regression check: an early build suffered intermittent malloc crashes.'''

    t_start = 2456117.641933589
    t_step = 20
    t_range = 1000
    geo = False
    num_particles = 1
    state_vector = np.array([
        -2.0938349524664743, 1.0009137200092553, 0.41979849545335507,
        -0.004226738336365523, -0.009129140909705197, -0.0036271214539287102
    ])

    # Hammer the integrator repeatedly: a latent malloc bug shows up as a
    # random crash somewhere within these iterations.
    for _ in range(1000):
        integration_function(t_start, t_step, t_range, geo, num_particles,
                             state_vector)
Exemplo n.º 2
0
def test_initialize_integration_function():
    '''
    Smoke test: feed ephem_forces.integration_function some arbitrary but
    valid input and confirm it returns rather than crashing.
    If there is a fundamental problem, it will most likely take pytest down
    entirely, so this is the natural first check.
    '''
    t0, step, span = 2456184.7, 20.0, 600
    geo = 0
    num = 1
    state = np.array([-3.1, 2.7, 3.6, -0.006, -0.004, -0.002])
    result = ephem_forces.integration_function(t0, step, span, geo,
                                               num, state)
    times, states, n_out, n_particles_out = result
    # Sanity-check the shape/typing of everything that came back.
    assert n_particles_out == num
    for value, expected_type in ((n_particles_out, int),
                                 (n_out, int),
                                 (states, np.ndarray),
                                 (times, np.ndarray)):
        assert isinstance(value, expected_type)
Exemplo n.º 3
0
def test_initialize_integration_function():
    '''
    Smoke test: push arbitrary valid input through
    ephem_forces.integration_function and confirm it returns sanely.
    If something is fundamentally broken, this will probably take pytest
    down with it, so it is the natural first check.

    *** MJP ***
    This test is insufficient. It does not capture the problem related to the
    hard-coded ephem file, which returns a message
    "could not load DE430 file, fool!"

    *** MA ***
    Indeed, when that file is missing, the C code prints that insult,
    then causes a complete crash, exiting python without a python error.
    As such, it is impossible to guard against it, even with a
    try:
      stuff
    except:
      print('Oh no, you probably have files missing!')
    as it exits python entirely (somehow).
    When run interactively, the C prints the insult, but when run inside the
    pytest, it doesn't get printed (because pytest suppresses outputs) and
    python just crashes and leaves you with no test output.
    So in principle it's clear that it didn't work, but it doesn't actually
    give useful information.
    For now, I have found out that the two files needed are actually smaller
    than the GitHub 100 MB file limit, so I'll just commit them into the repo.
    '''
    t0, step, span = 2456184.7, 20.0, 600
    geo = 0
    num = 1
    state = np.array([-3.1, 2.7, 3.6, -0.006, -0.004, -0.002])
    result = ephem_forces.integration_function(t0, step, span, geo,
                                               num, state)
    times, states, n_out, n_particles_out = result
    # Check that each output has the expected type / value.
    assert n_particles_out == num
    for value, expected_type in ((n_particles_out, int),
                                 (n_out, int),
                                 (states, np.ndarray),
                                 (times, np.ndarray)):
        assert isinstance(value, expected_type)
Exemplo n.º 4
0
def run_nbody(input_vectors,
              tstart,
              tstep,
              trange,
              geocentric=False,
              verbose=False):
    '''
    Drive the nbody integrator on parsed input.

    Input:
    ------
    input_vectors = ParseElements object, list of ParseElements objects,
                    or numpy array of elements.
    tstart = float, Julian Date at start of integration.
    tstep = float or integer, major time step of integrator.
    trange = float or integer, rough total time of integration.
    geocentric = boolean, geo- (True) or heliocentric (False) frame.

    Output:
    -------
    reparsed_input = numpy array, input elements reparsed into an array
    n_particles = integer, the input number of particles
    times = numpy array, all output times (including sub-steps)
    output_vectors = numpy array of shape (n_times, n_particles_out, 6)
    n_times = integer, number of time outputs
    n_particles_out = integer, number of output particles (different why?)
    '''
    # Normalize the three accepted input flavours into one array form:
    elements, count = _fix_input(input_vectors, verbose)

    # Hand off to the nbody integrator:
    integration_result = integration_function(tstart, tstep, trange,
                                              geocentric, count, elements)
    times, output_vectors, n_times, n_particles_out = integration_result

    return (elements, count, times, output_vectors, n_times, n_particles_out)
Exemplo n.º 5
0
    def run_nbody(  self,
                    epoch,
                    input_states,
                    input_covariances,
                    tstart,
                    tstep,
                    trange,
                    init_covariances = None,
                    geocentric=False,
                    verbose=False):
        '''
        Run the nbody integrator with the parsed input.

        Input:
        ------
        epoch :          float
         - Common epoch at which all input vectors are defined
        input_states :   np.ndarray
         - 2D, shape = (N_particles, 6)
         - elements (xyzuvw) for N_particles to be integrated
        input_covariances :   np.ndarray or None
         - 3D, shape = (N_particles, 6,6)
         - NOTE(review): currently unused; covariance propagation reads
           the init_covariances keyword instead — confirm which is intended.
        tstart = float,
         - Julian Date at start of integration.
        tstep = float or integer,
         - major time step of integrator.
        trange = float or integer,
         - rough total time of integration.
        init_covariances : np.ndarray or None
         - covariances at epoch, propagated via the tangent vectors.
        geocentric = boolean,
         - use geo- (True) or heliocentric (False)
        verbose = boolean,
         - currently unused here.

        Output:
        -------
        Tuple of (epoch, input_states, init_covariances, n_particles, times,
        final_states, final_covariance_arrays, n_times, n_particles_out):
        times               = numpy array of all output times (incl. sub-steps)
        final_states        = numpy array, (n_times, n_particles_out, 6)
        final_covariance_arrays = numpy array or None
        n_times             = integer, number of time outputs
        n_particles_out     = integer, number of output particles
        '''

        # Get the number of particles; states must be (N_particles, 6) xyzuvw.
        assert input_states.ndim == 2 and input_states.shape[1] == 6
        n_particles = input_states.shape[0]

        # Now run the nbody integrator:
        times, output_vectors, n_times, n_particles_out = integration_function(
            tstart,
            tstep,
            trange,
            geocentric,
            n_particles,
            input_states)

        # Split the output vectors into final states and the
        # \partial X / \partial X_0 tangent-vector data.
        final_states, partial_derivatives = _split_integration_output(
            output_vectors, n_times, n_particles_out)

        # Calculate the covariance matrix (at each timestep) from the
        # \partial X / \partial X_0 data, if initial covariances were supplied.
        if init_covariances is not None:
            final_covariance_arrays = self._get_covariance_from_tangent_vectors(
                init_covariances, partial_derivatives)
        else:
            final_covariance_arrays = None

        # BUGFIX: the original returned the undefined name 'init_states',
        # which raised NameError on every call; the intended value is the
        # input_states argument echoed back to the caller.
        return( epoch,
                input_states,
                init_covariances,
                n_particles,
                times,
                final_states,
                final_covariance_arrays,
                n_times,
                n_particles_out)