def _propagate_states(self, state_vectors, propagation_params, opm_params_templ):
    """Propagate states using many initial state vectors.

    Args:
        state_vectors (list of lists): list of lists with 6 elements
            [rx, ry, rz, vx, vy, vz]  [km, km/s]
        propagation_params (PropagationParams): propagation-related parameters
            to be used for all propagations
        opm_params_templ (OpmParams): opm-related parameters to be used for
            all propagations, once with each of the given state vectors.

    Returns:
        end_state_vectors (list of lists): states at end of integration
            [rx, ry, rz, vx, vy, vz]  [km, km/s]
    """
    def _batch_for(vector):
        # Deep-copy the template so each batch gets its own OpmParams and
        # the caller's template is never mutated.
        params = deepcopy(opm_params_templ)
        params.set_state_vector(vector)
        return Batch(propagation_params, params)

    batches = [_batch_for(vector) for vector in state_vectors]

    # Submit all batches at once and block until every run has finished.
    BatchRunManager(self.batches_module, batches).run()

    # Collect the final state of each propagation, in input order.
    return [batch.get_results().get_end_state_vector() for batch in batches]
def test_small_batch(self):
    """Run a few identical one-year batches and verify that all of them
    complete and converge on the expected end state."""
    batch_count = 3
    duration_days = 365  # 1 year

    batches = [self.new_dummy_batch(duration_days) for _ in range(batch_count)]
    manager = BatchRunManager(self.service.get_batches_module(), batches)
    manager.run()

    # Every batch should have left the queue and finished successfully.
    statuses = manager.get_latest_statuses()
    self.assertEqual(0, len(statuses['PENDING']))
    self.assertEqual(0, len(statuses['RUNNING']))
    self.assertEqual(batch_count, len(statuses['COMPLETED']))
    self.assertEqual(0, len(statuses['FAILED']))

    # All batches are identical, so they share one expected end state
    # [rx, ry, rz (km), vx, vy, vz (km/s)].
    expected_end_state = [-37535741.96415495, 492953227.1713997, 204483503.94517875,
                          -11.337510170756701, 7.185009462698965, 3.3597614338244766]
    for batch in batches:
        npt.assert_allclose(expected_end_state,
                            batch.get_results().get_end_state_vector(),
                            rtol=1e-3, atol=0)
def test_corners(self, service, working_project):
    """Run a CORNERS hypercube propagation and check it completes with the
    expected number of parts (2**6 corner runs plus the nominal run = 65)."""
    batch = self._new_hypercube_batch('CORNERS', working_project)

    runner = BatchRunManager(service.get_batches_module(), [batch])
    runner.run()

    # BUG FIX: this was previously the bare expression
    # 'COMPLETED' == batch.get_calc_state() — a boolean that was computed and
    # discarded, so a failed run could never fail the test. It must be asserted.
    assert batch.get_calc_state() == 'COMPLETED'

    parts = batch.get_results().get_parts()
    assert len(parts) == 65
def test_corners(self):
    """A CORNERS hypercube propagation completes with 65 parts
    (2**6 corner runs plus the nominal trajectory)."""
    batch = self.new_hypercube_batch('CORNERS')
    BatchRunManager(self.service.get_batches_module(), [batch]).run()

    self.assertEqual('COMPLETED', batch.get_calc_state())
    self.assertEqual(65, len(batch.get_results().get_parts()))
def test_sun_ememe(self, service, working_project):
    """Propagate a sun-centered state supplied in the EMEME2000 frame and
    verify the end state against an ICRF expectation.

    NOTE(review): the service appears to return results in ICRF even though
    EMEME2000 was requested — see the comments near the ephemeris checks.
    """
    start_time_str = "2000-01-01T11:58:55.816Z"
    end_time_str = "2009-07-21T21:55:08.813Z"

    # Initial state in SUN-centered EMEME2000 [rx, ry, rz (km), vx, vy, vz (km/s)].
    sun_ememe_state_vec = [-306536346.18024945, -120966638.54521248, -12981.069369263947,
                           15.759854830195243, -11.539570959741736, 0.0005481049628786039]

    propagation_params = PropagationParams({
        'start_time': start_time_str,
        'end_time': end_time_str,
        'step_size': 86400,  # one-day steps (86400 s)
        'project_uuid': working_project.get_uuid(),
        'description': 'Created by test at ' + start_time_str
    })
    opm_params = OpmParams({
        'epoch': start_time_str,
        'state_vector': sun_ememe_state_vec,
        'center_name': 'SUN',
        'ref_frame': 'EMEME2000',
    })

    batch = Batch(propagation_params, opm_params)
    runner = BatchRunManager(service.get_batches_module(), [batch])
    runner.run()

    end_state = batch.get_results().get_end_state_vector()

    # The output state is expected to be in ICRF.
    expected_end_state = [73978163.61069362, -121822760.05571477, -52811158.83249758,
                          31.71000343989318, 29.9657246374751, .6754531613947713]
    # These values are in EMEME. The resulting ephemeris is not expected to match these values.
    # expected_end_state = [73978158.47632701, -132777272.5255892, 5015.073123970032,
    #                       31.710003506237434, 27.761693311026138, -11.299967713192564]

    difference = np.subtract(expected_end_state, end_state)
    print("Difference is %s" % difference)
    print("End state: %s" % end_state)
    # Position agrees to within 0.02 (km), velocity to within 2e-5 (km/s).
    npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=.02)
    npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=.00002)

    # The returned ephemeris will be in Sun-centered ICRF, not EMEME. My best guess is that
    # the ephemeris file doesn't support all reference frames, so if it encounters one that
    # isn't supported, it'll choose a similar one.
    ephem = batch.get_results().get_parts()[-1].get_ephemeris()
    assert "ICRF" in ephem
    assert "EMEME" not in ephem
def test_backwards_and_forwards(self):
    """Propagate a state forward 10 years, then propagate the result backward
    over the same interval and verify the initial state is recovered."""
    now = datetime.datetime.now()
    later = now + datetime.timedelta(10 * 365)  # 10 years

    state_vec = [130347560.13690618, -74407287.6018632, -35247598.541470632,
                 23.935241263310683, 27.146279819258538, 10.346605942591514]
    print("Starting at %s" % (state_vec))

    # Forward leg: now -> later.
    print("Propagating forward from %s to %s" % (now, later))
    forward_batch = self.make_batch(state_vec, now, later)
    BatchRunManager(self.service.get_batches_module(), [forward_batch]).run()
    forward_end_state = forward_batch.get_results().get_end_state_vector()
    print("Final state at %s" % forward_end_state)

    # Backward leg: later -> now, starting from the forward result.
    print("Propagating backward from %s to %s" % (later, now))
    backward_batch = self.make_batch(forward_end_state, later, now)
    BatchRunManager(self.service.get_batches_module(), [backward_batch]).run()
    backwards_end_state = backward_batch.get_results().get_end_state_vector()
    print("Final state at %s" % backwards_end_state)

    # The round trip should land essentially back on the initial state.
    difference = np.subtract(state_vec, backwards_end_state)
    print("Difference is %s" % difference)
    npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=1e-3)
    npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=1e-10)
def test_icrf(self, service, working_project):
    """Propagate a sun-centered ICRF state and verify both the end state and
    that the generated ephemeris is labeled ICRF."""
    start_time_str = "2000-01-01T11:58:55.816Z"
    end_time_str = "2009-07-21T21:55:08.813Z"

    # Initial state in SUN-centered ICRF [rx, ry, rz (km), vx, vy, vz (km/s)].
    sun_icrf_state_vec = [-306536341.5010222, -110979556.84640282, -48129706.42252728,
                          15.75985527640906, -10.587567329195842, -4.589673432886975]

    propagation_params = PropagationParams({
        'start_time': start_time_str,
        'end_time': end_time_str,
        'step_size': 86400,
        'project_uuid': working_project.get_uuid(),
        'description': 'Created by test at ' + start_time_str
    })
    opm_params = OpmParams({
        'epoch': start_time_str,
        'state_vector': sun_icrf_state_vec,
        'center_name': 'SUN',
        'ref_frame': 'ICRF',
    })

    batch = Batch(propagation_params, opm_params)
    BatchRunManager(service.get_batches_module(), [batch]).run()

    end_state = batch.get_results().get_end_state_vector()
    expected_end_state = [73978163.61069362, -121822760.05571477, -52811158.83249758,
                          31.71000343989318, 29.9657246374751, .6754531613947713]

    difference = np.subtract(expected_end_state, end_state)
    print("Difference is %s" % difference)
    print("End state: %s" % end_state)
    # Position agrees to within 0.02 (km), velocity to within 2e-5 (km/s).
    npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=.02)
    npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=.00002)

    # The ephemeris of the last part should be expressed in ICRF.
    ephem = batch.get_results().get_parts()[-1].get_ephemeris()
    assert "ICRF" in ephem
def propagate_states(state_vectors, epoch_time, end_time,
                     url="https://pro-equinox-162418.appspot.com/_ah/api/adam/v1",
                     project_uuid='ffffffff-ffff-ffff-ffff-ffffffffffff'):
    """Propagate states from one time to another.

    Assumes each state's epoch is the same as the integration start time.

    Args:
        state_vectors (list of lists): list of lists with 6 elements
            [rx, ry, rz, vx, vy, vz]  [km, km/s]
        epoch_time (datetime.datetime): epoch of state (UTC datetime)
        end_time (datetime.datetime): time at which to end the simulation
            (UTC datetime)
        url (str): ADAM REST endpoint to use. Defaults to the production
            service (previously hard-coded).
        project_uuid (str): project in which to run the batches. Defaults to
            the previously hard-coded public project.

    Returns:
        end_state_vectors (list of lists): states at end of integration
            [rx, ry, rz, vx, vy, vz]  [km, km/s]
    """
    # Convert times to strings; the epoch doubles as the propagation start.
    epoch_time_str = batch_time_string_from_datetime(epoch_time)
    start_time_str = epoch_time_str
    end_time_str = batch_time_string_from_datetime(end_time)
    print("Propagating %i states to propagate from %s to %s" %
          (len(state_vectors), start_time_str, end_time_str))

    rest = RestRequests(url)
    batches_module = Batches(rest)

    # One batch per initial state vector, all sharing the same propagation window.
    propagation_params = PropagationParams({
        'start_time': start_time_str,
        'end_time': end_time_str,
        'project_uuid': project_uuid
    })
    batches = [
        Batch(propagation_params,
              OpmParams({'epoch': start_time_str, 'state_vector': state_vector}))
        for state_vector in state_vectors
    ]

    # Submit batches and wait till they finish running.
    BatchRunManager(batches_module, batches).run()

    # Collect the final state of each propagation, in input order.
    return [batch.get_results().get_end_state_vector() for batch in batches]
def test_config_in_use_pins_project(self):
    """Verify that a project cannot be deleted while a propagator config in it
    is referenced by an existing batch, and that deletion succeeds once the
    batch has been removed."""
    # Config management isn't very common, doesn't merit direct addition to service.
    configs = PropagatorConfigs(self.service.rest)
    projects = self.service.get_projects_module()
    project = self.service.new_working_project()

    # Two child projects: project1 holds the config, project2 holds the batch.
    project1 = projects.new_project(project.get_uuid(), "", "")
    self.assertIsNotNone(project1)
    project2 = projects.new_project(project.get_uuid(), "", "")
    self.assertIsNotNone(project2)
    print("Added child projects to working project: " + "[" + project1.get_uuid() +
          ", " + project2.get_uuid() + "]")

    config = configs.new_config({'project': project1.get_uuid(),
                                 'description': 'test config'})
    self.assertEqual(project1.get_uuid(), config.get_project())

    # The batch lives in project2 but uses the propagator config from project1,
    # creating the cross-project reference under test.
    batch = Batch(
        PropagationParams({
            'start_time': '2017-10-04T00:00:00Z',
            'end_time': '2017-10-05T00:00:00Z',
            'project_uuid': project2.get_uuid(),
            'propagator_uuid': config.get_uuid()
        }),
        OpmParams({
            'epoch': '2017-10-04T00:00:00Z',
            'state_vector': [130347560.13690618, -74407287.6018632, -35247598.541470632,
                             23.935241263310683, 27.146279819258538, 10.346605942591514]
        }))
    BatchRunManager(self.service.get_batches_module(), [batch]).run()

    # Attempt to delete the project with the config in it. It should refuse because the
    # config is still in use by the batch.
    with self.assertRaises(RuntimeError):
        projects.delete_project(project1.get_uuid())

    # Then delete the batch. After that, the project with the config in it should
    # delete no problem.
    self.service.batches.delete_batch(batch.get_uuid())
    projects.delete_project(project1.get_uuid())

    # Clean up the batch holder project.
    projects.delete_project(project2.get_uuid())
def test_keplerian_and_cartesian(self):
    """A Keplerian-element batch and its Cartesian equivalent should
    propagate to (nearly) identical end states."""
    start = datetime.datetime.now()
    end = start + datetime.timedelta(10)  # 10 days
    start_time_str = start.isoformat() + 'Z'
    end_time_str = end.isoformat() + 'Z'

    cartesian, keplerian = self.make_cartesian_and_keplerian_batches(
        start_time_str, end_time_str)
    BatchRunManager(self.service.get_batches_module(), [cartesian, keplerian]).run()

    # Compare the two end states component-wise.
    difference = np.subtract(cartesian.get_results().get_end_state_vector(),
                             keplerian.get_results().get_end_state_vector())
    npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=1e-5)
    npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=1e-10)
def test_backwards_and_forwards(self, service, working_project):
    """Round-trip propagation: forward 10 years, then backward from the
    forward result, expecting to recover the initial state. Tolerances were
    loosened from the original values — see comments before the asserts."""
    now = datetime.datetime.now()
    later = now + datetime.timedelta(10 * 365)  # 10 years

    state_vec = [130347560.13690618, -74407287.6018632, -35247598.541470632,
                 23.935241263310683, 27.146279819258538, 10.346605942591514]
    print("Starting at %s" % (state_vec))

    # Forward leg: now -> later.
    print("Propagating forward from %s to %s" % (now, later))
    batch = self._make_batch(state_vec, now, later, working_project)
    runner = BatchRunManager(service.get_batches_module(), [batch])
    runner.run()
    forward_end_state = batch.get_results().get_end_state_vector()
    print("Final state at %s" % forward_end_state)

    # Backward leg: later -> now, starting from the forward result.
    print("Propagating backward from %s to %s" % (later, now))
    batch = self._make_batch(forward_end_state, later, now, working_project)
    runner = BatchRunManager(service.get_batches_module(), [batch])
    runner.run()
    backwards_end_state = batch.get_results().get_end_state_vector()
    print("Final state at %s" % backwards_end_state)

    difference = np.subtract(state_vec, backwards_end_state)
    print("Difference is %s" % difference)
    # from test:
    # Difference is [8.20617378e-03 9.39679146e-03 3.57937068e-03
    #                -1.62931713e-09 9.38538136e-10 4.41515269e-10]
    # Seems like when the test was written (and presumably was working), the tolerances
    # were small (the commented-out lines below). This was back in April 2018, so probably
    # ADAM was also using STK 2017r(something) and not 2018r3, which is the version ADAM
    # is currently using. Could the same propagation be calculated with slight differences?
    # npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=1e-3)
    # npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=1e-10)
    npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=1e-2)
    npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=1e-8)
def test_backwards_and_forwards(self, service, working_project):
    """Round-trip propagation: forward 10 years, then backward from the
    forward result, expecting to recover the initial state.

    NOTE(review): known to be flaky at these tight tolerances — see the
    recorded example differences in the comments before the asserts."""
    now = datetime.datetime.now()
    later = now + datetime.timedelta(10 * 365)  # 10 years

    state_vec = [130347560.13690618, -74407287.6018632, -35247598.541470632,
                 23.935241263310683, 27.146279819258538, 10.346605942591514]
    print("Starting at %s" % (state_vec))

    # Forward leg: now -> later.
    print("Propagating forward from %s to %s" % (now, later))
    batch = self._make_batch(state_vec, now, later, working_project)
    runner = BatchRunManager(service.get_batches_module(), [batch])
    runner.run()
    forward_end_state = batch.get_results().get_end_state_vector()
    print("Final state at %s" % forward_end_state)

    # Backward leg: later -> now, starting from the forward result.
    print("Propagating backward from %s to %s" % (later, now))
    batch = self._make_batch(forward_end_state, later, now, working_project)
    runner = BatchRunManager(service.get_batches_module(), [batch])
    runner.run()
    backwards_end_state = batch.get_results().get_end_state_vector()
    print("Final state at %s" % backwards_end_state)

    difference = np.subtract(state_vec, backwards_end_state)
    print("Difference is %s" % difference)
    # This test appears to be flaky. Usually it fails on the first of the following 2 asserts,
    # but sometimes, very rarely, it will pass on the first assert and fail on the second.
    # Example differences (subtracting backwards end state from initial state vector):
    # [ 4.60617244e-03 5.23680449e-03 1.98936462e-03
    #   -9.19317955e-10 5.18536325e-10 2.51514365e-10] <-- fails on 1st assert
    # [ 2.29061693e-02 2.58967876e-02 9.86936688e-03
    #   -4.50932092e-09 2.57853827e-09 1.22151356e-09] <-- fails on 1st assert
    # [ 6.06164336e-04 7.16790557e-04 2.69368291e-04
    #   -1.19317889e-10 7.85398413e-11 3.15143467e-11] <-- fails on 2nd assert
    npt.assert_allclose(difference[0:3], [0, 0, 0], rtol=0, atol=1e-3)
    npt.assert_allclose(difference[3:6], [0, 0, 0], rtol=0, atol=1e-10)
# 'covariance': covariance, # object covariance # 'perturbation': 3, # sigma perturbation on state vector # 'hypercube': 'FACES', # hypercube propagation type # 'originator': 'Robot', # originator of run # 'object_name': 'TestObj', # object name # 'object_id': 'test1234', # object ID }) batch = Batch(propagation_params, opm_params) print("Submitting OPM:") print(batch.get_opm_params().generate_opm()) # Submit and wait until batch run is ready batches_module = Batches(auth_rest) BatchRunManager(batches_module, [batch]).run() # Get final parts count parts_count = batch.get_state_summary().get_parts_count() print("Final state: %s, part count %s\n" % (batch.get_calc_state(), parts_count)) # Get ephemeris of specified part part_to_get = 0 eph = batch.get_results().get_parts()[part_to_get].get_ephemeris() print("Ephemeris:") print(eph) # Get the end state vector (uncomment to use) # end_state_vector = batch.get_results().get_end_state_vector() # print("State vector at the end of propagation:") # print(end_state_vector)